diff --git a/docs/en/14-reference/11-docker/index.md b/docs/en/14-reference/11-docker/index.md index b9278c6961ec5f395f68795422bde682f9467f77..5a48f2e4b1e58132c728dadfba2afbe39ae8bef3 100644 --- a/docs/en/14-reference/11-docker/index.md +++ b/docs/en/14-reference/11-docker/index.md @@ -273,49 +273,48 @@ password: taosdata ## Start the TDengine cluster with docker-compose -1. The following docker-compose file starts a TDengine cluster with two replicas, two management nodes, two data nodes, and one arbitrator. - - ```docker - version: "3" - services: - arbitrator: - image: tdengine/tdengine:$VERSION - command: tarbitrator - td-1: - image: tdengine/tdengine:$VERSION - environment: - TAOS_FQDN: "td-1" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td1:/var/lib/taos/ - - taoslog-td1:/var/log/taos/ - td-2: - image: tdengine/tdengine:$VERSION - environment: - TAOS_FQDN: "td-2" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td2:/var/lib/taos/ - - taoslog-td2:/var/log/taos/ - volumes: - taosdata-td1: - taoslog-td1: - taosdata-td2: - taoslog-td2: - ``` +1. The following docker-compose file starts a TDengine cluster with three nodes. + +```yml +version: "3" +services: + td-1: + image: tdengine/tdengine:$VERSION + environment: + TAOS_FQDN: "td-1" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td1:/var/lib/taos/ + - taoslog-td1:/var/log/taos/ + td-2: + image: tdengine/tdengine:$VERSION + environment: + TAOS_FQDN: "td-2" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td2:/var/lib/taos/ + - taoslog-td2:/var/log/taos/ + td-3: + image: tdengine/tdengine:$VERSION + environment: + TAOS_FQDN: "td-3" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td3:/var/lib/taos/ + - taoslog-td3:/var/log/taos/ +volumes: + taosdata-td1: + taoslog-td1: + taosdata-td2: + taoslog-td2: + taosdata-td3: + taoslog-td3: +``` :::note - The `VERSION` environment variable is used to set the tdengine image tag - `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time -- `TAOS_REPLICA` is used to set the default number of database replicas. Its value range is [1,3] - We recommend setting it with `TAOS_ARBITRATOR` to use arbitrator in a two-nodes environment. ::: 2. Start the cluster @@ -345,17 +344,18 @@ password: taosdata 4. 
Show dnodes via TDengine CLI - ```shell - $ docker-compose exec td-1 taos -s "show dnodes" - - taos> show dnodes - id | end_point | vnodes | cores | status | role | create_time | offline reason | - ====================================================================================================================================== - 1 | td-1:6030 | 1 | 8 | ready | any | 2022-01-18 02:47:42.871 | | - 2 | td-2:6030 | 0 | 8 | ready | any | 2022-01-18 02:47:43.518 | | - 0 | arbitrator:6042 | 0 | 0 | ready | arb | 2022-01-18 02:47:43.633 | - | - Query OK, 3 row(s) in set (0.000811s) - ``` +```shell +$ docker-compose exec td-1 taos -s "show dnodes" + +taos> show dnodes + id | endpoint | vnodes | support_vnodes | status | create_time | note | +====================================================================================================================================== + 1 | td-1:6030 | 0 | 32 | ready | 2022-08-19 07:57:29.971 | | + 2 | td-2:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.415 | | + 3 | td-3:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.417 | | +Query OK, 3 rows in database (0.021262s) + +``` ## taosAdapter @@ -373,83 +373,70 @@ password: taosdata Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. Here is an example: - ```docker - version: "3" - - networks: - inter: - api: - - services: - arbitrator: - image: tdengine/tdengine:$VERSION - command: tarbitrator - networks: - - inter - td-1: - image: tdengine/tdengine:$VERSION - networks: - - inter - environment: - TAOS_FQDN: "td-1" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td1:/var/lib/taos/ - - taoslog-td1:/var/log/taos/ - td-2: - image: tdengine/tdengine:$VERSION - networks: - - inter - environment: - TAOS_FQDN: "td-2" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td2:/var/lib/taos/ - - taoslog-td2:/var/log/taos/ - adapter: - image: tdengine/tdengine:$VERSION - command: taosadapter - networks: - - inter - environment: - TAOS_FIRST_EP: "td-1" - TAOS_SECOND_EP: "td-2" - deploy: - replicas: 4 - nginx: - image: nginx - depends_on: - - adapter - networks: - - inter - - api - ports: - - 6041:6041 - - 6044:6044/udp - command: [ - "sh", - "-c", - "while true; - do curl -s http://adapter:6041/-/ping >/dev/null && break; - done; - printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}' - > /etc/nginx/conf.d/rest.conf; - printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}' - >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf; - nginx -g 'daemon off;'", - ] - volumes: - taosdata-td1: - taoslog-td1: - taosdata-td2: - taoslog-td2: - ``` +```yml +version: "3" + +networks: + inter: + +services: + td-1: + image: tdengine/tdengine:$VERSION + networks: + - inter + environment: + TAOS_FQDN: "td-1" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td1:/var/lib/taos/ + - taoslog-td1:/var/log/taos/ + td-2: + image: tdengine/tdengine:$VERSION + networks: + - inter + environment: + TAOS_FQDN: "td-2" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td2:/var/lib/taos/ + - taoslog-td2:/var/log/taos/ + adapter: + image: tdengine/tdengine:$VERSION + entrypoint: "taosadapter" + networks: + - inter + environment: + 
TAOS_FIRST_EP: "td-1" + TAOS_SECOND_EP: "td-2" + deploy: + replicas: 4 + nginx: + image: nginx + depends_on: + - adapter + networks: + - inter + ports: + - 6041:6041 + - 6044:6044/udp + command: [ + "sh", + "-c", + "while true; + do curl -s http://adapter:6041/-/ping >/dev/null && break; + done; + printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}' + > /etc/nginx/conf.d/rest.conf; + printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}' + >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf; + nginx -g 'daemon off;'", + ] +volumes: + taosdata-td1: + taoslog-td1: + taosdata-td2: + taoslog-td2: +``` ## Deploy with docker swarm diff --git a/docs/zh/14-reference/11-docker/index.md b/docs/zh/14-reference/11-docker/index.md index 086d72940c88adfe0e0c4c30cf16cc9cb2701630..a6696977f9cb08da2eb8ff68c42c54d388e74980 100644 --- a/docs/zh/14-reference/11-docker/index.md +++ b/docs/zh/14-reference/11-docker/index.md @@ -309,7 +309,7 @@ services: TAOS_FIRST_EP: "td-1" volumes: - taosdata-td3:/var/lib/taos/ - - taoslog-td3:/var/log/taos/ + - taoslog-td3:/var/log/taos/ volumes: taosdata-td1: taoslog-td1: @@ -473,18 +473,18 @@ Creating service taos_adapter ```shell $ docker stack ps taos ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -7m3sbf532bqp taos_adapter.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago -pj403n6ofmmh taos_adapter.2 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago -rxqfwsyk5q1h taos_adapter.3 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago -qj40lpxr40oc taos_adapter.4 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago -oe3455ulxpze taos_nginx.1 nginx:latest vm98 Running Running about a minute ago -o0tsg70nrrc6 taos_td-1.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +7m3sbf532bqp taos_adapter.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +pj403n6ofmmh taos_adapter.2 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +rxqfwsyk5q1h taos_adapter.3 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +qj40lpxr40oc taos_adapter.4 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago +oe3455ulxpze taos_nginx.1 nginx:latest vm98 Running Running about a minute ago +o0tsg70nrrc6 taos_td-1.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago q5m1oxs589cp taos_td-2.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago $ docker service ls ID NAME MODE REPLICAS IMAGE PORTS -ozuklorgl8bs taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0 +ozuklorgl8bs taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0 crmhdjw6vxw0 taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp -o86ngy7csv5n taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0 +o86ngy7csv5n taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0 rma040ny4tb0 taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0 ``` @@ -495,11 +495,11 @@ rma040ny4tb0 taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0 ```shell $ docker service scale taos_adapter=1 taos_adapter scaled to 1 -overall progress: 1 out of 1 tasks -1/1: running [==================================================>] +overall progress: 1 out of 1 tasks +1/1: running [==================================================>] verify: Service converged $ docker service ls -f name=taos_adapter ID NAME MODE REPLICAS IMAGE PORTS -ozuklorgl8bs taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0 +ozuklorgl8bs taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0 ``` 
diff --git a/examples/c/asyncdemo.c b/examples/c/asyncdemo.c index c86cd44354d5b8a4e59e8c12fde32b4c06dc376a..91ec6f24b190fdc1c4a4c7f9eba479dc2fc1125d 100644 --- a/examples/c/asyncdemo.c +++ b/examples/c/asyncdemo.c @@ -92,7 +92,7 @@ int main(int argc, char *argv[]) } // a simple way to parse input parameters - if (argc >= 3) strcpy(db, argv[2]); + if (argc >= 3) strncpy(db, argv[2], sizeof(db) - 1); if (argc >= 4) points = atoi(argv[3]); if (argc >= 5) numOfTables = atoi(argv[4]); diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h index 0bfb057f207145f6fca890d15f5b8681980e340c..8be5cb4d413b9392702c628d72f27cb0662687a3 100644 --- a/include/common/tdataformat.h +++ b/include/common/tdataformat.h @@ -106,7 +106,7 @@ int32_t tBufferReserve(SBuffer *pBuffer, int64_t nData, void **ppData); // SRow ================================ int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow); -void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal); +int32_t tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal); void tRowDestroy(SRow *pRow); void tRowSort(SArray *aRowP); int32_t tRowMerge(SArray *aRowP, STSchema *pTSchema, int8_t flag); diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 7aae38f7baf51f3c7e20c59dd23b2feb28811221..46ca814e50ac6d33f24456e6ed9ecb7e8769dd41 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -172,8 +172,8 @@ enum { TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_UPTIME_TIMER, "uptime-timer", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, "lost-consumer-clear", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL) + // TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL) + // TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL) TD_NEW_MSG_SEG(TDMT_VND_MSG) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 77a4384c3042c966a0ac72c9b526fc47ebdf462e..1d301623b1c325a7393a219140187aead37677f7 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -175,20 +175,24 @@ typedef struct { void streamFreeQitem(SStreamQueueItem* data); +#if 0 bool streamQueueResEmpty(const SStreamQueueRes* pRes); int64_t streamQueueResSize(const SStreamQueueRes* pRes); SStreamQueueNode* streamQueueResFront(SStreamQueueRes* pRes); SStreamQueueNode* streamQueueResPop(SStreamQueueRes* pRes); void streamQueueResClear(SStreamQueueRes* pRes); SStreamQueueRes streamQueueBuildRes(SStreamQueueNode* pNode); +#endif typedef struct { SStreamQueueNode* pHead; } SStreamQueue1; +#if 0 bool streamQueueHasTask(const SStreamQueue1* pQueue); int32_t streamQueuePush(SStreamQueue1* pQueue, SStreamQueueItem* pItem); SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue); +#endif typedef struct { STaosQueue* queue; @@ -636,7 +640,7 @@ void streamMetaClose(SStreamMeta* streamMeta); int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask); int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask); int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen); -SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId); +// SStreamTask* 
streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId); SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId); void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask); diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 56c10f246378424eb19f429f77b89f24c7e4ade6..75860a4b1e3524c40b15793bf11056025679881e 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -119,6 +119,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_APP_IS_STARTING TAOS_DEF_ERROR_CODE(0, 0x0130) // #define TSDB_CODE_APP_IS_STOPPING TAOS_DEF_ERROR_CODE(0, 0x0131) // +#define TSDB_CODE_IVLD_DATA_FMT TAOS_DEF_ERROR_CODE(0, 0x0132) // //client #define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200) diff --git a/packaging/docker/README.md b/packaging/docker/README.md index 763ab73724587eb4dc231eb399a60937eaba6dca..4509a7a1a9ae521906e19e018a3f2037a7bb5a5f 100644 --- a/packaging/docker/README.md +++ b/packaging/docker/README.md @@ -18,65 +18,58 @@ TDengine is an open-sourced big data platform under [GNU AGPL v3.0](http://www.g ## How to use this image -### Start a TDengine instance with RESTful API exposed +### Starting TDengine -Simply, you can use `docker run` to start a TDengine instance and connect it with restful connectors(eg. [JDBC-RESTful](https://www.taosdata.com/cn/documentation/connector/java)). +The TDengine image starts with the HTTP service activated by default, using the following command: -```bash +```shell docker run -d --name tdengine -p 6041:6041 tdengine/tdengine ``` -This command starts a docker container by name `tdengine` with TDengine server running, and maps the container's HTTP port 6041 to the host's port 6041. If you have `curl` in your host, you can list the databases by the command: +The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command. -```bash +```shell curl -u root:taosdata -d "show databases" localhost:6041/rest/sql ``` -You can execute the `taos` shell command in the container: +The TDengine client taos can be executed in this container to access TDengine using the following command. -```bash +```shell $ docker exec -it tdengine taos -Welcome to the TDengine shell from Linux, Client Version:2.4.0.0 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. - taos> show databases; - name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status | -==================================================================================================================================================================================================================================================================================== - log | 2022-01-17 13:57:22.270 | 10 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready | -Query OK, 1 row(s) in set (0.002843s) + name | +================================= + information_schema | + performance_schema | +Query OK, 2 row(s) in set (0.002843s) ``` -Since TDengine use container hostname to establish connections, it's a bit more complex to use TDengine CLI and native connectors(such as JDBC-JNI) with TDengine container instance. This is the recommended way to expose ports and use TDengine with docker in simple cases. 
If you want to use TDengine CLI or taosc/connectors smoothly outside the `tdengine` container, see next use cases that match you need. +The TDengine server running in the container uses the container's hostname to establish a connection. Using TDengine CLI or various connectors (such as JDBC-JNI) to access the TDengine inside the container from outside the container is more complicated. So the above is the simplest way to access the TDengine service in the container and is suitable for some simple scenarios. Please refer to the next section if you want to access the TDengine service in the container from outside the container using TDengine CLI or various connectors for complex scenarios. -### Start with host network +### Start TDengine on the host network -```bash +```shell docker run -d --name tdengine --network host tdengine/tdengine ``` -Starts container with `host` network will use host's hostname as fqdn instead of container id. It's much like starting natively with `systemd` in host. After installing the client, you can use `taos` shell as normal in host path. +The above command starts TDengine on the host network and uses the host's FQDN to establish a connection instead of the container's hostname. It is the equivalent of using `systemctl` to start TDengine on the host. If the TDengine client is already installed on the host, you can access it directly with the following command. -```bash +```shell $ taos -Welcome to the TDengine shell from Linux, Client Version:2.4.0.0 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. - taos> show dnodes; id | end_point | vnodes | cores | status | role | create_time | offline reason | ====================================================================================================================================== - 1 | host:6030 | 1 | 8 | ready | any | 2022-01-17 22:10:32.619 | | + 1 | myhost:6030 | 1 | 8 | ready | any | 2022-01-17 22:10:32.619 | | Query OK, 1 row(s) in set (0.003233s) ``` -### Start with exposed ports and specified hostname - -Set the fqdn explicitly will help you to use in other environment or applications. We provide environment variable `TAOS_FQDN` or `fqdn` config option to explicitly set the hostname used by TDengine container instance(s). +### Start TDengine with the specified hostname and port -Use `TAOS_FQDN` variable within `docker run` command: +The `TAOS_FQDN` environment variable or the `fqdn` configuration item in `taos.cfg` allows TDengine to establish a connection at the specified hostname. This approach provides greater flexibility for deployment. -```bash +```shell docker run -d \ --name tdengine \ -e TAOS_FQDN=tdengine \ @@ -85,79 +78,58 @@ docker run -d \ tdengine/tdengine ``` -This command starts a docker container with TDengine server running and maps the container's TCP ports from 6030 to 6049 to the host's ports from 6030 to 6049 with TCP protocol and UDP ports range 6030-6039 to the host's UDP ports 6030-6039. If the host is already running TDengine server and occupying the same port(s), you need to map the container's port to a different unused port segment. (Please see TDengine 2.0 Port Description for details). In order to support TDengine clients accessing TDengine server services, both TCP and UDP ports need to be exposed by default(unless `rpcForceTcp` is set to `1`). 
+The above command starts a TDengine service in the container, which listens to the hostname tdengine, and maps the container's port segment 6030 to 6049 to the host's port segment 6030 to 6049 (both TCP and UDP ports need to be mapped). If the port segment is already occupied on the host, you can modify the above command to specify a free port segment on the host. If `rpcForceTcp` is set to `1`, you can map only the TCP protocol. -If you want to use TDengine CLI or native connectors([JDBC-JNI](https://www.taosdata.com/cn/documentation/connector/java), or [driver-go](https://github.com/taosdata/driver-go)), you need to make sure the `TAOS_FQDN` is resolvable at `/etc/hosts` or with custom DNS service. +Next, ensure the hostname "tdengine" is resolvable in `/etc/hosts`. -If you set the `TAOS_FQDN` to host's hostname, it will works as using `hosts` network like previous use case. Otherwise, like in `-e TAOS_FQDN=tdengine`, you can add the hostname record `tdengine` into `/etc/hosts` (use `127.0.0.1` here in host path, if use TDengine client/application in other hosts, you should set the right ip to the host eg. `192.168.10.1`(check the real ip in host with `hostname -i` or `ip route list default`) to make the TDengine endpoint resolvable): - -```bash +```shell echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts ``` -Then you can use `taos` with the host `tdengine`: +Finally, the TDengine service can be accessed from the TDengine CLI or any connector with "tdengine" as the server address. -```bash -taos -h tdengine +```shell +taos -h tdengine -P 6030 ``` -Or develop/test applications with native connectors. As in python: +If set `TAOS_FQDN` to the same hostname, the effect is the same as "Start TDengine on host network". -```python -import taos; -conn = taos.connect(host = "tdengine") -res = conn.query("show databases") -for row in res.fetch_all_into_dict(): - print(row) -``` +### Start TDengine on the specified network -See the results: - -```bash -Python 3.8.10 (default, Nov 26 2021, 20:14:08) -[GCC 9.3.0] on linux -Type "help", "copyright", "credits" or "license" for more information. ->>> import taos; ->>> conn = taos.connect(host = "tdengine") ->>> res = conn.query("show databases") ->>> for row in res.fetch_all_into_dict(): -... print(row) -... -{'name': 'log', 'created_time': datetime.datetime(2022, 1, 17, 22, 56, 2, 490000), 'ntables': 11, 'vgroups': 1, 'replica': 1, 'quorum': 1, 'days': 10, 'keep': '30', 'cache(MB)': 1, 'blocks': 3, 'minrows': 100, 'maxrows': 4096, 'wallevel': 1, 'fsync': 3000, 'comp': 2, 'cachelast': 0, 'precision': 'us', 'update': 0, 'status': 'ready'} -``` +You can also start TDengine on a specific network. Perform the following steps: -### Start with specific network +1. First, create a docker network named `td-net` -Alternatively, you can use TDengine natively by using specific network. + ```shell + docker network create td-net + ``` -First, create network for TDengine server and client/application. +2. Start TDengine -```bash -docker network create td-net -``` + Start the TDengine service on the `td-net` network with the following command: -Start TDengine instance with service name as fqdn (explicitly set with `TAOS_FQDN`): + ```shell + docker run -d --name tdengine --network td-net \ + -e TAOS_FQDN=tdengine \ + tdengine/tdengine + ``` -```bash -docker run -d --name tdengine --network td-net \ - -e TAOS_FQDN=tdengine \ - tdengine/tdengine -``` +3. 
Start the TDengine client in another container on the same network -Start TDengine client in another container with the specific network: + ```shell + docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine tdengine/tdengine taos + # or + #docker run --rm -it --network td-net -e tdengine/tdengine taos -h tdengine + ``` -```bash -docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine tdengine/tdengine taos -# or -docker run --rm -it --network td-net -e tdengine/tdengine taos -h tdengine -``` +### Launching a client application in a container -When you build your application with docker, you should add the TDengine client in the dockerfile, as based on `ubuntu:20.04` image, install the client like this: +If you want to start your application in a container, you need to add the corresponding dependencies on TDengine to the image as well, e.g. -```dockerfile +```docker FROM ubuntu:20.04 RUN apt-get update && apt-get install -y wget -ENV TDENGINE_VERSION=2.4.0.0 +ENV TDENGINE_VERSION=3.0.0.0 RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ @@ -169,10 +141,7 @@ RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENG #CMD ["app"] ``` -Here is an Go example app: - - - +Here is an example GO program: ```go /* @@ -181,19 +150,19 @@ Here is an Go example app: package main import ( - "database/sql" - "flag" - "fmt" - "time" + "database/sql" + "flag" + "fmt" + "time" - _ "github.com/taosdata/driver-go/v2/taosSql" + _ "github.com/taosdata/driver-go/v3/taosSql" ) type config struct { - hostName string - serverPort string - user string - password string + hostName string + serverPort string + user string + password string } var configPara config @@ -201,70 +170,67 @@ var taosDriverName = "taosSql" var url string func init() { - flag.StringVar(&configPara.hostName, "h", "", "The host to connect to TDengine server.") - flag.StringVar(&configPara.serverPort, "p", "", "The TCP/IP port number to use for the connection to TDengine server.") - flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.") - flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.") - flag.Parse() + flag.StringVar(&configPara.hostName, "h", "", "The host to connect to TDengine server.") + flag.StringVar(&configPara.serverPort, "p", "", "The TCP/IP port number to use for the connection to TDengine server.") + flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.") + flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.") + flag.Parse() } func printAllArgs() { - fmt.Printf("============= args parse result: =============\n") - fmt.Printf("hostName: %v\n", configPara.hostName) - fmt.Printf("serverPort: %v\n", configPara.serverPort) - fmt.Printf("usr: %v\n", configPara.user) - fmt.Printf("password: %v\n", configPara.password) - fmt.Printf("================================================\n") + fmt.Printf("============= args parse result: =============\n") + fmt.Printf("hostName: %v\n", configPara.hostName) + fmt.Printf("serverPort: %v\n", configPara.serverPort) + fmt.Printf("usr: %v\n", configPara.user) + fmt.Printf("password: %v\n", configPara.password) + fmt.Printf("================================================\n") } func 
main() { - printAllArgs() - - url = "root:taosdata@/tcp(" + configPara.hostName + ":" + configPara.serverPort + ")/" - - taos, err := sql.Open(taosDriverName, url) - checkErr(err, "open database error") - defer taos.Close() - - taos.Exec("create database if not exists test") - taos.Exec("use test") - taos.Exec("create table if not exists tb1 (ts timestamp, a int)") - _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") - checkErr(err, "failed to insert") - rows, err := taos.Query("select * from tb1") - checkErr(err, "failed to select") - - defer rows.Close() - for rows.Next() { - var r struct { - ts time.Time - a int - } - err := rows.Scan(&r.ts, &r.a) - if err != nil { - fmt.Println("scan error:\n", err) - return - } - fmt.Println(r.ts, r.a) - } + printAllArgs() + + url = "root:taosdata@/tcp(" + configPara.hostName + ":" + configPara.serverPort + ")/" + + taos, err := sql.Open(taosDriverName, url) + checkErr(err, "open database error") + defer taos.Close() + + taos.Exec("create database if not exists test") + taos.Exec("use test") + taos.Exec("create table if not exists tb1 (ts timestamp, a int)") + _, err = taos.Exec("insert into tb1 values(now, 0)(now+1s,1)(now+2s,2)(now+3s,3)") + checkErr(err, "failed to insert") + rows, err := taos.Query("select * from tb1") + checkErr(err, "failed to select") + + defer rows.Close() + for rows.Next() { + var r struct { + ts time.Time + a int + } + err := rows.Scan(&r.ts, &r.a) + if err != nil { + fmt.Println("scan error:\n", err) + return + } + fmt.Println(r.ts, r.a) + } } func checkErr(err error, prompt string) { - if err != nil { - fmt.Println("ERROR: %s\n", prompt) - panic(err) - } + if err != nil { + fmt.Println("ERROR: %s\n", prompt) + panic(err) + } } ``` - - - -Full version of dockerfile could be: +Here is the full Dockerfile: -```dockerfile +```docker FROM golang:1.17.6-buster as builder -ENV TDENGINE_VERSION=2.4.0.0 +ENV TDENGINE_VERSION=3.0.0.0 RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ @@ -274,11 +240,13 @@ RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENG WORKDIR /usr/src/app/ ENV GOPROXY="https://goproxy.io,direct" COPY ./main.go ./go.mod ./go.sum /usr/src/app/ -RUN go env && go mod tidy && go build +RUN go env +RUN go mod tidy +RUN go build FROM ubuntu:20.04 RUN apt-get update && apt-get install -y wget -ENV TDENGINE_VERSION=2.4.0.0 +ENV TDENGINE_VERSION=3.0.0.0 RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \ && cd TDengine-client-${TDENGINE_VERSION} \ @@ -291,9 +259,9 @@ COPY --from=builder /usr/src/app/app /usr/bin/ CMD ["app"] ``` -Suppose you have `main.go`, `go.mod` `go.sum`, `app.dockerfile`, build the app and run it with network `td-net`: +Now that we have `main.go`, `go.mod`, `go.sum`, `app.dockerfile`, we can build the application and start it on the `td-net` network. -```bash +```shell $ docker build -t app -f app.dockerfile $ docker run --rm --network td-net app -h tdengine -p 6030 ============= args parse result: ============= @@ -316,26 +284,18 @@ password: taosdata 2022-01-18 01:43:51.029 +0000 UTC 3 ``` -Now you must be much familiar with developing and testing with TDengine, let's see some more complex cases. 
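One gap in the walkthrough above is that the builder stage copies `go.mod` and `go.sum`, but the text never shows how to create them. A minimal sketch of preparing the module and building the image, assuming the program is saved as `main.go` and using `app` purely as an illustrative module name:

```shell
# "app" is only an illustrative module name; go mod tidy resolves the
# github.com/taosdata/driver-go/v3 dependency imported by main.go and
# writes go.mod/go.sum next to it.
go mod init app
go mod tidy

# Build with the current directory as the build context so main.go, go.mod
# and go.sum are visible to the COPY instructions, then run the app against
# the "tdengine" container started earlier on the td-net network.
docker build -t app -f app.dockerfile .
docker run --rm --network td-net app -h tdengine -p 6030
```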
+### Start the TDengine cluster with docker-compose -### Start with docker-compose with multiple nodes(instances) +1. The following docker-compose file starts a TDengine cluster with three nodes. -Start a 2-replicas-2-mnodes-2-dnodes-1-arbitrator TDengine cluster with `docker-compose` is quite simple. Save the file as `docker-compose.yml`: - -```yaml +```yml version: "3" services: - arbitrator: - image: tdengine/tdengine:$VERSION - command: tarbitrator td-1: image: tdengine/tdengine:$VERSION environment: TAOS_FQDN: "td-1" TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 volumes: - taosdata-td1:/var/lib/taos/ - taoslog-td1:/var/log/taos/ @@ -344,101 +304,95 @@ services: environment: TAOS_FQDN: "td-2" TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 volumes: - taosdata-td2:/var/lib/taos/ - taoslog-td2:/var/log/taos/ + td-3: + image: tdengine/tdengine:$VERSION + environment: + TAOS_FQDN: "td-3" + TAOS_FIRST_EP: "td-1" + volumes: + - taosdata-td3:/var/lib/taos/ + - taoslog-td3:/var/log/taos/ volumes: taosdata-td1: taoslog-td1: taosdata-td2: taoslog-td2: + taosdata-td3: + taoslog-td3: ``` -You may notice that: - -- We use `VERSION` environment variable to set `tdengine` image tag version once. -- **`TAOS_FIRST_EP`** **MUST** be set to join the newly created instances into an existing TDengine cluster. If you want more instances, use `TAOS_SECOND_EP` in case of HA(High Availability) concerns. -- `TAOS_NUM_OF_MNODES` is for setting number of mnodes for the cluster. -- `TAOS_REPLICA` set the default database replicas, `2` means there're one master and one slave copy of data. The `replica` option should be `1 <= replica <= 3`, and not greater than dnodes number. -- `TAOS_ARBITRATOR` set the arbitrator entrypoint of the cluster for failover/election stuff. It's better to use arbitrator in a two nodes cluster. -- The way to start an arbitrator service is as easy as abc: just add command name `tarbitrator`(which is the binary name of arbitrator daemon) in docker-compose service option: `command: tarbitrator`, and everything is ok now. - -Now run `docker-compose up -d` with version specified: - -```bash -$ VERSION=2.4.0.0 docker-compose up -d -Creating network "test_default" with the default driver -Creating volume "test_taosdata-td1" with default driver -Creating volume "test_taoslog-td1" with default driver -Creating volume "test_taosdata-td2" with default driver -Creating volume "test_taoslog-td2" with default driver -Creating test_td-1_1 ... done -Creating test_arbitrator_1 ... done -Creating test_td-2_1 ... done -``` +:::note -Check the status: +- The `VERSION` environment variable is used to set the tdengine image tag +- `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time + ::: -```bash -$ docker-compose ps - Name Command State Ports ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -test_arbitrator_1 /usr/bin/entrypoint.sh tar ... 
Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp -test_td-1_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp -test_td-2_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp -``` +2. Start the cluster -Check dnodes with TDengine CLI: + ```shell + $ VERSION=3.0.0.0 docker-compose up -d + Creating network "test_default" with the default driver + Creating volume "test_taosdata-td1" with default driver + Creating volume "test_taoslog-td1" with default driver + Creating volume "test_taosdata-td2" with default driver + Creating volume "test_taoslog-td2" with default driver + Creating test_td-1_1 ... done + Creating test_arbitrator_1 ... done + Creating test_td-2_1 ... done + ``` -```bash -$ docker-compose exec td-1 taos -s "show dnodes" +3. Check the status of each node + + ```shell + $ docker-compose ps + Name Command State Ports + --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + test_arbitrator_1 /usr/bin/entrypoint.sh tar ... Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp + test_td-1_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp + test_td-2_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp + ``` -Welcome to the TDengine shell from Linux, Client Version:2.4.0.0 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. +4. Show dnodes via TDengine CLI + +```shell +$ docker-compose exec td-1 taos -s "show dnodes" taos> show dnodes - id | end_point | vnodes | cores | status | role | create_time | offline reason | + id | endpoint | vnodes | support_vnodes | status | create_time | note | ====================================================================================================================================== - 1 | td-1:6030 | 1 | 8 | ready | any | 2022-01-18 02:47:42.871 | | - 2 | td-2:6030 | 0 | 8 | ready | any | 2022-01-18 02:47:43.518 | | - 0 | arbitrator:6042 | 0 | 0 | ready | arb | 2022-01-18 02:47:43.633 | - | -Query OK, 3 row(s) in set (0.000811s) -``` + 1 | td-1:6030 | 0 | 32 | ready | 2022-08-19 07:57:29.971 | | + 2 | td-2:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.415 | | + 3 | td-3:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.417 | | +Query OK, 3 rows in database (0.021262s) -### Start a TDengine cluster with scaled taosadapter service +``` -In previous use case, you could see the way to start other services built with TDengine(`taosd` as the default command). There's another important service you should know: +## taosAdapter -> **taosAdapter** is a TDengine’s companion tool and is a bridge/adapter between TDengine cluster and application. It provides an easy-to-use and efficient way to ingest data from data collections agents(like Telegraf, StatsD, CollectD) directly. It also provides InfluxDB/OpenTSDB compatible data ingestion interface to allow InfluxDB/OpenTSDB applications to immigrate to TDengine seamlessly. +1. 
taosAdapter is enabled by default in the TDengine container. If you want to disable it, specify the environment variable `TAOS_DISABLE_ADAPTER=true` at startup -`taosadapter` is running inside `tdengine` image by default, you can disable it by `TAOS_DISABLE_ADAPTER=true`. Running `taosadapter` in a separate container is like how `arbitrator` does: +2. At the same time, for flexible deployment, taosAdapter can be started in a separate container -```yaml -services: - # ... - adapter: - image: tdengine/tdengine:$VERSION - command: taosadapter -``` + ```docker + services: + # ... + adapter: + image: tdengine/tdengine:$VERSION + command: taosadapter + ``` -`taosadapter` could be scaled with docker-compose, so that you can manage the `taosadapter` nodes easily. Here is an example shows 4-`taosadapter` instances in a TDengine cluster(much like previous use cases): + Suppose you want to deploy multiple taosAdapters to improve throughput and provide high availability. In that case, the recommended configuration method uses a reverse proxy such as Nginx to offer a unified access entry. For specific configuration methods, please refer to the official documentation of Nginx. Here is an example: -```yaml +```yml version: "3" networks: inter: - api: services: - arbitrator: - image: tdengine/tdengine:$VERSION - command: tarbitrator - networks: - - inter td-1: image: tdengine/tdengine:$VERSION networks: @@ -446,9 +400,6 @@ services: environment: TAOS_FQDN: "td-1" TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 volumes: - taosdata-td1:/var/lib/taos/ - taoslog-td1:/var/log/taos/ @@ -459,15 +410,12 @@ services: environment: TAOS_FQDN: "td-2" TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 volumes: - taosdata-td2:/var/lib/taos/ - taoslog-td2:/var/log/taos/ adapter: image: tdengine/tdengine:$VERSION - command: taosadapter + entrypoint: "taosadapter" networks: - inter environment: @@ -481,7 +429,6 @@ services: - adapter networks: - inter - - api ports: - 6041:6041 - 6044:6044/udp @@ -504,100 +451,14 @@ volumes: taoslog-td2: ``` -Start the cluster: - -```bash -$ VERSION=2.4.0.0 docker-compose up -d -Creating network "docker_inter" with the default driver -Creating network "docker_api" with the default driver -Creating volume "docker_taosdata-td1" with default driver -Creating volume "docker_taoslog-td1" with default driver -Creating volume "docker_taosdata-td2" with default driver -Creating volume "docker_taoslog-td2" with default driver -Creating docker_td-2_1 ... done -Creating docker_arbitrator_1 ... done -Creating docker_td-1_1 ... done -Creating docker_adapter_1 ... done -Creating docker_adapter_2 ... done -Creating docker_adapter_3 ... done -``` - -It will start a TDengine cluster with two dnodes and four taosadapter instances, expose ports 6041/tcp and 6044/udp to host. - -`6041` is the RESTful API endpoint port, you can verify that the RESTful interface taosAdapter provides working using the `curl` command. 
- -```bash -$ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql -{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["log","2022-01-18 04:37:42.902",16,1,1,1,10,"30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":1} -``` - -If you run curl in batch(here we use [hyperfine](https://github.com/sharkdp/hyperfine) - a command-line benchmarking tool), the requests are balanced into 4 adapter instances. - -```bash -hyperfine -m10 'curl -u root:taosdata localhost:6041/rest/sql -d "describe log.log"' -``` - -View the logs with `docker-compose logs`: - -```bash -$ docker-compose logs adapter -# some logs skipped -adapter_2 | 01/18 04:57:44.616529 00000039 TAOS_ADAPTER info "| 200 | 162.185µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18 -adapter_1 | 01/18 04:57:44.627695 00000039 TAOS_ADAPTER info "| 200 | 145.485µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=17 -adapter_3 | 01/18 04:57:44.639165 00000040 TAOS_ADAPTER info "| 200 | 146.913µs | 172.21.0.9 | POST | /rest/sql " sessionID=17 model=web -adapter_4 | 01/18 04:57:44.650829 00000039 TAOS_ADAPTER info "| 200 | 153.201µs | 172.21.0.9 | POST | /rest/sql " sessionID=17 model=web -adapter_2 | 01/18 04:57:44.662422 00000039 TAOS_ADAPTER info "| 200 | 211.393µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19 -adapter_1 | 01/18 04:57:44.673426 00000039 TAOS_ADAPTER info "| 200 | 154.714µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18 -adapter_3 | 01/18 04:57:44.684788 00000040 TAOS_ADAPTER info "| 200 | 131.876µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18 -adapter_4 | 01/18 04:57:44.696261 00000039 TAOS_ADAPTER info "| 200 | 162.173µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=18 -adapter_2 | 01/18 04:57:44.707414 00000039 TAOS_ADAPTER info "| 200 | 164.419µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20 -adapter_1 | 01/18 04:57:44.720842 00000039 TAOS_ADAPTER info "| 200 | 179.374µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19 -adapter_3 | 01/18 04:57:44.732184 00000040 TAOS_ADAPTER info "| 200 | 141.174µs | 172.21.0.9 | POST | /rest/sql " sessionID=19 model=web -adapter_4 | 01/18 04:57:44.744024 00000039 TAOS_ADAPTER info "| 200 | 159.774µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=19 -adapter_2 | 01/18 04:57:44.773732 00000039 TAOS_ADAPTER info "| 200 | 178.993µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=21 -adapter_1 | 01/18 04:57:44.796518 00000039 TAOS_ADAPTER info "| 200 | 238.24µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20 -adapter_3 | 01/18 04:57:44.810744 00000040 TAOS_ADAPTER info "| 200 | 176.133µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20 -adapter_4 | 01/18 04:57:44.826395 00000039 TAOS_ADAPTER info "| 200 | 149.215µs | 172.21.0.9 | POST | /rest/sql " model=web sessionID=20 -``` - -`6044/udp` is the [StatsD](https://github.com/statsd/statsd)-compatible port, you can verify this feature with `nc` command(usually provided by `netcat` package). 
- -```bash -echo "foo:1|c" | nc -u -w0 127.0.0.1 6044 -``` - -Check the result in `taos` shell with `docker-compose exec`: +## Deploy with docker swarm -```bash -$ dc exec td-1 taos +If you want to deploy a container-based TDengine cluster on multiple hosts, you can use docker swarm. First, to establish a docker swarm cluster on these hosts, please refer to the official docker documentation. -Welcome to the TDengine shell from Linux, Client Version:2.4.0.0 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. +The docker-compose file can refer to the previous section. Here is the command to start TDengine with docker swarm: -taos> show databases; - name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status | -==================================================================================================================================================================================================================================================================================== - log | 2022-01-18 04:37:42.902 | 17 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready | - statsd | 2022-01-18 04:45:02.563 | 1 | 1 | 2 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready | -Query OK, 2 row(s) in set (0.001838s) - -taos> select * from statsd.foo; - ts | value | metric_type | -======================================================================================= - 2022-01-18 04:45:02.563422822 | 1 | counter | -Query OK, 1 row(s) in set (0.003854s) -``` - -Use `docker-compose up -d adapter=1 to reduce the instances to 1 - -### Deploy TDengine cluster in Docker Swarm with `docker-compose.yml` - -If you use docker swarm mode, it will schedule arbitrator/taosd/taosadapter services into different hosts automatically. If you've no experience with k8s/kubernetes, this is the most convenient way to scale out the TDengine cluster with multiple hosts/servers. - -Use the `docker-compose.yml` file in previous use case, and deploy with `docker stack` or `docker deploy`: - -```bash -$ VERSION=2.4.0 docker stack deploy -c docker-compose.yml taos +```shell +$ VERSION=3.0.0.0 docker stack deploy -c docker-compose.yml taos Creating network taos_inter Creating network taos_api Creating service taos_arbitrator @@ -607,58 +468,40 @@ Creating service taos_adapter Creating service taos_nginx ``` -Now you've created a TDengine cluster with multiple host servers. 
- -Use `docker service` or `docker stack` to manage the cluster: +Checking status: - - -```bash +```shell $ docker stack ps taos ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -79ni8temw59n taos_nginx.1 nginx:latest TM1701 Running Running about a minute ago -3e94u72msiyg taos_adapter.1 tdengine/tdengine:2.4.0 TM1702 Running Running 56 seconds ago -100amjkwzsc6 taos_td-2.1 tdengine/tdengine:2.4.0 TM1703 Running Running about a minute ago -pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:2.4.0 TM1704 Running Running 2 minutes ago -tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:2.4.0 TM1705 Running Running 2 minutes ago -rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:2.4.0 TM1706 Running Running 56 seconds ago -i2augxamfllf taos_adapter.3 tdengine/tdengine:2.4.0 TM1707 Running Running 56 seconds ago -lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:2.4.0 TM1708 Running Running 56 seconds ago +79ni8temw59n taos_nginx.1 nginx:latest TM1701 Running Running about a minute ago +3e94u72msiyg taos_adapter.1 tdengine/tdengine:3.0.0.0 TM1702 Running Running 56 seconds ago +100amjkwzsc6 taos_td-2.1 tdengine/tdengine:3.0.0.0 TM1703 Running Running about a minute ago +pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:3.0.0.0 TM1704 Running Running 2 minutes ago +tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:3.0.0.0 TM1705 Running Running 2 minutes ago +rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:3.0.0.0 TM1706 Running Running 56 seconds ago +i2augxamfllf taos_adapter.3 tdengine/tdengine:3.0.0.0 TM1707 Running Running 56 seconds ago +lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:3.0.0.0 TM1708 Running Running 56 seconds ago $ docker service ls ID NAME MODE REPLICAS IMAGE PORTS -561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:2.4.0 -3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:2.4.0 +561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0 +3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:3.0.0.0 d8qr52envqzu taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp -2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:2.4.0 -9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:2.4.0 +2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0 +9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0 ``` - - -It shows that there are two dnodes, one arbitrator, four taosadapter and one nginx reverse-forward service in this cluster. +From the above output, you can see two dnodes, two taosAdapters, and one Nginx reverse proxy service. -You can scale down the taosadapter replicas to `1` by `docker service`: +Next, we can reduce the number of taosAdapter services. -```bash +```shell $ docker service scale taos_adapter=1 taos_adapter scaled to 1 -overall progress: 1 out of 1 tasks -1/1: running [==================================================>] +overall progress: 1 out of 1 tasks +1/1: running [==================================================>] verify: Service converged $ docker service ls -f name=taos_adapter ID NAME MODE REPLICAS IMAGE PORTS -561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:2.4.0 -``` - -Now it remains only 1 taosadapter instance in the cluster. 
- -When you want to remove the cluster, just type: - -```bash -docker stack rm taos +561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0 ``` - -### Environment Variables - -When you start `tdengine` image, you can adjust the configuration of TDengine by passing environment variables on the `docker run` command line or in the docker compose file. You can use all of the environment variables that passed to taosd or taosadapter. diff --git a/packaging/docker/docker-compose.yml b/packaging/docker/docker-compose.yml deleted file mode 100644 index 301b41e7d43c2a894d866c1f0d45cf8d13328585..0000000000000000000000000000000000000000 --- a/packaging/docker/docker-compose.yml +++ /dev/null @@ -1,77 +0,0 @@ -version: "3" - -networks: - inter: - api: - -services: - arbitrator: - image: tdengine/tdengine:$VERSION - command: tarbitrator - networks: - - inter - td-1: - image: tdengine/tdengine:$VERSION - networks: - - inter - environment: - TAOS_FQDN: "td-1" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td1:/var/lib/taos/ - - taoslog-td1:/var/log/taos/ - td-2: - image: tdengine/tdengine:$VERSION - networks: - - inter - environment: - TAOS_FQDN: "td-2" - TAOS_FIRST_EP: "td-1" - TAOS_NUM_OF_MNODES: "2" - TAOS_REPLICA: "2" - TAOS_ARBITRATOR: arbitrator:6042 - volumes: - - taosdata-td2:/var/lib/taos/ - - taoslog-td2:/var/log/taos/ - adapter: - image: tdengine/tdengine:$VERSION - command: taosadapter - networks: - - inter - environment: - TAOS_FIRST_EP: "td-1" - TOAS_SECOND_EP: "td-2" - deploy: - replicas: 4 - update_config: - parallelism: 4 - nginx: - image: nginx - depends_on: - - adapter - networks: - - inter - - api - ports: - - 6041:6041 - - 6044:6044/udp - command: [ - "sh", - "-c", - "while true; - do curl -s http://adapter:6041/-/ping >/dev/null && break; - done; - printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}' - > /etc/nginx/conf.d/rest.conf; - printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}' - >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf; - nginx -g 'daemon off;'", - ] -volumes: - taosdata-td1: - taoslog-td1: - taosdata-td2: - taoslog-td2: diff --git a/packaging/docker/dockerManifest.sh b/packaging/docker/dockerManifest.sh index 8f71e30fbdca1cc9adf8e9b46c652475822e4b08..db71bf8833d3fce82a461e7021da3eaa66b0bf60 100755 --- a/packaging/docker/dockerManifest.sh +++ b/packaging/docker/dockerManifest.sh @@ -1,9 +1,9 @@ #!/bin/bash set -e #set -x -set -v +set -v -# dockerbuild.sh +# dockerbuild.sh # -n [version number] # -p [xxxx] # -V [stable | beta] @@ -28,7 +28,7 @@ do V) #echo "verType=$OPTARG" verType=$(echo $OPTARG) - ;; + ;; h) echo "Usage: `basename $0` -n [version number] " echo " -p [password for docker hub] " @@ -39,8 +39,8 @@ do a) #echo "dockerLatest=$OPTARG" dockerLatest=$(echo $OPTARG) - ;; - ?) #unknow option + ;; + ?) 
#unknow option echo "unkonw argument" exit 1 ;; @@ -60,7 +60,7 @@ if [ "$verType" == "stable" ]; then elif [ "$verType" == "beta" ];then verType=beta tagVal=ver-${version}-beta - dockerinput=TDengine-server-${version}-${verType}-Linux-$cpuType.tar.gz + dockerinput=TDengine-server-${version}-${verType}-Linux-$cpuType.tar.gz dockerinput_x64=TDengine-server-${version}-${verType}-Linux-amd64.tar.gz dockerim=tdengine/tdengine-beta dockeramd64=tdengine/tdengine-amd64-beta @@ -73,30 +73,30 @@ fi username="tdengine" -# generate docker verison +# generate docker version echo "generate ${dockerim}:${version}" docker manifest create -a ${dockerim}:${version} ${dockeramd64}:${version} ${dockeraarch64}:${version} docker manifest inspect ${dockerim}:${version} docker manifest rm ${dockerim}:${version} docker manifest create -a ${dockerim}:${version} ${dockeramd64}:${version} ${dockeraarch64}:${version} docker manifest inspect ${dockerim}:${version} -docker login -u ${username} -p ${passWord} +docker login -u ${username} -p ${passWord} docker manifest push ${dockerim}:${version} -# generate docker latest +# generate docker latest echo "generate ${dockerim}:latest " if [ ${dockerLatest} == 'y' ] ;then echo "docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest" docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest - docker manifest inspect ${dockerim}:latest - docker manifest rm ${dockerim}:latest + docker manifest inspect ${dockerim}:latest + docker manifest rm ${dockerim}:latest docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest docker manifest inspect ${dockerim}:latest docker login -u tdengine -p ${passWord} #replace the docker registry username and password docker manifest push ${dockerim}:latest - docker pull tdengine/tdengine:latest + docker pull tdengine/tdengine:latest fi diff --git a/packaging/docker/dockerbuild.sh b/packaging/docker/dockerbuild.sh index b02387a3d1191c09dadfa1ce21ab33183a52a7da..4b6fc8576b233192a6835d455d73baf14abc1758 100755 --- a/packaging/docker/dockerbuild.sh +++ b/packaging/docker/dockerbuild.sh @@ -74,7 +74,7 @@ do done -# Check_verison() +# Check_version() # { # } @@ -102,14 +102,14 @@ scriptDir=$(dirname $(readlink -f $0)) communityDir=${scriptDir}/../../../community DockerfilePath=${communityDir}/packaging/docker/ if [ "$cloudBuild" == "y" ]; then - comunityArchiveDir=/nas/TDengine/v$version/cloud + communityArchiveDir=/nas/TDengine/v$version/cloud Dockerfile=${communityDir}/packaging/docker/DockerfileCloud else - comunityArchiveDir=/nas/TDengine/v$version/community + communityArchiveDir=/nas/TDengine/v$version/community Dockerfile=${communityDir}/packaging/docker/Dockerfile fi cd ${scriptDir} -cp -f ${comunityArchiveDir}/${pkgFile} . +cp -f ${communityArchiveDir}/${pkgFile} . 
echo "dirName=${dirName}" diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index 2191c54315d6324e819337fa41db3875e1506594..07624efe0479224740ee728a734d6b8763122009 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -456,12 +456,13 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) { (*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS); int32_t len = blockEncode(pBlock, (*pRsp)->data, SHOW_VARIABLES_RESULT_COLS); + blockDataDestroy(pBlock); + if(len != rspSize - sizeof(SRetrieveTableRsp)){ uError("buildShowVariablesRsp error, len:%d != rspSize - sizeof(SRetrieveTableRsp):%" PRIu64, len, (uint64_t) (rspSize - sizeof(SRetrieveTableRsp))); return TSDB_CODE_TSC_INVALID_INPUT; } - blockDataDestroy(pBlock); return TSDB_CODE_SUCCESS; } diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c index b52af85ecf9c236e7a06f51f1c79266f78053302..2bb870837272a0422de500d56e89c7ca245a7beb 100644 --- a/source/common/src/tdataformat.c +++ b/source/common/src/tdataformat.c @@ -139,7 +139,10 @@ int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow) { nkv += tPutI16v(NULL, -pTColumn->colId); nIdx++; } else { - ASSERT(0); + if (ASSERTS(0, "invalid input")) { + code = TSDB_CODE_INVALID_PARA; + goto _exit; + } } pTColumn = (++iTColumn < pTSchema->numOfCols) ? pTSchema->columns + iTColumn : NULL; @@ -176,8 +179,10 @@ int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow) { ntp = sizeof(SRow) + BIT2_SIZE(pTSchema->numOfCols - 1) + pTSchema->flen + ntp; break; default: - ASSERT(0); - break; + if (ASSERTS(0, "impossible")) { + code = TSDB_CODE_INVALID_PARA; + goto _exit; + } } if (maxIdx <= UINT8_MAX) { nkv = sizeof(SRow) + sizeof(SKVIdx) + nIdx + nkv; @@ -306,8 +311,10 @@ int32_t tRowBuild(SArray *aColVal, const STSchema *pTSchema, SRow **ppRow) { pv = pf + pTSchema->flen; break; default: - ASSERT(0); - break; + if (ASSERTS(0, "impossible")) { + code = TSDB_CODE_INVALID_PARA; + goto _exit; + } } if (pb) { @@ -370,7 +377,7 @@ _exit: return code; } -void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { +int32_t tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { ASSERT(iCol < pTSchema->numOfCols); ASSERT(pRow->sver == pTSchema->version); @@ -381,17 +388,17 @@ void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { pColVal->type = pTColumn->type; pColVal->flag = CV_FLAG_VALUE; memcpy(&pColVal->value.val, &pRow->ts, sizeof(TSKEY)); - return; + return 0; } if (pRow->flag == HAS_NONE) { *pColVal = COL_VAL_NONE(pTColumn->colId, pTColumn->type); - return; + return 0; } if (pRow->flag == HAS_NULL) { *pColVal = COL_VAL_NULL(pTColumn->colId, pTColumn->type); - return; + return 0; } if (pRow->flag >> 4) { // KV Row @@ -440,7 +447,7 @@ void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { memcpy(&pColVal->value.val, pData, pTColumn->bytes); } } - return; + return 0; } else if (TABS(cid) < pTColumn->colId) { lidx = mid + 1; } else { @@ -492,16 +499,16 @@ void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { pv = pf + pTSchema->flen; break; default: - ASSERT(0); - break; + ASSERTS(0, "invalid row format"); + return TSDB_CODE_IVLD_DATA_FMT; } if (bv == BIT_FLG_NONE) { *pColVal = COL_VAL_NONE(pTColumn->colId, pTColumn->type); - return; + return 0; } else if (bv == BIT_FLG_NULL) { *pColVal = COL_VAL_NULL(pTColumn->colId, pTColumn->type); - 
return; + return 0; } pColVal->cid = pTColumn->colId; @@ -520,6 +527,8 @@ void tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) { } } } + + return 0; } void tRowDestroy(SRow *pRow) { @@ -710,7 +719,6 @@ int32_t tRowIterOpen(SRow *pRow, STSchema *pTSchema, SRowIter **ppIter) { _exit: if (code) { *ppIter = NULL; - if (pIter) taosMemoryFree(pIter); } else { *ppIter = pIter; } @@ -929,8 +937,8 @@ static int32_t tRowTupleUpsertColData(SRow *pRow, STSchema *pTSchema, SColData * pv = pf + pTSchema->flen; break; default: - ASSERT(0); - break; + ASSERTS(0, "Invalid row flag"); + return TSDB_CODE_IVLD_DATA_FMT; } while (pColData) { @@ -954,8 +962,8 @@ static int32_t tRowTupleUpsertColData(SRow *pRow, STSchema *pTSchema, SColData * bv = GET_BIT2(pb, iTColumn - 1); break; default: - ASSERT(0); - break; + ASSERTS(0, "Invalid row flag"); + return TSDB_CODE_IVLD_DATA_FMT; } if (bv == BIT_FLG_NONE) { @@ -1045,7 +1053,8 @@ static int32_t tRowKVUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *aCo } else if (pRow->flag & KV_FLG_BIG) { pData = pv + ((uint32_t *)pKVIdx->idx)[iCol]; } else { - ASSERT(0); + ASSERTS(0, "Invalid KV row format"); + return TSDB_CODE_IVLD_DATA_FMT; } int16_t cid; @@ -1579,7 +1588,7 @@ static FORCE_INLINE int32_t tColDataPutValue(SColData *pColData, uint8_t *pData, int32_t code = 0; if (IS_VAR_DATA_TYPE(pColData->type)) { - code = tRealloc((uint8_t **)(&pColData->aOffset), (pColData->nVal + 1) << 2); + code = tRealloc((uint8_t **)(&pColData->aOffset), ((int64_t)(pColData->nVal + 1)) << 2); if (code) goto _exit; pColData->aOffset[pColData->nVal] = pColData->nData; @@ -2347,35 +2356,25 @@ void tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal) { } uint8_t tColDataGetBitValue(const SColData *pColData, int32_t iVal) { - uint8_t v; switch (pColData->flag) { case HAS_NONE: - v = 0; - break; + return 0; case HAS_NULL: - v = 1; - break; + return 1; case (HAS_NULL | HAS_NONE): - v = GET_BIT1(pColData->pBitMap, iVal); - break; + return GET_BIT1(pColData->pBitMap, iVal); case HAS_VALUE: - v = 2; - break; + return 2; case (HAS_VALUE | HAS_NONE): - v = GET_BIT1(pColData->pBitMap, iVal); - if (v) v = 2; - break; + return (GET_BIT1(pColData->pBitMap, iVal)) ? 
2 : 0; case (HAS_VALUE | HAS_NULL): - v = GET_BIT1(pColData->pBitMap, iVal) + 1; - break; + return GET_BIT1(pColData->pBitMap, iVal) + 1; case (HAS_VALUE | HAS_NULL | HAS_NONE): - v = GET_BIT2(pColData->pBitMap, iVal); - break; + return GET_BIT2(pColData->pBitMap, iVal); default: - ASSERT(0); - break; + ASSERTS(0, "not possible"); + return 0; } - return v; } int32_t tColDataCopy(SColData *pColDataFrom, SColData *pColData, xMallocFn xMalloc, void *arg) { diff --git a/source/dnode/mnode/impl/src/mndIndex.c b/source/dnode/mnode/impl/src/mndIndex.c index 016f01b032542aa709fb2a5ccc894027c10ce45e..8782fd823f36fdfcad114d4cf2f6bbac5d754bad 100644 --- a/source/dnode/mnode/impl/src/mndIndex.c +++ b/source/dnode/mnode/impl/src/mndIndex.c @@ -638,7 +638,7 @@ static int32_t mndSetUpdateIdxStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb } int32_t mndAddIndexImpl(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb, SIdxObj *pIdx) { // impl later - int32_t code = 0; + int32_t code = -1; SStbObj newStb = {0}; STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, "create-stb-index"); if (pTrans == NULL) goto _OVER; @@ -670,6 +670,7 @@ _OVER: } static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *req, SDbObj *pDb, SStbObj *pStb) { + int32_t code = -1; SIdxObj idxObj = {0}; memcpy(idxObj.name, req->idxName, TSDB_TABLE_FNAME_LEN); memcpy(idxObj.stb, pStb->name, TSDB_TABLE_FNAME_LEN); @@ -681,21 +682,6 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *re idxObj.stbUid = pStb->uid; idxObj.dbUid = pStb->dbUid; - int32_t code = -1; - // SField *pField0 = NULL; - - // SStbObj stbObj = {0}; - // SStbObj *pNew = &stbObj; - - // taosRLockLatch(&pOld->lock); - // memcpy(&stbObj, pOld, sizeof(SStbObj)); - // taosRUnLockLatch(&pOld->lock); - - // stbObj.pColumns = NULL; - // stbObj.pTags = NULL; - // stbObj.updateTime = taosGetTimestampMs(); - // stbObj.lock = 0; - int32_t tag = mndFindSuperTableTagId(pStb, req->colName); if (tag < 0) { terrno = TSDB_CODE_MND_TAG_NOT_EXIST; diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index 75411f60896c333a84f1d945e9ec82c0a7017bbb..70ba7ed4efec7035adcb3567f21d8a6d52533400 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -133,6 +133,7 @@ static void mndCalMqRebalance(SMnode *pMnode) { } } +#if 0 static void mndStreamCheckpointTick(SMnode *pMnode, int64_t sec) { int32_t contLen = 0; void *pReq = mndBuildCheckpointTickMsg(&contLen, sec); @@ -145,6 +146,7 @@ static void mndStreamCheckpointTick(SMnode *pMnode, int64_t sec) { tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg); } } +#endif static void mndPullupTelem(SMnode *pMnode) { mTrace("pullup telem msg"); diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 199266cd178d6e48c8aedfc061106dd59c1a626d..47ebdd706d010507a21d6dde71e8f1556f22b51a 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -39,7 +39,7 @@ static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pStream, SStreamObj static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq); static int32_t mndProcessDropStreamReq(SRpcMsg *pReq); static int32_t mndProcessStreamCheckpointTmr(SRpcMsg *pReq); -static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq); +// static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq); /*static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq);*/ static int32_t 
mndProcessStreamMetaReq(SRpcMsg *pReq); static int32_t mndGetStreamMeta(SRpcMsg *pReq, SShowObj *pShow, STableMetaRsp *pMeta); @@ -66,8 +66,8 @@ int32_t mndInitStream(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_DEPLOY_RSP, mndTransProcessRsp); mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_DROP_RSP, mndTransProcessRsp); - mndSetMsgHandle(pMnode, TDMT_MND_STREAM_CHECKPOINT_TIMER, mndProcessStreamCheckpointTmr); - mndSetMsgHandle(pMnode, TDMT_MND_STREAM_BEGIN_CHECKPOINT, mndProcessStreamDoCheckpoint); + // mndSetMsgHandle(pMnode, TDMT_MND_STREAM_CHECKPOINT_TIMER, mndProcessStreamCheckpointTmr); + // mndSetMsgHandle(pMnode, TDMT_MND_STREAM_BEGIN_CHECKPOINT, mndProcessStreamDoCheckpoint); mndSetMsgHandle(pMnode, TDMT_STREAM_TASK_REPORT_CHECKPOINT, mndTransProcessRsp); mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_STREAMS, mndRetrieveStream); @@ -778,6 +778,9 @@ _OVER: tFreeStreamObj(&streamObj); return code; } + +#if 0 + static int32_t mndProcessStreamCheckpointTmr(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SSdb *pSdb = pMnode->pSdb; @@ -942,6 +945,8 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) { return 0; } +#endif + static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) { SMnode *pMnode = pReq->info.node; SStreamObj *pStream = NULL; diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index 9f57887d485d6d3674e599956453074d5edc3a33..1a98134d70c486259a659cb9771152333835b8c6 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -328,10 +328,6 @@ static int32_t tsdbSnapCmprTombData(STsdbSnapReader* pReader, uint8_t** ppData) _exit: if (code) { tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pReader->pTsdb->pVnode), __func__, lino, tstrerror(code)); - if (pData) { - taosMemoryFree(pData); - pData = NULL; - } } *ppData = pData; return code; @@ -404,7 +400,7 @@ static int32_t tsdbSnapReadTombData(STsdbSnapReader* pReader, uint8_t** ppData) } while (pDelInfo && pDelInfo->suid == pReader->tbid.suid && pDelInfo->uid == pReader->tbid.uid) { - if (taosArrayPush(pReader->aDelData, &pDelInfo->delData) < 0) { + if (taosArrayPush(pReader->aDelData, &pDelInfo->delData) == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; TSDB_CHECK_CODE(code, lino, _exit); } @@ -1252,7 +1248,7 @@ static int32_t tsdbSnapWriteDelTableData(STsdbSnapWriter* pWriter, TABLEID* pId, SDelData delData; n += tGetDelData(pData + n, &delData); - if (taosArrayPush(pWriter->aDelData, &delData) < 0) { + if (taosArrayPush(pWriter->aDelData, &delData) == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; TSDB_CHECK_CODE(code, lino, _exit); } @@ -1420,6 +1416,7 @@ _exit: tBlockDataDestroy(&pWriter->bData); tBlockDataDestroy(&pWriter->inData); tsdbFSDestroy(&pWriter->fs); + taosMemoryFree(pWriter); pWriter = NULL; } } else { diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c index 44887b40b787cd004771c31e19dbb62938dcad86..36834ce921923ec9ff4e10f719fa67c695985c7a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c @@ -116,12 +116,7 @@ int32_t tMapDataToArray(SMapData *pMapData, int32_t itemSize, int32_t (*tGetItem } _exit: - if (code) { - *ppArray = NULL; - if (pArray) taosArrayDestroy(pArray); - } else { - *ppArray = pArray; - } + *ppArray = pArray; return code; } @@ -1233,14 +1228,22 @@ int32_t tBlockDataInit(SBlockData *pBlockData, TABLEID *pId, STSchema *pTSchema, int32_t iColumn = 1; STColumn *pTColumn = 
&pTSchema->columns[iColumn]; for (int32_t iCid = 0; iCid < nCid; iCid++) { - ASSERT(pTColumn); + if (ASSERTS(pTColumn != NULL, "invalid input param")) { + code = TSDB_CODE_INVALID_PARA; + goto _exit; + } + while (pTColumn->colId < aCid[iCid]) { iColumn++; ASSERT(iColumn < pTSchema->numOfCols); pTColumn = &pTSchema->columns[iColumn]; } - ASSERT(pTColumn->colId == aCid[iCid]); + if (ASSERTS(pTColumn->colId == aCid[iCid], "invalid input param")) { + code = TSDB_CODE_INVALID_PARA; + goto _exit; + } + tColDataInit(&pBlockData->aColData[iCid], pTColumn->colId, pTColumn->type, (pTColumn->flags & COL_SMA_ON) ? 1 : 0); diff --git a/source/dnode/vnode/src/tsdb/tsdbWrite.c b/source/dnode/vnode/src/tsdb/tsdbWrite.c index 301b504346d80190653ea420c311e23ef7e7464b..bd2d26380424852f0edb709f75dc828705980345 100644 --- a/source/dnode/vnode/src/tsdb/tsdbWrite.c +++ b/source/dnode/vnode/src/tsdb/tsdbWrite.c @@ -31,7 +31,9 @@ int tsdbInsertData(STsdb *pTsdb, int64_t version, SSubmitReq2 *pMsg, SSubmitRsp2 int32_t affectedrows = 0; int32_t numOfRows = 0; - ASSERT(pTsdb->mem != NULL); + if (ASSERTS(pTsdb->mem != NULL, "vgId:%d, mem is NULL", TD_VID(pTsdb->pVnode))) { + return -1; + } if (pMsg) { arrSize = taosArrayGetSize(pMsg->aSubmitTbData); diff --git a/source/dnode/vnode/src/vnd/vnodeCompact.c b/source/dnode/vnode/src/vnd/vnodeCompact.c index 14d893042d9a6d0eefd3d0023d4d69ad670f91bf..16e39d75dce38d40940df3b4ebf886f972f2caaf 100644 --- a/source/dnode/vnode/src/vnd/vnodeCompact.c +++ b/source/dnode/vnode/src/vnd/vnodeCompact.c @@ -36,8 +36,8 @@ static int32_t vnodeCompactTask(void *param) { vnodeCommitInfo(dir); _exit: - taosMemoryFree(pInfo); tsem_post(&pInfo->pVnode->canCommit); + taosMemoryFree(pInfo); return code; } static int32_t vnodePrepareCompact(SVnode *pVnode, SCompactInfo *pInfo) { @@ -59,9 +59,17 @@ static int32_t vnodePrepareCompact(SVnode *pVnode, SCompactInfo *pInfo) { snprintf(dir, TSDB_FILENAME_LEN, "%s", pVnode->path); } - vnodeLoadInfo(dir, &info); + if (vnodeLoadInfo(dir, &info) < 0) { + code = terrno; + goto _exit; + } + info.state.commitID = pInfo->commitID; - vnodeSaveInfo(dir, &info); + + if (vnodeSaveInfo(dir, &info) < 0) { + code = terrno; + goto _exit; + } _exit: if (code) { diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 68ee50a14f7969bd4ae6f8f676f81acab45b60fa..c7d155be0d50600e419940b03297d2bad6d29528 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -48,7 +48,7 @@ int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs) { info.state.applied = -1; info.state.commitID = 0; - vInfo("vgId:%d, save config while create", pCfg->vgId); + vInfo("vgId:%d, save config while create", info.config.vgId); if (vnodeSaveInfo(dir, &info) < 0 || vnodeCommitInfo(dir) < 0) { vError("vgId:%d, failed to save vnode config since %s", pCfg ? 
pCfg->vgId : 0, tstrerror(terrno)); return -1; @@ -124,7 +124,7 @@ int32_t vnodeRenameVgroupId(const char *srcPath, const char *dstPath, int32_t sr while (1) { const STfsFile *tsdbFile = tfsReaddir(tsdbDir); if (tsdbFile == NULL) break; - if (tsdbFile->rname == NULL) continue; + if (tsdbFile->rname[0] == '\0') continue; tstrncpy(oldRname, tsdbFile->rname, TSDB_FILENAME_LEN); char *tsdbFilePrefixPos = strstr(oldRname, tsdbFilePrefix); diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index e57b200ab6b3da446d024bda3fb80a8749f614cb..6f9f701ac2313bab3c170f4261acb8cc31c8de64 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -141,7 +141,10 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int *(int64_t *)(pCoder->data + pCoder->pos) = uid; pCoder->pos += sizeof(int64_t); } else { - tDecodeI64(pCoder, &submitTbData.uid); + if (tDecodeI64(pCoder, &submitTbData.uid) < 0) { + code = TSDB_CODE_INVALID_MSG; + TSDB_CHECK_CODE(code, lino, _exit); + } } if (tDecodeI32v(pCoder, &submitTbData.sver) < 0) { @@ -168,6 +171,11 @@ static int32_t vnodePreProcessSubmitTbData(SVnode *pVnode, SDecoder *pCoder, int SColData colData = {0}; pCoder->pos += tGetColData(pCoder->data + pCoder->pos, &colData); + if (colData.flag != HAS_VALUE) { + code = TSDB_CODE_INVALID_MSG; + goto _exit; + } + for (int32_t iRow = 0; iRow < colData.nVal; iRow++) { if (((TSKEY *)colData.pData)[iRow] < minKey || ((TSKEY *)colData.pData)[iRow] > maxKey) { code = TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE; diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c index 06db2c3268a298c71d33586eead38643cae40d58..7ff8afd6a510f16c6a47df25550c9d2dbd0e3543 100644 --- a/source/libs/catalog/src/ctgCache.c +++ b/source/libs/catalog/src/ctgCache.c @@ -493,11 +493,9 @@ int32_t ctgCopyTbMeta(SCatalog *pCtg, SCtgTbMetaCtx *ctx, SCtgDBCache **pDb, SCt //ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache); - if (tbCache) { - CTG_UNLOCK(CTG_READ, &tbCache->metaLock); - taosHashRelease(dbCache->tbCache, tbCache); - *pTb = NULL; - } + CTG_UNLOCK(CTG_READ, &tbCache->metaLock); + taosHashRelease(dbCache->tbCache, tbCache); + *pTb = NULL; ctgDebug("Got ctb %s meta from cache, will continue to get its stb meta, type:%d, dbFName:%s", ctx->pName->tname, ctx->tbInfo.tbType, dbFName); @@ -1554,8 +1552,8 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam SCtgTbCache cache = {0}; cache.pMeta = meta; if (taosHashPut(dbCache->tbCache, tbName, strlen(tbName), &cache, sizeof(SCtgTbCache)) != 0) { - taosMemoryFree(meta); ctgError("taosHashPut new tbCache failed, dbFName:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType); + taosMemoryFree(meta); CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index f3bd91f786597ad8c0d82b3493f0087c693a5299..7b6f795ecfe64a1c445fc8c450e8967534f74ccd 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1470,8 +1470,8 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc SNode* pTable = pSelect->pFromTable; if ((NULL != pTable && (QUERY_NODE_REAL_TABLE != nodeType(pTable) || - (TSDB_CHILD_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType && - TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType)))) { + (TSDB_CHILD_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType && + TSDB_NORMAL_TABLE != 
((SRealTableNode*)pTable)->pMeta->tableType)))) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE, "%s is only supported in single table query", pFunc->functionName); } @@ -5953,6 +5953,7 @@ static int32_t adjustOrderOfProjections(STranslateContext* pCxt, SNodeList* pCol } int32_t code = TSDB_CODE_SUCCESS; + bool hasPrimaryKey = false; SNode* pCol = NULL; SNode* pProj = NULL; FORBOTH(pCol, pCols, pProj, *pProjections) { @@ -5966,6 +5967,14 @@ static int32_t adjustOrderOfProjections(STranslateContext* pCxt, SNodeList* pCol if (TSDB_CODE_SUCCESS != code) { break; } + if (PRIMARYKEY_TIMESTAMP_COL_ID == pSchema->colId) { + hasPrimaryKey = true; + } + } + + if (TSDB_CODE_SUCCESS == code && !hasPrimaryKey) { + code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM, + "primary timestamp column can not be null"); } SNodeList* pNewProjections = NULL; @@ -6008,7 +6017,15 @@ static int32_t adjustProjectionsForExistTable(STranslateContext* pCxt, SCreateSt return adjustOrderOfProjections(pCxt, pStmt->pCols, pMeta, &pSelect->pProjectionList, pReq); } +static bool isGroupIdTagStream(const STableMeta* pMeta, SNodeList* pTags) { + return (NULL == pTags && 1 == pMeta->tableInfo.numOfTags && TSDB_DATA_TYPE_UBIGINT == getTableTagSchema(pMeta)->type); +} + static int32_t adjustDataTypeOfTags(STranslateContext* pCxt, const STableMeta* pMeta, SNodeList* pTags) { + if (isGroupIdTagStream(pMeta, pTags)) { + return TSDB_CODE_SUCCESS; + } + if (getNumOfTags(pMeta) != LIST_LENGTH(pTags)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM, "Illegal number of tags"); } diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 66acadc23bd088efd40a2120b177551de100aa31..b120fecd9d8ae04da3f2d6be323176e9c08609ee 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -847,6 +847,9 @@ _return: qwBuildAndSendFetchRsp(qwMsg->msgType + 1, &qwMsg->connInfo, rsp, dataLen, code); QW_TASK_DLOG("%s send, handle:%p, code:%x - %s, dataLen:%d", TMSG_INFO(qwMsg->msgType + 1), qwMsg->connInfo.handle, code, tstrerror(code), dataLen); + } else { + qwFreeFetchRsp(rsp); + rsp = NULL; } } @@ -1217,7 +1220,7 @@ void qWorkerStopAllTasks(void *qWorkerMgmt) { QW_UPDATE_RSP_CODE(ctx, TSDB_CODE_VND_STOPPED); QW_SET_EVENT_RECEIVED(ctx, QW_EVENT_DROP); } else { - qwDropTask(QW_FPARAMS()); + (void)qwDropTask(QW_FPARAMS()); } QW_UNLOCK(QW_WRITE, &ctx->lock); diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index efd19074da1b2f51e1217b3f7ab359f2b4a33c95..670cfbead1e180061fe0f972290351125eb9852c 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -13,6 +13,7 @@ * along with this program. If not, see . 
*/ +#if 0 #include "streamInc.h" int32_t tEncodeSStreamCheckpointSourceReq(SEncoder* pEncoder, const SStreamCheckpointSourceReq* pReq) { @@ -192,3 +193,4 @@ int32_t streamProcessCheckpointRsp(SStreamMeta* pMeta, SStreamTask* pTask, SStre // set status normal return 0; } +#endif diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index 8a2e940563f4e2da81f69e681b9db215dffe2dd0..ffb600be6eb868f3dcd3630f96bf75782368d994 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -26,7 +26,7 @@ int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock ASSERT(pReq->blockNum == taosArrayGetSize(pReq->dataLen)); for (int32_t i = 0; i < blockNum; i++) { - SRetrieveTableRsp* pRetrieve = taosArrayGetP(pReq->data, i); + SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*) taosArrayGetP(pReq->data, i); SSDataBlock* pDataBlock = taosArrayGet(pArray, i); blockDecode(pDataBlock, pRetrieve->data); // TODO: refactor diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 5bbd60bf0d313a7a686ef43a4fb191027173b0ca..9226d6ebb8dabc349c333470b0fc2d44396c2c0b 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -114,12 +114,14 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { int32_t batchCnt = 0; while (1) { if (atomic_load_8(&pTask->taskStatus) == TASK_STATUS__DROPPING) { + taosArrayDestroy(pRes); return 0; } SSDataBlock* output = NULL; uint64_t ts = 0; if (qExecTask(exec, &output, &ts) < 0) { + taosArrayDestroy(pRes); return -1; } if (output == NULL) { diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 63527e2b1c55af8814eed0551088fa1026ff55f2..518ace86300f48e572ed0f1f6d538be7c3723b4e 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -171,6 +171,7 @@ int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask) { } #endif +#if 0 SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId) { SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, &taskId, sizeof(int32_t)); if (ppTask) { @@ -180,6 +181,7 @@ SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId) { return NULL; } } +#endif SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId) { taosRLockLatch(&pMeta->lock); diff --git a/source/libs/stream/src/streamQueue.c b/source/libs/stream/src/streamQueue.c index 7eafcdc93ea20b96140391c0e555ff5caf5bff93..882fba718bb3bc6cde73cea38c151e849936a155 100644 --- a/source/libs/stream/src/streamQueue.c +++ b/source/libs/stream/src/streamQueue.c @@ -46,6 +46,7 @@ void streamQueueClose(SStreamQueue* queue) { taosMemoryFree(queue); } +#if 0 bool streamQueueResEmpty(const SStreamQueueRes* pRes) { // return true; @@ -101,3 +102,4 @@ SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue) { if (pNode) return streamQueueBuildRes(pNode); return (SStreamQueueRes){0}; } +#endif diff --git a/source/util/src/terror.c b/source/util/src/terror.c index d03cfff022190847358dabbc2e3b8b069364697f..1624aec5af4b7c96dd3c92fee075bc40a3715bfb 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -97,6 +97,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TIMEOUT_ERROR, "Operation timeout") TAOS_DEFINE_ERROR(TSDB_CODE_APP_IS_STARTING, "Database is starting up") TAOS_DEFINE_ERROR(TSDB_CODE_APP_IS_STOPPING, "Database is closing down") +TAOS_DEFINE_ERROR(TSDB_CODE_IVLD_DATA_FMT, "Invalid data format") //client 
 TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_OPERATION,       "Invalid operation")