diff --git a/docs/en/20-third-party/01-grafana.mdx b/docs/en/20-third-party/01-grafana.mdx
index 9076f163bea60e87b5d1aaed79d966572463d56d..5dbeb31a231464e48b4f977420f03f0ede81e78e 100644
--- a/docs/en/20-third-party/01-grafana.mdx
+++ b/docs/en/20-third-party/01-grafana.mdx
@@ -31,38 +31,41 @@ TDengine currently supports Grafana versions 7.5 and above. Users can go to the
### Install Grafana Plugin and Configure Data Source
-
+
-Set the url and authorization environment variables by `export` or a [`.env`(dotenv) file](https://hexdocs.pm/dotenvy/dotenv-file-format.html):
+Under Grafana 8, plugin catalog allows you to [browse and manage plugins within Grafana](https://grafana.com/docs/grafana/next/administration/plugin-management/#plugin-catalog) (but for Grafana 7.x, use **With Script** or **Install & Configure Manually**). Find the page at **Configurations > Plugins**, search **TDengine** and click it to install.
-```sh
-export TDENGINE_API=http://tdengine.local:6041
-# user + password
-export TDENGINE_USER=user
-export TDENGINE_PASSWORD=password
-
-# Other useful variables
-# - If to install TDengine data source, default is true
-export TDENGINE_DS_ENABLED=false
-# - Data source name to be created, default is TDengine
-export TDENGINE_DS_NAME=TDengine
-# - Data source organization id, default is 1
-export GF_ORG_ID=1
-# - Data source is editable in admin ui or not, default is 0 (false)
-export TDENGINE_EDITABLE=1
-```
+![Search tdengine in grafana plugins](./grafana/grafana-plugin-search-tdengine.png)
+
+Installation may take a few minutes. Then you can **Create a TDengine data source**:
+
+![Install and configure Grafana data source](./grafana/grafana-install-and-config.png)
+
+Then you can add a TDengine data source by filling in the configuration options.
+
+![TDengine Database Grafana plugin add data source](./grafana/grafana-data-source.png)
+
+You can create dashboards with TDengine now.
+
+
+
-Run `install.sh`:
+On a server with Grafana installed, running `install.sh` with the TDengine URL and username/password will install the TDengine data source plugin and add a data source named TDengine. This is the recommended way for Grafana 7.x or [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) users.
```sh
-bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)"
+bash -c "$(curl -fsSL \
+ https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \
+ -a http://localhost:6041 \
+ -u root \
+ -p taosdata
```
-With this script, TDengine data source plugin and the Grafana data source will be installed and created automatically with Grafana provisioning configurations. Save the script and type `./install.sh --help` for the full usage of the script.
+Restart the Grafana service and open Grafana in a web browser, usually at <http://localhost:3000>.
-And then, restart Grafana service and open Grafana in web-browser, usually .
+Save the script and type `./install.sh --help` for the full usage of the script.
+
Follow the installation steps in [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) with the [``grafana-cli`` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation.
@@ -115,6 +118,73 @@ Click `Save & Test` to test. You should see a success message if the test worked
![TDengine Database TDinsight plugin add database 4](./grafana/add_datasource4.webp)
+
+
+
+Please refer to [Install plugins in the Docker container](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container). This will install `tdengine-datasource` plugin when Grafana container starts:
+
+```bash
+docker run -d \
+ -p 3000:3000 \
+ --name=grafana \
+ -e "GF_INSTALL_PLUGINS=tdengine-datasource" \
+ grafana/grafana
+```
+
+You can setup a zero-configuration stack for TDengine + Grafana by [docker-compose](https://docs.docker.com/compose/) and [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) file:
+
+1. Save the provisioning configuration file to `tdengine.yml`.
+
+ ```yml
+ apiVersion: 1
+ datasources:
+ - name: TDengine
+ type: tdengine-datasource
+ orgId: 1
+ url: "$TDENGINE_API"
+ isDefault: true
+ secureJsonData:
+ url: "$TDENGINE_URL"
+ basicAuth: "$TDENGINE_BASIC_AUTH"
+ token: "$TDENGINE_CLOUD_TOKEN"
+ version: 1
+ editable: true
+ ```
+
+2. Write `docker-compose.yml` with [TDengine](https://hub.docker.com/r/tdengine/tdengine) and [Grafana](https://hub.docker.com/r/grafana/grafana) image.
+
+ ```yml
+ version: "3.7"
+
+ services:
+ tdengine:
+ image: tdengine/tdengine:2.6.0.2
+ environment:
+ TAOS_FQDN: tdengine
+ volumes:
+ - tdengine-data:/var/lib/taos/
+ grafana:
+ image: grafana/grafana:8.5.6
+ volumes:
+ - ./tdengine.yml/:/etc/grafana/provisioning/tdengine.yml
+ - grafana-data:/var/lib/grafana
+ environment:
+ # install tdengine plugin at start
+ GF_INSTALL_PLUGINS: "tdengine-datasource"
+ TDENGINE_URL: "http://tdengine:6041"
+ #printf "$TDENGINE_USER:$TDENGINE_PASSWORD" | base64
+ TDENGINE_BASIC_AUTH: "cm9vdDp0YmFzZTEyNQ=="
+ ports:
+ - 3000:3000
+ volumes:
+ grafana-data:
+ tdengine-data:
+ ```
+
+3. Start TDengine and Grafana services: `docker-compose up -d`.
+
+Open Grafana at <http://localhost:3000>, and you can add dashboards with TDengine now.
+
diff --git a/docs/en/20-third-party/grafana/grafana-data-source.png b/docs/en/20-third-party/grafana/grafana-data-source.png
new file mode 100644
index 0000000000000000000000000000000000000000..989ffcca0bf5baae8798b0695e259aca35f0442a
Binary files /dev/null and b/docs/en/20-third-party/grafana/grafana-data-source.png differ
diff --git a/docs/en/20-third-party/grafana/grafana-install-and-config.png b/docs/en/20-third-party/grafana/grafana-install-and-config.png
new file mode 100644
index 0000000000000000000000000000000000000000..b918da8b2d62e694fe1797e09cf8f23f103bc97e
Binary files /dev/null and b/docs/en/20-third-party/grafana/grafana-install-and-config.png differ
diff --git a/docs/en/20-third-party/grafana/grafana-plugin-search-tdengine.png b/docs/en/20-third-party/grafana/grafana-plugin-search-tdengine.png
new file mode 100644
index 0000000000000000000000000000000000000000..cf3b66977b64f7dcd617f06024a66066cd62810e
Binary files /dev/null and b/docs/en/20-third-party/grafana/grafana-plugin-search-tdengine.png differ
diff --git a/docs/zh/20-third-party/01-grafana.mdx b/docs/zh/20-third-party/01-grafana.mdx
index 09c0d786cf5ac3fc9a6c5e6a72087be0b658454d..93090ffd38c3ce66488826c486584dd305dbc20c 100644
--- a/docs/zh/20-third-party/01-grafana.mdx
+++ b/docs/zh/20-third-party/01-grafana.mdx
@@ -29,39 +29,41 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/
### 安装 Grafana Plugin 并配置数据源
-
+
-将集群信息设置为环境变量;也可以使用 `.env` 文件,请参考 [dotenv](https://hexdocs.pm/dotenvy/dotenv-file-format.html):
+使用 Grafana 最新版本(8.5+),您可以在 Grafana 中[浏览和管理插件](https://grafana.com/docs/grafana/next/administration/plugin-management/#plugin-catalog)(对于 7.x 版本,请使用 **安装脚本** 或 **手动安装并配置** 方式)。在 Grafana 管理界面中的 **Configurations > Plugins** 页面直接搜索并按照提示安装 TDengine。
-```sh
-export TDENGINE_API=http://tdengine.local:6041
-# user + password
-export TDENGINE_USER=user
-export TDENGINE_PASSWORD=password
-
-# 其他环境变量:
-# - 是否安装数据源,默认为 true,表示安装
-export TDENGINE_DS_ENABLED=false
-# - 数据源名称,默认为 TDengine
-export TDENGINE_DS_NAME=TDengine
-# - 数据源所属组织 ID,默认为 1
-export GF_ORG_ID=1
-# - 数据源是否可通过管理面板编辑,默认为 0,表示不可编辑
-export TDENGINE_EDITABLE=1
-```
+![Search tdengine in grafana plugins](grafana-plugin-search-tdengine.png)
+
+如图示即安装完毕,按照指示 **Create a TDengine data source** 添加数据源。
+
+![Install and configure Grafana data source](grafana-install-and-config.png)
+
+输入 TDengine 相关配置,完成数据源配置。
+
+![TDengine Database Grafana plugin add data source](./grafana-data-source.png)
+
+配置完毕,现在可以使用 TDengine 创建 Dashboard 了。
+
+
+
-运行安装脚本:
+对于使用 Grafana 7.x 版本或使用 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置的用户,可以在 Grafana 服务器上使用安装脚本自动安装插件及添加数据源 Provisioning 配置文件。
```sh
-bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)"
+bash -c "$(curl -fsSL \
+ https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \
+ -a http://localhost:6041 \
+ -u root \
+ -p taosdata
```
-该脚本将自动安装 Grafana 插件并配置数据源。安装完毕后,需要重启 Grafana 服务后生效。
+安装完毕后,需要重启 Grafana 服务后方可生效。
保存该脚本并执行 `./install.sh --help` 可查看详细帮助文档。
-
+
使用 [`grafana-cli` 命令行工具](https://grafana.com/docs/grafana/latest/administration/cli/) 进行插件[安装](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation)。
@@ -113,6 +115,73 @@ GF_INSTALL_PLUGINS=tdengine-datasource
![TDengine Database Grafana plugin add data source](./add_datasource4.webp)
+
+
+
+参考 [Grafana 容器化安装说明](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container)。使用如下命令启动一个容器,并自动安装 TDengine 插件:
+
+```bash
+docker run -d \
+ -p 3000:3000 \
+ --name=grafana \
+ -e "GF_INSTALL_PLUGINS=tdengine-datasource" \
+ grafana/grafana
+```
+
+使用 docker-compose,配置 Grafana Provisioning 自动化配置,体验 TDengine + Grafana 组合的零配置启动:
+
+1. 保存该文件为 `tdengine.yml`。
+
+ ```yml
+ apiVersion: 1
+ datasources:
+ - name: TDengine
+ type: tdengine-datasource
+ orgId: 1
+ url: "$TDENGINE_API"
+ isDefault: true
+ secureJsonData:
+ url: "$TDENGINE_URL"
+ basicAuth: "$TDENGINE_BASIC_AUTH"
+ token: "$TDENGINE_CLOUD_TOKEN"
+ version: 1
+ editable: true
+ ```
+
+2. 保存该文件为 `docker-compose.yml`。
+
+ ```yml
+ version: "3.7"
+
+ services:
+ tdengine:
+ image: tdengine/tdengine:2.6.0.2
+ environment:
+ TAOS_FQDN: tdengine
+ volumes:
+ - tdengine-data:/var/lib/taos/
+ grafana:
+ image: grafana/grafana:8.5.6
+ volumes:
+ - ./tdengine.yml/:/etc/grafana/provisioning/tdengine.yml
+ - grafana-data:/var/lib/grafana
+ environment:
+ # install tdengine plugin at start
+ GF_INSTALL_PLUGINS: "tdengine-datasource"
+ TDENGINE_URL: "http://tdengine:6041"
+ #printf "$TDENGINE_USER:$TDENGINE_PASSWORD" | base64
+ TDENGINE_BASIC_AUTH: "cm9vdDp0YmFzZTEyNQ=="
+ ports:
+ - 3000:3000
+ volumes:
+ grafana-data:
+ tdengine-data:
+ ```
+
+3. 使用 docker-compose 命令启动 TDengine + Grafana :`docker-compose up -d`。
+
+打开 Grafana <http://localhost:3000>,现在可以添加 Dashboard 了。
+
diff --git a/docs/zh/20-third-party/grafana-data-source.png b/docs/zh/20-third-party/grafana-data-source.png
new file mode 100644
index 0000000000000000000000000000000000000000..989ffcca0bf5baae8798b0695e259aca35f0442a
Binary files /dev/null and b/docs/zh/20-third-party/grafana-data-source.png differ
diff --git a/docs/zh/20-third-party/grafana-install-and-config.png b/docs/zh/20-third-party/grafana-install-and-config.png
new file mode 100644
index 0000000000000000000000000000000000000000..b918da8b2d62e694fe1797e09cf8f23f103bc97e
Binary files /dev/null and b/docs/zh/20-third-party/grafana-install-and-config.png differ
diff --git a/docs/zh/20-third-party/grafana-plugin-search-tdengine.png b/docs/zh/20-third-party/grafana-plugin-search-tdengine.png
new file mode 100644
index 0000000000000000000000000000000000000000..cf3b66977b64f7dcd617f06024a66066cd62810e
Binary files /dev/null and b/docs/zh/20-third-party/grafana-plugin-search-tdengine.png differ
diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h
index d61262c9dc88fce786f5ad9595792b619d8128cd..51a5b84a1c3abdd37947092c02cc5a2fc5a474fe 100644
--- a/source/client/inc/clientInt.h
+++ b/source/client/inc/clientInt.h
@@ -128,7 +128,7 @@ typedef struct STscObj {
int8_t connType;
int32_t acctId;
uint32_t connId;
- uint64_t id; // ref ID returned by taosAddRef
+ TAOS *id; // ref ID returned by taosAddRef
TdThreadMutex mutex; // used to protect the operation on db
int32_t numOfReqs; // number of sqlObj bound to this connection
SAppInstInfo* pAppInfo;
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index 24246e5c45316ae5e78d2d907f75289d9d3e1cbd..78454268b715d369d91872740fe8add850c2f4ac 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -38,7 +38,7 @@ static TdThreadOnce tscinit = PTHREAD_ONCE_INIT;
volatile int32_t tscInitRes = 0;
static void registerRequest(SRequestObj *pRequest) {
- STscObj *pTscObj = acquireTscObj(pRequest->pTscObj->id);
+ STscObj *pTscObj = acquireTscObj(*(int64_t*)pRequest->pTscObj->id);
assert(pTscObj != NULL);
@@ -54,7 +54,7 @@ static void registerRequest(SRequestObj *pRequest) {
int32_t currentInst = atomic_add_fetch_64((int64_t *)&pSummary->currentRequests, 1);
tscDebug("0x%" PRIx64 " new Request from connObj:0x%" PRIx64
", current:%d, app current:%d, total:%d, reqId:0x%" PRIx64,
- pRequest->self, pRequest->pTscObj->id, num, currentInst, total, pRequest->requestId);
+ pRequest->self, *(int64_t*)pRequest->pTscObj->id, num, currentInst, total, pRequest->requestId);
}
}
@@ -70,8 +70,8 @@ static void deregisterRequest(SRequestObj *pRequest) {
int64_t duration = taosGetTimestampUs() - pRequest->metric.start;
tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%" PRIu64
" ms, current:%d, app current:%d",
- pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst);
- releaseTscObj(pTscObj->id);
+ pRequest->self, *(int64_t*)pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst);
+ releaseTscObj(*(int64_t*)pTscObj->id);
}
// todo close the transporter properly
@@ -80,7 +80,7 @@ void closeTransporter(STscObj *pTscObj) {
return;
}
- tscDebug("free transporter:%p in connObj: 0x%" PRIx64, pTscObj->pAppInfo->pTransporter, pTscObj->id);
+ tscDebug("free transporter:%p in connObj: 0x%" PRIx64, pTscObj->pAppInfo->pTransporter, *(int64_t*)pTscObj->id);
rpcClose(pTscObj->pAppInfo->pTransporter);
}
@@ -128,7 +128,7 @@ void closeAllRequests(SHashObj *pRequests) {
void destroyTscObj(void *pObj) {
STscObj *pTscObj = pObj;
- SClientHbKey connKey = {.tscRid = pTscObj->id, .connType = pTscObj->connType};
+ SClientHbKey connKey = {.tscRid = *(int64_t*)pTscObj->id, .connType = pTscObj->connType};
hbDeregisterConn(pTscObj->pAppInfo->pAppHbMgr, connKey);
int64_t connNum = atomic_sub_fetch_64(&pTscObj->pAppInfo->numOfConns, 1);
closeAllRequests(pTscObj->pRequests);
@@ -137,7 +137,7 @@ void destroyTscObj(void *pObj) {
// TODO
//closeTransporter(pTscObj);
}
- tscDebug("connObj 0x%" PRIx64 " destroyed, totalConn:%" PRId64, pTscObj->id, pTscObj->pAppInfo->numOfConns);
+ tscDebug("connObj 0x%" PRIx64 " destroyed, totalConn:%" PRId64, *(int64_t*)pTscObj->id, pTscObj->pAppInfo->numOfConns);
taosThreadMutexDestroy(&pTscObj->mutex);
taosMemoryFreeClear(pTscObj);
}
@@ -166,10 +166,11 @@ void *createTscObj(const char *user, const char *auth, const char *db, int32_t c
}
taosThreadMutexInit(&pObj->mutex, NULL);
- pObj->id = taosAddRef(clientConnRefPool, pObj);
+ pObj->id = taosMemoryMalloc(sizeof(int64_t));
+ *(int64_t*)pObj->id = taosAddRef(clientConnRefPool, pObj);
pObj->schemalessType = 1;
- tscDebug("connObj created, 0x%" PRIx64, pObj->id);
+ tscDebug("connObj created, 0x%" PRIx64, *(int64_t*)pObj->id);
return pObj;
}
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index d04cc90ee44120dc9dce246fe20df1daf8c3aa26..9668d2b72c202cad01354e7457b216c210bc09d1 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -263,6 +263,7 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
int32_t asyncExecDdlQuery(SRequestObj* pRequest, SQuery* pQuery) {
// drop table if exists not_exists_table
if (NULL == pQuery->pCmdMsg) {
+ pRequest->body.queryFp(pRequest->body.param, pRequest, 0);
return TSDB_CODE_SUCCESS;
}
@@ -609,6 +610,16 @@ void schedulerExecCb(SQueryResult* pResult, void* param, int32_t code) {
SRequestObj* pRequest = (SRequestObj*)param;
pRequest->code = code;
+ if (TDMT_VND_SUBMIT == pRequest->type || TDMT_VND_DELETE == pRequest->type ||
+ TDMT_VND_CREATE_TABLE == pRequest->type) {
+ pRequest->body.resInfo.numOfRows = pResult->numOfRows;
+
+ if (pRequest->body.queryJob != 0) {
+ schedulerFreeJob(pRequest->body.queryJob, 0);
+ pRequest->body.queryJob = 0;
+ }
+ }
+
tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%d - %s, reqId:0x%" PRIx64,
pRequest->self, code, tstrerror(code), pRequest->requestId);
@@ -712,7 +723,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery) {
code = asyncExecDdlQuery(pRequest, pQuery);
break;
case QUERY_EXEC_MODE_SCHEDULE: {
- SArray* pNodeList = taosArrayInit(4, sizeof(struct SQueryNodeAddr));
+ SArray* pNodeList = taosArrayInit(4, sizeof(SQueryNodeLoad));
pRequest->type = pQuery->msgType;
@@ -725,12 +736,10 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery) {
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE};
SAppInstInfo* pAppInfo = getAppInfo(pRequest);
- if (TSDB_CODE_SUCCESS == code) {
- code = qCreateQueryPlan(&cxt, &pRequest->body.pDag, pNodeList);
- if (code) {
- tscError("0x%" PRIx64 " failed to create query plan, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),
- pRequest->requestId);
- }
+ code = qCreateQueryPlan(&cxt, &pRequest->body.pDag, pNodeList);
+ if (code) {
+ tscError("0x%" PRIx64 " failed to create query plan, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),
+ pRequest->requestId);
}
if (TSDB_CODE_SUCCESS == code) {
@@ -927,7 +936,7 @@ STscObj* taosConnectImpl(const char* user, const char* auth, const char* db, __t
taos_close_internal(pTscObj);
pTscObj = NULL;
} else {
- tscDebug("0x%" PRIx64 " connection is opening, connId:%u, dnodeConn:%p, reqId:0x%" PRIx64, pTscObj->id,
+ tscDebug("0x%" PRIx64 " connection is opening, connId:%u, dnodeConn:%p, reqId:0x%" PRIx64, *(int64_t*)pTscObj->id,
pTscObj->connId, pTscObj->pAppInfo->pTransporter, pRequest->requestId);
destroyRequest(pRequest);
}
@@ -1090,10 +1099,10 @@ TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, cons
STscObj* pObj = taos_connect_internal(ip, user, NULL, auth, db, port, CONN_TYPE__QUERY);
if (pObj) {
- return (TAOS*)pObj->id;
+ return pObj->id;
}
- return (TAOS*)0;
+ return NULL;
}
TAOS* taos_connect_l(const char* ip, int ipLen, const char* user, int userLen, const char* pass, int passLen,
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 9a5d5bbecaa4b0b91da751ebc98bad9cdde81278..51e05537d487fe5c64af5219974f383a9c936a64 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -99,10 +99,10 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha
STscObj* pObj = taos_connect_internal(ip, user, pass, NULL, db, port, CONN_TYPE__QUERY);
if (pObj) {
- return (TAOS*)pObj->id;
+ return pObj->id;
}
- return (TAOS*)0;
+ return NULL;
}
void taos_close_internal(void *taos) {
@@ -111,19 +111,24 @@ void taos_close_internal(void *taos) {
}
STscObj *pTscObj = (STscObj *)taos;
- tscDebug("0x%" PRIx64 " try to close connection, numOfReq:%d", pTscObj->id, pTscObj->numOfReqs);
+ tscDebug("0x%" PRIx64 " try to close connection, numOfReq:%d", *(int64_t*)pTscObj->id, pTscObj->numOfReqs);
- taosRemoveRef(clientConnRefPool, pTscObj->id);
+ taosRemoveRef(clientConnRefPool, *(int64_t*)pTscObj->id);
}
void taos_close(TAOS *taos) {
- STscObj* pObj = acquireTscObj((int64_t)taos);
+ if (taos == NULL) {
+ return;
+ }
+
+ STscObj* pObj = acquireTscObj(*(int64_t*)taos);
if (NULL == pObj) {
return;
}
taos_close_internal(pObj);
- releaseTscObj((int64_t)taos);
+ releaseTscObj(*(int64_t*)taos);
+ taosMemoryFree(taos);
}
@@ -206,7 +211,12 @@ static void syncQueryFn(void *param, void *res, int32_t code) {
}
TAOS_RES *taos_query(TAOS *taos, const char *sql) {
- STscObj* pTscObj = acquireTscObj((int64_t)taos);
+ if (NULL == taos) {
+ terrno = TSDB_CODE_TSC_DISCONNECTED;
+ return NULL;
+ }
+
+ STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
if (pTscObj == NULL || sql == NULL) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return NULL;
@@ -219,13 +229,13 @@ TAOS_RES *taos_query(TAOS *taos, const char *sql) {
taos_query_a(taos, sql, syncQueryFn, param);
tsem_wait(¶m->sem);
- releaseTscObj((int64_t)taos);
+ releaseTscObj(*(int64_t*)taos);
return param->pRequest;
#else
size_t sqlLen = strlen(sql);
if (sqlLen > (size_t)TSDB_MAX_ALLOWED_SQL_LEN) {
- releaseTscObj((int64_t)taos);
+ releaseTscObj(*(int64_t*)taos);
tscError("sql string exceeds max length:%d", TSDB_MAX_ALLOWED_SQL_LEN);
terrno = TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
return NULL;
@@ -233,7 +243,7 @@ TAOS_RES *taos_query(TAOS *taos, const char *sql) {
TAOS_RES* pRes = execQuery(pTscObj, sql, sqlLen);
- releaseTscObj((int64_t)taos);
+ releaseTscObj(*(int64_t*)taos);
return pRes;
#endif
@@ -453,15 +463,15 @@ int taos_result_precision(TAOS_RES *res) {
}
int taos_select_db(TAOS *taos, const char *db) {
- STscObj* pObj = acquireTscObj((int64_t)taos);
+ STscObj* pObj = acquireTscObj(*(int64_t*)taos);
if (pObj == NULL) {
- releaseTscObj((int64_t)taos);
+ releaseTscObj(*(int64_t*)taos);
terrno = TSDB_CODE_TSC_DISCONNECTED;
return TSDB_CODE_TSC_DISCONNECTED;
}
if (db == NULL || strlen(db) == 0) {
- releaseTscObj((int64_t)taos);
+ releaseTscObj(*(int64_t*)taos);
terrno = TSDB_CODE_TSC_INVALID_INPUT;
return terrno;
}
@@ -473,7 +483,7 @@ int taos_select_db(TAOS *taos, const char *db) {
int32_t code = taos_errno(pRequest);
taos_free_result(pRequest);
- releaseTscObj((int64_t)taos);
+ releaseTscObj(*(int64_t*)taos);
return code;
}
@@ -626,7 +636,7 @@ int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex) {
int taos_validate_sql(TAOS *taos, const char *sql) { return true; }
void taos_reset_current_db(TAOS *taos) {
- STscObj* pTscObj = acquireTscObj((int64_t)taos);
+ STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
if (pTscObj == NULL) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return;
@@ -634,17 +644,17 @@ void taos_reset_current_db(TAOS *taos) {
resetConnectDB(pTscObj);
- releaseTscObj((int64_t)taos);
+ releaseTscObj(*(int64_t*)taos);
}
const char *taos_get_server_info(TAOS *taos) {
- STscObj* pTscObj = acquireTscObj((int64_t)taos);
+ STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
if (pTscObj == NULL) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return NULL;
}
- releaseTscObj((int64_t)taos);
+ releaseTscObj(*(int64_t*)taos);
return pTscObj->ver;
}
@@ -671,6 +681,8 @@ static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) {
}
void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
+ tscDebug("enter meta callback, code %s", tstrerror(code));
+
SqlParseWrapper *pWrapper = (SqlParseWrapper *)param;
SQuery *pQuery = pWrapper->pQuery;
SRequestObj *pRequest = pWrapper->pRequest;
@@ -711,11 +723,11 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
}
void taos_query_a(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param) {
- STscObj* pTscObj = acquireTscObj((int64_t)taos);
+ STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
if (pTscObj == NULL || sql == NULL || NULL == fp) {
terrno = TSDB_CODE_INVALID_PARA;
if (pTscObj) {
- releaseTscObj((int64_t)taos);
+ releaseTscObj(*(int64_t*)taos);
} else {
terrno = TSDB_CODE_TSC_DISCONNECTED;
}
@@ -936,7 +948,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
}
TAOS_STMT *taos_stmt_init(TAOS *taos) {
- STscObj* pObj = acquireTscObj((int64_t)taos);
+ STscObj* pObj = acquireTscObj(*(int64_t*)taos);
if (NULL == pObj) {
tscError("invalid parameter for %s", __FUNCTION__);
terrno = TSDB_CODE_TSC_DISCONNECTED;
@@ -945,7 +957,7 @@ TAOS_STMT *taos_stmt_init(TAOS *taos) {
TAOS_STMT* pStmt = stmtInit(pObj);
- releaseTscObj((int64_t)taos);
+ releaseTscObj(*(int64_t*)taos);
return pStmt;
}
diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c
index bf627ec26b7bd6527be8feac03c33bfe64751039..db8aebb32293de4e20c95b96062283d4187e867e 100644
--- a/source/client/src/clientMsgHandler.c
+++ b/source/client/src/clientMsgHandler.c
@@ -77,7 +77,7 @@ int32_t processConnectRsp(void* param, const SDataBuf* pMsg, int32_t code) {
for (int32_t i = 0; i < connectRsp.epSet.numOfEps; ++i) {
tscDebug("0x%" PRIx64 " epSet.fqdn[%d]:%s port:%d, connObj:0x%" PRIx64, pRequest->requestId, i,
- connectRsp.epSet.eps[i].fqdn, connectRsp.epSet.eps[i].port, pTscObj->id);
+ connectRsp.epSet.eps[i].fqdn, connectRsp.epSet.eps[i].port, *(int64_t*)pTscObj->id);
}
pTscObj->connId = connectRsp.connId;
@@ -90,7 +90,7 @@ int32_t processConnectRsp(void* param, const SDataBuf* pMsg, int32_t code) {
pTscObj->connType = connectRsp.connType;
- hbRegisterConn(pTscObj->pAppInfo->pAppHbMgr, pTscObj->id, connectRsp.clusterId, connectRsp.connType);
+ hbRegisterConn(pTscObj->pAppInfo->pAppHbMgr, *(int64_t*)pTscObj->id, connectRsp.clusterId, connectRsp.connType);
// pRequest->body.resInfo.pRspMsg = pMsg->pData;
tscDebug("0x%" PRIx64 " clusterId:%" PRId64 ", totalConn:%" PRId64, pRequest->requestId, connectRsp.clusterId,
diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c
index 25018e0d15a2064b02d6b10dc44e31bc5056d7c6..580aafa010f6faeaa941952599d8c2aa1a7d3dd9 100644
--- a/source/client/src/clientSml.c
+++ b/source/client/src/clientSml.c
@@ -309,7 +309,7 @@ static int32_t smlApplySchemaAction(SSmlHandle *info, SSchemaAction *action) {
case SCHEMA_ACTION_ADD_COLUMN: {
int n = sprintf(result, "alter stable `%s` add column ", action->alterSTable.sTableName);
smlBuildColumnDescription(action->alterSTable.field, result + n, capacity - n, &outBytes);
- TAOS_RES *res = taos_query((TAOS*)info->taos->id, result); // TODO async doAsyncQuery
+ TAOS_RES *res = taos_query(info->taos->id, result); // TODO async doAsyncQuery
code = taos_errno(res);
const char *errStr = taos_errstr(res);
if (code != TSDB_CODE_SUCCESS) {
@@ -323,7 +323,7 @@ static int32_t smlApplySchemaAction(SSmlHandle *info, SSchemaAction *action) {
case SCHEMA_ACTION_ADD_TAG: {
int n = sprintf(result, "alter stable `%s` add tag ", action->alterSTable.sTableName);
smlBuildColumnDescription(action->alterSTable.field, result + n, capacity - n, &outBytes);
- TAOS_RES *res = taos_query((TAOS*)info->taos->id, result); // TODO async doAsyncQuery
+ TAOS_RES *res = taos_query(info->taos->id, result); // TODO async doAsyncQuery
code = taos_errno(res);
const char *errStr = taos_errstr(res);
if (code != TSDB_CODE_SUCCESS) {
@@ -337,7 +337,7 @@ static int32_t smlApplySchemaAction(SSmlHandle *info, SSchemaAction *action) {
case SCHEMA_ACTION_CHANGE_COLUMN_SIZE: {
int n = sprintf(result, "alter stable `%s` modify column ", action->alterSTable.sTableName);
smlBuildColumnDescription(action->alterSTable.field, result + n, capacity - n, &outBytes);
- TAOS_RES *res = taos_query((TAOS*)info->taos->id, result); // TODO async doAsyncQuery
+ TAOS_RES *res = taos_query(info->taos->id, result); // TODO async doAsyncQuery
code = taos_errno(res);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " apply schema action. error : %s", info->id, taos_errstr(res));
@@ -350,7 +350,7 @@ static int32_t smlApplySchemaAction(SSmlHandle *info, SSchemaAction *action) {
case SCHEMA_ACTION_CHANGE_TAG_SIZE: {
int n = sprintf(result, "alter stable `%s` modify tag ", action->alterSTable.sTableName);
smlBuildColumnDescription(action->alterSTable.field, result + n, capacity - n, &outBytes);
- TAOS_RES *res = taos_query((TAOS*)info->taos->id, result); // TODO async doAsyncQuery
+ TAOS_RES *res = taos_query(info->taos->id, result); // TODO async doAsyncQuery
code = taos_errno(res);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " apply schema action. error : %s", info->id, taos_errstr(res));
@@ -405,7 +405,7 @@ static int32_t smlApplySchemaAction(SSmlHandle *info, SSchemaAction *action) {
pos--;
++freeBytes;
outBytes = snprintf(pos, freeBytes, ")");
- TAOS_RES *res = taos_query((TAOS*)info->taos->id, result);
+ TAOS_RES *res = taos_query(info->taos->id, result);
code = taos_errno(res);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " apply schema action. error : %s", info->id, taos_errstr(res));
@@ -2434,7 +2434,7 @@ static void smlInsertCallback(void *param, void *res, int32_t code) {
*/
TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int protocol, int precision) {
- STscObj* pTscObj = acquireTscObj((int64_t)taos);
+ STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
if (NULL == pTscObj) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
uError("SML:taos_schemaless_insert invalid taos");
@@ -2443,7 +2443,7 @@ TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int pr
SRequestObj* request = (SRequestObj*)createRequest(pTscObj, TSDB_SQL_INSERT);
if(!request){
- releaseTscObj((int64_t)taos);
+ releaseTscObj(*(int64_t*)taos);
uError("SML:taos_schemaless_insert error request is null");
return NULL;
}
@@ -2531,6 +2531,6 @@ end:
// ((STscObj *)taos)->schemalessType = 0;
pTscObj->schemalessType = 1;
uDebug("resultend:%s", request->msgBuf);
- releaseTscObj((int64_t)taos);
+ releaseTscObj(*(int64_t*)taos);
return (TAOS_RES*)request;
}
diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
index d895b73fb0425955df6f6b1565d9a6369af6fa40..a845ae7b39d2582e52a6a01cfb222d848d3abc25 100644
--- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
+++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
@@ -204,8 +204,8 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_CONTINUE, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
@@ -213,7 +213,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_CHANGE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_DELETE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DEPLOY_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_DROP_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c
index 99476c4cbd3a95bb1b76d8e8498fd808e5787293..f2671f6279119b614e89906ddf35c032c99811c0 100644
--- a/source/libs/planner/src/planPhysiCreater.c
+++ b/source/libs/planner/src/planPhysiCreater.c
@@ -35,7 +35,7 @@ typedef struct SPhysiPlanContext {
int32_t errCode;
int16_t nextDataBlockId;
SArray* pLocationHelper;
- SArray* pExecNodeList;
+ SArray* pExecNodeList; // SArray
} SPhysiPlanContext;
static int32_t getSlotKey(SNode* pNode, const char* pStmtName, char* pKey) {
@@ -459,7 +459,7 @@ static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubpla
}
vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode);
SQueryNodeLoad node = {.addr = pSubplan->execNode, .load = 0};
- taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode);
+ taosArrayPush(pCxt->pExecNodeList, &node);
return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTagScan, pPhyNode);
}
@@ -532,7 +532,7 @@ static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan*
if (0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_USER_TABLES)) {
vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode);
SQueryNodeLoad node = {.addr = pSubplan->execNode, .load = 0};
- taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode);
+ taosArrayPush(pCxt->pExecNodeList, &node);
} else {
SQueryNodeLoad node = {.addr = {.nodeId = MNODE_HANDLE, .epSet = pCxt->pPlanCxt->mgmtEpSet}, .load = 0};
taosArrayPush(pCxt->pExecNodeList, &node);
diff --git a/source/libs/scheduler/inc/schedulerInt.h b/source/libs/scheduler/inc/schedulerInt.h
index 545bb6c45a0277c6c47e5eff7c745a69f7f574f3..6b2570c5b70132032c7df84107aa2b294b79233a 100644
--- a/source/libs/scheduler/inc/schedulerInt.h
+++ b/source/libs/scheduler/inc/schedulerInt.h
@@ -210,7 +210,7 @@ typedef struct SSchJob {
int32_t levelNum;
int32_t taskNum;
SRequestConnInfo conn;
- SArray *nodeList; // qnode/vnode list, SArray
+ SArray *nodeList; // qnode/vnode list, SArray
SArray *levels; // starting from 0. SArray
SNodeList *subPlans; // subplan pointer copied from DAG, no need to free it in scheduler
diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c
index 733f8694cc36bc63061957fd6c01fbe745778b6f..679a244f31f4098fadd82ba203b17af955d6e3a5 100644
--- a/source/libs/scheduler/src/schJob.c
+++ b/source/libs/scheduler/src/schJob.c
@@ -614,7 +614,7 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) {
++pJob->taskNum;
}
- SCH_JOB_DLOG("level initialized, taskNum:%d", taskNum);
+ SCH_JOB_DLOG("level %d initialized, taskNum:%d", i, taskNum);
}
SCH_ERR_JRET(schBuildTaskRalation(pJob, planToTask));
@@ -636,8 +636,9 @@ int32_t schSetAddrsFromNodeList(SSchJob *pJob, SSchTask *pTask) {
nodeNum = taosArrayGetSize(pJob->nodeList);
for (int32_t i = 0; i < nodeNum && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) {
- SQueryNodeAddr *naddr = taosArrayGet(pJob->nodeList, i);
-
+ SQueryNodeLoad *nload = taosArrayGet(pJob->nodeList, i);
+ SQueryNodeAddr *naddr = &nload->addr;
+
if (NULL == taosArrayPush(pTask->candidateAddrs, naddr)) {
SCH_TASK_ELOG("taosArrayPush execNode to candidate addrs failed, addNum:%d, errno:%d", addNum, errno);
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);