慢慢CG / TDengine (forked from taosdata / TDengine)
Commit b9d4476b — authored May 26, 2021 by Shengliang Guan

Merge from develop

Parents: 0542a345, cadf32d3
Showing 16 changed files with 759 additions and 658 deletions (+759, -658)
.circleci/config.yml (+13, -0)
.gitmodules (+3, -3)
cmake/version.inc (+1, -1)
documentation20/cn/12.taos-sql/01.error-code/docs.md (+170, -170)
documentation20/cn/12.taos-sql/docs.md (+94, -74)
documentation20/cn/13.faq/docs.md (+17, -14)
snap/snapcraft.yaml (+2, -2)
src/connector/grafanaplugin (+1, -1)
src/connector/odbc/examples/c/main.c (+1, -5)
src/kit/taosdemo/taosdemo.c (+427, -356)
src/os/src/detail/osMemory.c (+11, -11)
src/os/src/windows/wSemphone.c (+11, -13)
tests/Jenkinsfile (+3, -3)
tests/script/unique/cluster/cache.sim (+1, -1)
tests/script/unique/dnode/monitor.sim (+1, -1)
tests/script/unique/dnode/monitor_bug.sim (+3, -3)
.circleci/config.yml (new file, mode 100644)

# Use the latest 2.1 version of CircleCI pipeline process engine. See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1
# Use a package of configuration called an orb.
orbs:
  # Declare a dependency on the welcome-orb
  welcome: circleci/welcome-orb@0.4.1
# Orchestrate or schedule a set of jobs
workflows:
  # Name the workflow "welcome"
  welcome:
    # Run the welcome/run job in its own container
    jobs:
      - welcome/run
.gitmodules

[submodule "src/connector/go"]
	path = src/connector/go
	url = https://github.com/taosdata/driver-go
	url = git@github.com:taosdata/driver-go.git
[submodule "src/connector/grafanaplugin"]
	path = src/connector/grafanaplugin
	url = https://github.com/taosdata/grafanaplugin
	url = git@github.com:taosdata/grafanaplugin.git
[submodule "src/connector/hivemq-tdengine-extension"]
	path = src/connector/hivemq-tdengine-extension
	url = https://github.com/huskar-t/hivemq-tdengine-extension.git
	url = git@github.com:taosdata/hivemq-tdengine-extension.git
[submodule "tests/examples/rust"]
	path = tests/examples/rust
	url = https://github.com/songtianyi/tdengine-rust-bindings.git
cmake/version.inc

...
@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
  SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
  SET(TD_VER_NUMBER "2.1.0.0")
  SET(TD_VER_NUMBER "2.1.1.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
...
documentation20/cn/12.taos-sql/01.error-code/docs.md
# TDengine 2.0 错误码以及对应的十进制码
| 状态码 | 模 | 错误码(十六进制) | 错误描述 | 错误码(十进制) |
| 状态码 | 模 | 错误码(十六进制) | 错误描述 | 错误码(十进制) |
|-----------------------| :---: | :---------: | :------------------------ | ---------------- |
| :-------------------------------------- | :--: | :----------------: | :------------------------------------------- | :--------------- |
|TSDB_CODE_RPC_ACTION_IN_PROGRESS| 0 | 0x0001| "Action in progress"| -2147483647|
| TSDB_CODE_RPC_ACTION_IN_PROGRESS | 0 | 0x0001 | "Action in progress" | -2147483647 |
|TSDB_CODE_RPC_AUTH_REQUIRED| 0 | 0x0002 | "Authentication required"| -2147483646|
| TSDB_CODE_RPC_AUTH_REQUIRED | 0 | 0x0002 | "Authentication required" | -2147483646 |
|TSDB_CODE_RPC_AUTH_FAILURE| 0| 0x0003 | "Authentication failure"| -2147483645|
| TSDB_CODE_RPC_AUTH_FAILURE | 0 | 0x0003 | "Authentication failure" | -2147483645 |
|TSDB_CODE_RPC_REDIRECT |0 | 0x0004| "Redirect"| -2147483644|
| TSDB_CODE_RPC_REDIRECT | 0 | 0x0004 | "Redirect" | -2147483644 |
|TSDB_CODE_RPC_NOT_READY| 0 | 0x0005 | "System not ready"| -2147483643|
| TSDB_CODE_RPC_NOT_READY | 0 | 0x0005 | "System not ready" | -2147483643 |
|TSDB_CODE_RPC_ALREADY_PROCESSED| 0 | 0x0006 |"Message already processed"| -2147483642|
| TSDB_CODE_RPC_ALREADY_PROCESSED | 0 | 0x0006 | "Message already processed" | -2147483642 |
|TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED| 0 |0x0007| "Last session not finished"| -2147483641|
| TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED | 0 | 0x0007 | "Last session not finished" | -2147483641 |
|TSDB_CODE_RPC_MISMATCHED_LINK_ID| 0| 0x0008 | "Mismatched meter id"| -2147483640|
| TSDB_CODE_RPC_MISMATCHED_LINK_ID | 0 | 0x0008 | "Mismatched meter id" | -2147483640 |
|TSDB_CODE_RPC_TOO_SLOW| 0 | 0x0009 | "Processing of request timed out"| -2147483639|
| TSDB_CODE_RPC_TOO_SLOW | 0 | 0x0009 | "Processing of request timed out" | -2147483639 |
|TSDB_CODE_RPC_MAX_SESSIONS| 0 | 0x000A | "Number of sessions reached limit"| -2147483638|
| TSDB_CODE_RPC_MAX_SESSIONS | 0 | 0x000A | "Number of sessions reached limit" | -2147483638 |
|TSDB_CODE_RPC_NETWORK_UNAVAIL| 0 |0x000B | "Unable to establish connection" |-2147483637|
| TSDB_CODE_RPC_NETWORK_UNAVAIL | 0 | 0x000B | "Unable to establish connection" | -2147483637 |
|TSDB_CODE_RPC_APP_ERROR| 0| 0x000C | "Unexpected generic error in RPC"| -2147483636|
| TSDB_CODE_RPC_APP_ERROR | 0 | 0x000C | "Unexpected generic error in RPC" | -2147483636 |
|TSDB_CODE_RPC_UNEXPECTED_RESPONSE| 0 |0x000D | "Unexpected response"| -2147483635|
| TSDB_CODE_RPC_UNEXPECTED_RESPONSE | 0 | 0x000D | "Unexpected response" | -2147483635 |
|TSDB_CODE_RPC_INVALID_VALUE| 0 | 0x000E | "Invalid value"| -2147483634|
| TSDB_CODE_RPC_INVALID_VALUE | 0 | 0x000E | "Invalid value" | -2147483634 |
|TSDB_CODE_RPC_INVALID_TRAN_ID| 0 | 0x000F | "Invalid transaction id"| -2147483633|
| TSDB_CODE_RPC_INVALID_TRAN_ID | 0 | 0x000F | "Invalid transaction id" | -2147483633 |
|TSDB_CODE_RPC_INVALID_SESSION_ID| 0| 0x0010 | "Invalid session id"| -2147483632|
| TSDB_CODE_RPC_INVALID_SESSION_ID | 0 | 0x0010 | "Invalid session id" | -2147483632 |
|TSDB_CODE_RPC_INVALID_MSG_TYPE| 0| 0x0011| "Invalid message type"| -2147483631|
| TSDB_CODE_RPC_INVALID_MSG_TYPE | 0 | 0x0011 | "Invalid message type" | -2147483631 |
|TSDB_CODE_RPC_INVALID_RESPONSE_TYPE| 0 | 0x0012| "Invalid response type"| -2147483630|
| TSDB_CODE_RPC_INVALID_RESPONSE_TYPE | 0 | 0x0012 | "Invalid response type" | -2147483630 |
|TSDB_CODE_RPC_INVALID_TIME_STAMP| 0| 0x0013| "Invalid timestamp"| -2147483629|
| TSDB_CODE_RPC_INVALID_TIME_STAMP | 0 | 0x0013 | "Invalid timestamp" | -2147483629 |
|TSDB_CODE_COM_OPS_NOT_SUPPORT| 0 | 0x0100| "Operation not supported"| -2147483392|
| TSDB_CODE_COM_OPS_NOT_SUPPORT | 0 | 0x0100 | "Operation not supported" | -2147483392 |
|TSDB_CODE_COM_MEMORY_CORRUPTED |0| 0x0101 | "Memory corrupted"| -2147483391|
| TSDB_CODE_COM_MEMORY_CORRUPTED | 0 | 0x0101 | "Memory corrupted" | -2147483391 |
|TSDB_CODE_COM_OUT_OF_MEMORY| 0| 0x0102| "Out of memory"| -2147483390|
| TSDB_CODE_COM_OUT_OF_MEMORY | 0 | 0x0102 | "Out of memory" | -2147483390 |
|TSDB_CODE_COM_INVALID_CFG_MSG| 0 | 0x0103| "Invalid config message"| -2147483389|
| TSDB_CODE_COM_INVALID_CFG_MSG | 0 | 0x0103 | "Invalid config message" | -2147483389 |
|TSDB_CODE_COM_FILE_CORRUPTED| 0| 0x0104| "Data file corrupted" |-2147483388|
| TSDB_CODE_COM_FILE_CORRUPTED | 0 | 0x0104 | "Data file corrupted" | -2147483388 |
|TSDB_CODE_TSC_INVALID_SQL| 0| 0x0200 | "Invalid SQL statement"| -2147483136|
| TSDB_CODE_TSC_INVALID_SQL | 0 | 0x0200 | "Invalid SQL statement" | -2147483136 |
|TSDB_CODE_TSC_INVALID_QHANDLE| 0 | 0x0201 | "Invalid qhandle"| -2147483135|
| TSDB_CODE_TSC_INVALID_QHANDLE | 0 | 0x0201 | "Invalid qhandle" | -2147483135 |
|TSDB_CODE_TSC_INVALID_TIME_STAMP| 0 | 0x0202 | "Invalid combination of client/service time"| -2147483134|
| TSDB_CODE_TSC_INVALID_TIME_STAMP | 0 | 0x0202 | "Invalid combination of client/service time" | -2147483134 |
|TSDB_CODE_TSC_INVALID_VALUE| 0 | 0x0203| "Invalid value in client"| -2147483133|
| TSDB_CODE_TSC_INVALID_VALUE | 0 | 0x0203 | "Invalid value in client" | -2147483133 |
|TSDB_CODE_TSC_INVALID_VERSION| 0 | 0x0204 | "Invalid client version" |-2147483132|
| TSDB_CODE_TSC_INVALID_VERSION | 0 | 0x0204 | "Invalid client version" | -2147483132 |
|TSDB_CODE_TSC_INVALID_IE| 0 | 0x0205 | "Invalid client ie" |-2147483131|
| TSDB_CODE_TSC_INVALID_IE | 0 | 0x0205 | "Invalid client ie" | -2147483131 |
|TSDB_CODE_TSC_INVALID_FQDN| 0 | 0x0206| "Invalid host name"| -2147483130|
| TSDB_CODE_TSC_INVALID_FQDN | 0 | 0x0206 | "Invalid host name" | -2147483130 |
|TSDB_CODE_TSC_INVALID_USER_LENGTH| 0 | 0x0207| "Invalid user name"| -2147483129|
| TSDB_CODE_TSC_INVALID_USER_LENGTH | 0 | 0x0207 | "Invalid user name" | -2147483129 |
|TSDB_CODE_TSC_INVALID_PASS_LENGTH| 0 | 0x0208 | "Invalid password"| -2147483128|
| TSDB_CODE_TSC_INVALID_PASS_LENGTH | 0 | 0x0208 | "Invalid password" | -2147483128 |
|TSDB_CODE_TSC_INVALID_DB_LENGTH| 0 | 0x0209| "Database name too long"| -2147483127|
| TSDB_CODE_TSC_INVALID_DB_LENGTH | 0 | 0x0209 | "Database name too long" | -2147483127 |
|TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH| 0 | 0x020A | "Table name too long"| -2147483126|
| TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH | 0 | 0x020A | "Table name too long" | -2147483126 |
|TSDB_CODE_TSC_INVALID_CONNECTION| 0 | 0x020B| "Invalid connection"| -2147483125|
| TSDB_CODE_TSC_INVALID_CONNECTION | 0 | 0x020B | "Invalid connection" | -2147483125 |
|TSDB_CODE_TSC_OUT_OF_MEMORY| 0 | 0x020C | "System out of memory" |-2147483124|
| TSDB_CODE_TSC_OUT_OF_MEMORY | 0 | 0x020C | "System out of memory" | -2147483124 |
|TSDB_CODE_TSC_NO_DISKSPACE| 0 | 0x020D | "System out of disk space"| -2147483123|
| TSDB_CODE_TSC_NO_DISKSPACE | 0 | 0x020D | "System out of disk space" | -2147483123 |
|TSDB_CODE_TSC_QUERY_CACHE_ERASED| 0 | 0x020E| "Query cache erased"| -2147483122|
| TSDB_CODE_TSC_QUERY_CACHE_ERASED | 0 | 0x020E | "Query cache erased" | -2147483122 |
|TSDB_CODE_TSC_QUERY_CANCELLED| 0 | 0x020F |"Query terminated"| -2147483121|
| TSDB_CODE_TSC_QUERY_CANCELLED | 0 | 0x020F | "Query terminated" | -2147483121 |
|TSDB_CODE_TSC_SORTED_RES_TOO_MANY| 0 |0x0210 | "Result set too large to be sorted"| -2147483120|
| TSDB_CODE_TSC_SORTED_RES_TOO_MANY | 0 | 0x0210 | "Result set too large to be sorted" | -2147483120 |
|TSDB_CODE_TSC_APP_ERROR| 0 | 0x0211 | "Application error"| -2147483119|
| TSDB_CODE_TSC_APP_ERROR | 0 | 0x0211 | "Application error" | -2147483119 |
|TSDB_CODE_TSC_ACTION_IN_PROGRESS| 0 |0x0212 | "Action in progress"| -2147483118|
| TSDB_CODE_TSC_ACTION_IN_PROGRESS | 0 | 0x0212 | "Action in progress" | -2147483118 |
|TSDB_CODE_TSC_DISCONNECTED| 0 | 0x0213 |"Disconnected from service" |-2147483117|
| TSDB_CODE_TSC_DISCONNECTED | 0 | 0x0213 | "Disconnected from service" | -2147483117 |
|TSDB_CODE_TSC_NO_WRITE_AUTH| 0 | 0x0214 | "No write permission" |-2147483116|
| TSDB_CODE_TSC_NO_WRITE_AUTH | 0 | 0x0214 | "No write permission" | -2147483116 |
|TSDB_CODE_MND_MSG_NOT_PROCESSED| 0| 0x0300| "Message not processed"| -2147482880|
| TSDB_CODE_MND_MSG_NOT_PROCESSED | 0 | 0x0300 | "Message not processed" | -2147482880 |
|TSDB_CODE_MND_ACTION_IN_PROGRESS| 0 | 0x0301 |"Message is progressing"| -2147482879|
| TSDB_CODE_MND_ACTION_IN_PROGRESS | 0 | 0x0301 | "Message is progressing" | -2147482879 |
|TSDB_CODE_MND_ACTION_NEED_REPROCESSED| 0 | 0x0302 |"Messag need to be reprocessed"| -2147482878|
| TSDB_CODE_MND_ACTION_NEED_REPROCESSED | 0 | 0x0302 | "Messag need to be reprocessed" | -2147482878 |
|TSDB_CODE_MND_NO_RIGHTS| 0 | 0x0303| "Insufficient privilege for operation"| -2147482877|
| TSDB_CODE_MND_NO_RIGHTS | 0 | 0x0303 | "Insufficient privilege for operation" | -2147482877 |
|TSDB_CODE_MND_APP_ERROR| 0 | 0x0304 | "Unexpected generic error in mnode"| -2147482876|
| TSDB_CODE_MND_APP_ERROR | 0 | 0x0304 | "Unexpected generic error in mnode" | -2147482876 |
|TSDB_CODE_MND_INVALID_CONNECTION| 0 | 0x0305 | "Invalid message connection"| -2147482875|
| TSDB_CODE_MND_INVALID_CONNECTION | 0 | 0x0305 | "Invalid message connection" | -2147482875 |
|TSDB_CODE_MND_INVALID_MSG_VERSION| 0 | 0x0306 | "Incompatible protocol version"| -2147482874|
| TSDB_CODE_MND_INVALID_MSG_VERSION | 0 | 0x0306 | "Incompatible protocol version" | -2147482874 |
|TSDB_CODE_MND_INVALID_MSG_LEN| 0| 0x0307 | "Invalid message length"| -2147482873|
| TSDB_CODE_MND_INVALID_MSG_LEN | 0 | 0x0307 | "Invalid message length" | -2147482873 |
|TSDB_CODE_MND_INVALID_MSG_TYPE| 0 | 0x0308 | "Invalid message type" |-2147482872|
| TSDB_CODE_MND_INVALID_MSG_TYPE | 0 | 0x0308 | "Invalid message type" | -2147482872 |
|TSDB_CODE_MND_TOO_MANY_SHELL_CONNS| 0 |0x0309 | "Too many connections"| -2147482871|
| TSDB_CODE_MND_TOO_MANY_SHELL_CONNS | 0 | 0x0309 | "Too many connections" | -2147482871 |
|TSDB_CODE_MND_OUT_OF_MEMORY| 0 |0x030A | "Out of memory in mnode"| -2147482870|
| TSDB_CODE_MND_OUT_OF_MEMORY | 0 | 0x030A | "Out of memory in mnode" | -2147482870 |
|TSDB_CODE_MND_INVALID_SHOWOBJ| 0 | 0x030B |"Data expired"| -2147482869|
| TSDB_CODE_MND_INVALID_SHOWOBJ | 0 | 0x030B | "Data expired" | -2147482869 |
|TSDB_CODE_MND_INVALID_QUERY_ID |0 | 0x030C |"Invalid query id" |-2147482868|
| TSDB_CODE_MND_INVALID_QUERY_ID | 0 | 0x030C | "Invalid query id" | -2147482868 |
|TSDB_CODE_MND_INVALID_STREAM_ID| 0 |0x030D | "Invalid stream id"| -2147482867|
| TSDB_CODE_MND_INVALID_STREAM_ID | 0 | 0x030D | "Invalid stream id" | -2147482867 |
|TSDB_CODE_MND_INVALID_CONN_ID| 0| 0x030E | "Invalid connection id" |-2147482866|
| TSDB_CODE_MND_INVALID_CONN_ID | 0 | 0x030E | "Invalid connection id" | -2147482866 |
|TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE| 0 | 0x0320| "Object already there"| -2147482848|
| TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE | 0 | 0x0320 | "Object already there" | -2147482848 |
|TSDB_CODE_MND_SDB_ERROR| 0 |0x0321 | "Unexpected generic error in sdb" |-2147482847|
| TSDB_CODE_MND_SDB_ERROR | 0 | 0x0321 | "Unexpected generic error in sdb" | -2147482847 |
|TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE| 0 | 0x0322| "Invalid table type" |-2147482846|
| TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE | 0 | 0x0322 | "Invalid table type" | -2147482846 |
|TSDB_CODE_MND_SDB_OBJ_NOT_THERE| 0 | 0x0323 |"Object not there" |-2147482845|
| TSDB_CODE_MND_SDB_OBJ_NOT_THERE | 0 | 0x0323 | "Object not there" | -2147482845 |
|TSDB_CODE_MND_SDB_INVAID_META_ROW| 0 | 0x0324| "Invalid meta row" |-2147482844|
| TSDB_CODE_MND_SDB_INVAID_META_ROW | 0 | 0x0324 | "Invalid meta row" | -2147482844 |
|TSDB_CODE_MND_SDB_INVAID_KEY_TYPE| 0 | 0x0325 |"Invalid key type" |-2147482843|
| TSDB_CODE_MND_SDB_INVAID_KEY_TYPE | 0 | 0x0325 | "Invalid key type" | -2147482843 |
|TSDB_CODE_MND_DNODE_ALREADY_EXIST| 0 | 0x0330 | "DNode already exists"| -2147482832|
| TSDB_CODE_MND_DNODE_ALREADY_EXIST | 0 | 0x0330 | "DNode already exists" | -2147482832 |
|TSDB_CODE_MND_DNODE_NOT_EXIST| 0 | 0x0331| "DNode does not exist" |-2147482831|
| TSDB_CODE_MND_DNODE_NOT_EXIST | 0 | 0x0331 | "DNode does not exist" | -2147482831 |
|TSDB_CODE_MND_VGROUP_NOT_EXIST| 0 | 0x0332 |"VGroup does not exist"| -2147482830|
| TSDB_CODE_MND_VGROUP_NOT_EXIST | 0 | 0x0332 | "VGroup does not exist" | -2147482830 |
|TSDB_CODE_MND_NO_REMOVE_MASTER |0 | 0x0333 | "Master DNode cannot be removed"| -2147482829|
| TSDB_CODE_MND_NO_REMOVE_MASTER | 0 | 0x0333 | "Master DNode cannot be removed" | -2147482829 |
|TSDB_CODE_MND_NO_ENOUGH_DNODES |0 | 0x0334| "Out of DNodes"| -2147482828|
| TSDB_CODE_MND_NO_ENOUGH_DNODES | 0 | 0x0334 | "Out of DNodes" | -2147482828 |
|TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT |0 | 0x0335 | "Cluster cfg inconsistent"| -2147482827|
| TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT | 0 | 0x0335 | "Cluster cfg inconsistent" | -2147482827 |
|TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION| 0 | 0x0336 | "Invalid dnode cfg option"| -2147482826|
| TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION | 0 | 0x0336 | "Invalid dnode cfg option" | -2147482826 |
|TSDB_CODE_MND_BALANCE_ENABLED| 0 | 0x0337 | "Balance already enabled" |-2147482825|
| TSDB_CODE_MND_BALANCE_ENABLED | 0 | 0x0337 | "Balance already enabled" | -2147482825 |
|TSDB_CODE_MND_VGROUP_NOT_IN_DNODE| 0 |0x0338 | "Vgroup not in dnode"| -2147482824|
| TSDB_CODE_MND_VGROUP_NOT_IN_DNODE | 0 | 0x0338 | "Vgroup not in dnode" | -2147482824 |
|TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE| 0 | 0x0339 | "Vgroup already in dnode"| -2147482823|
| TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE | 0 | 0x0339 | "Vgroup already in dnode" | -2147482823 |
|TSDB_CODE_MND_DNODE_NOT_FREE |0 | 0x033A |"Dnode not avaliable"| -2147482822|
| TSDB_CODE_MND_DNODE_NOT_FREE | 0 | 0x033A | "Dnode not avaliable" | -2147482822 |
|TSDB_CODE_MND_INVALID_CLUSTER_ID |0 |0x033B | "Cluster id not match"| -2147482821|
| TSDB_CODE_MND_INVALID_CLUSTER_ID | 0 | 0x033B | "Cluster id not match" | -2147482821 |
|TSDB_CODE_MND_NOT_READY| 0 | 0x033C |"Cluster not ready"| -2147482820|
| TSDB_CODE_MND_NOT_READY | 0 | 0x033C | "Cluster not ready" | -2147482820 |
|TSDB_CODE_MND_ACCT_ALREADY_EXIST| 0 | 0x0340 | "Account already exists" |-2147482816|
| TSDB_CODE_MND_ACCT_ALREADY_EXIST | 0 | 0x0340 | "Account already exists" | -2147482816 |
|TSDB_CODE_MND_INVALID_ACCT| 0 | 0x0341| "Invalid account"| -2147482815|
| TSDB_CODE_MND_INVALID_ACCT | 0 | 0x0341 | "Invalid account" | -2147482815 |
|TSDB_CODE_MND_INVALID_ACCT_OPTION| 0 | 0x0342 | "Invalid account options"| -2147482814|
| TSDB_CODE_MND_INVALID_ACCT_OPTION | 0 | 0x0342 | "Invalid account options" | -2147482814 |
|TSDB_CODE_MND_USER_ALREADY_EXIST| 0 | 0x0350 | "User already exists"| -2147482800|
| TSDB_CODE_MND_USER_ALREADY_EXIST | 0 | 0x0350 | "User already exists" | -2147482800 |
|TSDB_CODE_MND_INVALID_USER |0 | 0x0351 | "Invalid user" |-2147482799|
| TSDB_CODE_MND_INVALID_USER | 0 | 0x0351 | "Invalid user" | -2147482799 |
|TSDB_CODE_MND_INVALID_USER_FORMAT| 0 |0x0352 |"Invalid user format" |-2147482798|
| TSDB_CODE_MND_INVALID_USER_FORMAT | 0 | 0x0352 | "Invalid user format" | -2147482798 |
|TSDB_CODE_MND_INVALID_PASS_FORMAT| 0| 0x0353 | "Invalid password format"| -2147482797|
| TSDB_CODE_MND_INVALID_PASS_FORMAT | 0 | 0x0353 | "Invalid password format" | -2147482797 |
|TSDB_CODE_MND_NO_USER_FROM_CONN| 0 | 0x0354 | "Can not get user from conn"| -2147482796|
| TSDB_CODE_MND_NO_USER_FROM_CONN | 0 | 0x0354 | "Can not get user from conn" | -2147482796 |
|TSDB_CODE_MND_TOO_MANY_USERS| 0 | 0x0355| "Too many users"| -2147482795|
| TSDB_CODE_MND_TOO_MANY_USERS | 0 | 0x0355 | "Too many users" | -2147482795 |
|TSDB_CODE_MND_TABLE_ALREADY_EXIST| 0| 0x0360| "Table already exists"| -2147482784|
| TSDB_CODE_MND_TABLE_ALREADY_EXIST | 0 | 0x0360 | "Table already exists" | -2147482784 |
|TSDB_CODE_MND_INVALID_TABLE_ID| 0| 0x0361| "Table name too long"| -2147482783|
| TSDB_CODE_MND_INVALID_TABLE_ID | 0 | 0x0361 | "Table name too long" | -2147482783 |
|TSDB_CODE_MND_INVALID_TABLE_NAME| 0| 0x0362 | "Table does not exist"| -2147482782|
| TSDB_CODE_MND_INVALID_TABLE_NAME | 0 | 0x0362 | "Table does not exist" | -2147482782 |
|TSDB_CODE_MND_INVALID_TABLE_TYPE| 0| 0x0363 | "Invalid table type in tsdb"| -2147482781|
| TSDB_CODE_MND_INVALID_TABLE_TYPE | 0 | 0x0363 | "Invalid table type in tsdb" | -2147482781 |
|TSDB_CODE_MND_TOO_MANY_TAGS| 0 | 0x0364| "Too many tags"| -2147482780|
| TSDB_CODE_MND_TOO_MANY_TAGS | 0 | 0x0364 | "Too many tags" | -2147482780 |
|TSDB_CODE_MND_TOO_MANY_TIMESERIES| 0| 0x0366| "Too many time series"| -2147482778|
| TSDB_CODE_MND_TOO_MANY_TIMESERIES | 0 | 0x0366 | "Too many time series" | -2147482778 |
|TSDB_CODE_MND_NOT_SUPER_TABLE| 0 |0x0367| "Not super table"| -2147482777|
| TSDB_CODE_MND_NOT_SUPER_TABLE | 0 | 0x0367 | "Not super table" | -2147482777 |
|TSDB_CODE_MND_COL_NAME_TOO_LONG| 0| 0x0368| "Tag name too long"| -2147482776|
| TSDB_CODE_MND_COL_NAME_TOO_LONG | 0 | 0x0368 | "Tag name too long" | -2147482776 |
|TSDB_CODE_MND_TAG_ALREAY_EXIST| 0| 0x0369| "Tag already exists"| -2147482775|
| TSDB_CODE_MND_TAG_ALREAY_EXIST | 0 | 0x0369 | "Tag already exists" | -2147482775 |
|TSDB_CODE_MND_TAG_NOT_EXIST| 0 |0x036A | "Tag does not exist" |-2147482774|
| TSDB_CODE_MND_TAG_NOT_EXIST | 0 | 0x036A | "Tag does not exist" | -2147482774 |
|TSDB_CODE_MND_FIELD_ALREAY_EXIST| 0 | 0x036B| "Field already exists"| -2147482773|
| TSDB_CODE_MND_FIELD_ALREAY_EXIST | 0 | 0x036B | "Field already exists" | -2147482773 |
|TSDB_CODE_MND_FIELD_NOT_EXIST| 0 | 0x036C | "Field does not exist"| -2147482772|
| TSDB_CODE_MND_FIELD_NOT_EXIST | 0 | 0x036C | "Field does not exist" | -2147482772 |
|TSDB_CODE_MND_INVALID_STABLE_NAME |0 | 0x036D |"Super table does not exist" |-2147482771|
| TSDB_CODE_MND_INVALID_STABLE_NAME | 0 | 0x036D | "Super table does not exist" | -2147482771 |
|TSDB_CODE_MND_DB_NOT_SELECTED| 0 | 0x0380 | "Database not specified or available"| -2147482752|
| TSDB_CODE_MND_DB_NOT_SELECTED | 0 | 0x0380 | "Database not specified or available" | -2147482752 |
|TSDB_CODE_MND_DB_ALREADY_EXIST| 0 | 0x0381 | "Database already exists"| -2147482751|
| TSDB_CODE_MND_DB_ALREADY_EXIST | 0 | 0x0381 | "Database already exists" | -2147482751 |
|TSDB_CODE_MND_INVALID_DB_OPTION| 0 | 0x0382 | "Invalid database options"| -2147482750|
| TSDB_CODE_MND_INVALID_DB_OPTION | 0 | 0x0382 | "Invalid database options" | -2147482750 |
|TSDB_CODE_MND_INVALID_DB| 0 | 0x0383 | "Invalid database name"| -2147482749|
| TSDB_CODE_MND_INVALID_DB | 0 | 0x0383 | "Invalid database name" | -2147482749 |
|TSDB_CODE_MND_MONITOR_DB_FORBIDDEN| 0 | 0x0384 | "Cannot delete monitor database"| -2147482748|
| TSDB_CODE_MND_MONITOR_DB_FORBIDDEN | 0 | 0x0384 | "Cannot delete monitor database" | -2147482748 |
|TSDB_CODE_MND_TOO_MANY_DATABASES| 0| 0x0385 | "Too many databases for account"| -2147482747|
| TSDB_CODE_MND_TOO_MANY_DATABASES | 0 | 0x0385 | "Too many databases for account" | -2147482747 |
|TSDB_CODE_MND_DB_IN_DROPPING| 0 | 0x0386| "Database not available" |-2147482746|
| TSDB_CODE_MND_DB_IN_DROPPING | 0 | 0x0386 | "Database not available" | -2147482746 |
|TSDB_CODE_DND_MSG_NOT_PROCESSED| 0| 0x0400 | "Message not processed"| -2147482624|
| TSDB_CODE_DND_MSG_NOT_PROCESSED | 0 | 0x0400 | "Message not processed" | -2147482624 |
|TSDB_CODE_DND_OUT_OF_MEMORY |0 | 0x0401 | "Dnode out of memory"| -2147482623|
| TSDB_CODE_DND_OUT_OF_MEMORY | 0 | 0x0401 | "Dnode out of memory" | -2147482623 |
|TSDB_CODE_DND_NO_WRITE_ACCESS| 0 | 0x0402 | "No permission for disk files in dnode"| -2147482622|
| TSDB_CODE_DND_NO_WRITE_ACCESS | 0 | 0x0402 | "No permission for disk files in dnode" | -2147482622 |
|TSDB_CODE_DND_INVALID_MSG_LEN| 0 | 0x0403 | "Invalid message length"| -2147482621|
| TSDB_CODE_DND_INVALID_MSG_LEN | 0 | 0x0403 | "Invalid message length" | -2147482621 |
|TSDB_CODE_VND_ACTION_IN_PROGRESS |0 |0x0500| "Action in progress" |-2147482368|
| TSDB_CODE_VND_ACTION_IN_PROGRESS | 0 | 0x0500 | "Action in progress" | -2147482368 |
|TSDB_CODE_VND_MSG_NOT_PROCESSED| 0 |0x0501 | "Message not processed" |-2147482367|
| TSDB_CODE_VND_MSG_NOT_PROCESSED | 0 | 0x0501 | "Message not processed" | -2147482367 |
|TSDB_CODE_VND_ACTION_NEED_REPROCESSED |0 |0x0502| "Action need to be reprocessed"| -2147482366|
| TSDB_CODE_VND_ACTION_NEED_REPROCESSED | 0 | 0x0502 | "Action need to be reprocessed" | -2147482366 |
|TSDB_CODE_VND_INVALID_VGROUP_ID |0 | 0x0503| "Invalid Vgroup ID"| -2147482365|
| TSDB_CODE_VND_INVALID_VGROUP_ID | 0 | 0x0503 | "Invalid Vgroup ID" | -2147482365 |
|TSDB_CODE_VND_INIT_FAILED| 0 | 0x0504 | "Vnode initialization failed"| -2147482364|
| TSDB_CODE_VND_INIT_FAILED | 0 | 0x0504 | "Vnode initialization failed" | -2147482364 |
|TSDB_CODE_VND_NO_DISKSPACE| 0 |0x0505| "System out of disk space" |-2147482363|
| TSDB_CODE_VND_NO_DISKSPACE | 0 | 0x0505 | "System out of disk space" | -2147482363 |
|TSDB_CODE_VND_NO_DISK_PERMISSIONS| 0 | 0x0506| "No write permission for disk files" |-2147482362|
| TSDB_CODE_VND_NO_DISK_PERMISSIONS | 0 | 0x0506 | "No write permission for disk files" | -2147482362 |
|TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR| 0 | 0x0507 | "Missing data file"| -2147482361|
| TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR | 0 | 0x0507 | "Missing data file" | -2147482361 |
|TSDB_CODE_VND_OUT_OF_MEMORY |0| 0x0508 | "Out of memory"| -2147482360|
| TSDB_CODE_VND_OUT_OF_MEMORY | 0 | 0x0508 | "Out of memory" | -2147482360 |
|TSDB_CODE_VND_APP_ERROR| 0| 0x0509 | "Unexpected generic error in vnode"| -2147482359|
| TSDB_CODE_VND_APP_ERROR | 0 | 0x0509 | "Unexpected generic error in vnode" | -2147482359 |
|TSDB_CODE_VND_INVALID_STATUS |0| 0x0510 | "Database not ready"| -2147482352|
| TSDB_CODE_VND_INVALID_STATUS | 0 | 0x0510 | "Database not ready" | -2147482352 |
|TSDB_CODE_VND_NOT_SYNCED| 0 | 0x0511 | "Database suspended"| -2147482351|
| TSDB_CODE_VND_NOT_SYNCED | 0 | 0x0511 | "Database suspended" | -2147482351 |
|TSDB_CODE_VND_NO_WRITE_AUTH| 0 | 0x0512| "Write operation denied" |-2147482350|
| TSDB_CODE_VND_NO_WRITE_AUTH | 0 | 0x0512 | "Write operation denied" | -2147482350 |
|TSDB_CODE_TDB_INVALID_TABLE_ID |0 | 0x0600 | "Invalid table ID"| -2147482112|
| TSDB_CODE_TDB_INVALID_TABLE_ID | 0 | 0x0600 | "Invalid table ID" | -2147482112 |
|TSDB_CODE_TDB_INVALID_TABLE_TYPE| 0| 0x0601 |"Invalid table type"| -2147482111|
| TSDB_CODE_TDB_INVALID_TABLE_TYPE | 0 | 0x0601 | "Invalid table type" | -2147482111 |
|TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION| 0| 0x0602| "Invalid table schema version"| -2147482110|
| TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION | 0 | 0x0602 | "Invalid table schema version" | -2147482110 |
|TSDB_CODE_TDB_TABLE_ALREADY_EXIST| 0 | 0x0603| "Table already exists"| -2147482109|
| TSDB_CODE_TDB_TABLE_ALREADY_EXIST | 0 | 0x0603 | "Table already exists" | -2147482109 |
|TSDB_CODE_TDB_INVALID_CONFIG| 0 | 0x0604| "Invalid configuration"| -2147482108|
| TSDB_CODE_TDB_INVALID_CONFIG | 0 | 0x0604 | "Invalid configuration" | -2147482108 |
|TSDB_CODE_TDB_INIT_FAILED| 0 | 0x0605| "Tsdb init failed"| -2147482107|
| TSDB_CODE_TDB_INIT_FAILED | 0 | 0x0605 | "Tsdb init failed" | -2147482107 |
|TSDB_CODE_TDB_NO_DISKSPACE| 0 | 0x0606| "No diskspace for tsdb"| -2147482106|
| TSDB_CODE_TDB_NO_DISKSPACE | 0 | 0x0606 | "No diskspace for tsdb" | -2147482106 |
|TSDB_CODE_TDB_NO_DISK_PERMISSIONS| 0 | 0x0607| "No permission for disk files"| -2147482105|
| TSDB_CODE_TDB_NO_DISK_PERMISSIONS | 0 | 0x0607 | "No permission for disk files" | -2147482105 |
|TSDB_CODE_TDB_FILE_CORRUPTED| 0 | 0x0608| "Data file(s) corrupted"| -2147482104|
| TSDB_CODE_TDB_FILE_CORRUPTED | 0 | 0x0608 | "Data file(s) corrupted" | -2147482104 |
|TSDB_CODE_TDB_OUT_OF_MEMORY| 0 | 0x0609| "Out of memory"| -2147482103|
| TSDB_CODE_TDB_OUT_OF_MEMORY | 0 | 0x0609 | "Out of memory" | -2147482103 |
|TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE| 0 | 0x060A| "Tag too old"| -2147482102|
| TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE | 0 | 0x060A | "Tag too old" | -2147482102 |
|TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE |0| 0x060B | "Timestamp data out of range"| -2147482101|
| TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE | 0 | 0x060B | "Timestamp data out of range" | -2147482101 |
|TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP| 0| 0x060C| "Submit message is messed up"| -2147482100|
| TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP | 0 | 0x060C | "Submit message is messed up" | -2147482100 |
|TSDB_CODE_TDB_INVALID_ACTION| 0 | 0x060D | "Invalid operation"| -2147482099|
| TSDB_CODE_TDB_INVALID_ACTION | 0 | 0x060D | "Invalid operation" | -2147482099 |
|TSDB_CODE_TDB_INVALID_CREATE_TB_MSG| 0 | 0x060E| "Invalid creation of table"| -2147482098|
| TSDB_CODE_TDB_INVALID_CREATE_TB_MSG | 0 | 0x060E | "Invalid creation of table" | -2147482098 |
|TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM| 0 | 0x060F| "No table data in memory skiplist" |-2147482097|
| TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM | 0 | 0x060F | "No table data in memory skiplist" | -2147482097 |
|TSDB_CODE_TDB_FILE_ALREADY_EXISTS| 0 | 0x0610| "File already exists"| -2147482096|
| TSDB_CODE_TDB_FILE_ALREADY_EXISTS | 0 | 0x0610 | "File already exists" | -2147482096 |
|TSDB_CODE_TDB_TABLE_RECONFIGURE| 0 | 0x0611| "Need to reconfigure table"| -2147482095|
| TSDB_CODE_TDB_TABLE_RECONFIGURE | 0 | 0x0611 | "Need to reconfigure table" | -2147482095 |
|TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO| 0 | 0x0612| "Invalid information to create table"| -2147482094|
| TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO | 0 | 0x0612 | "Invalid information to create table" | -2147482094 |
|TSDB_CODE_QRY_INVALID_QHANDLE| 0 | 0x0700| "Invalid handle"| -2147481856|
| TSDB_CODE_QRY_INVALID_QHANDLE | 0 | 0x0700 | "Invalid handle" | -2147481856 |
|TSDB_CODE_QRY_INVALID_MSG| 0 | 0x0701| "Invalid message"| -2147481855|
| TSDB_CODE_QRY_INVALID_MSG | 0 | 0x0701 | "Invalid message" | -2147481855 |
|TSDB_CODE_QRY_NO_DISKSPACE| 0 | 0x0702 | "No diskspace for query"| -2147481854|
| TSDB_CODE_QRY_NO_DISKSPACE | 0 | 0x0702 | "No diskspace for query" | -2147481854 |
|TSDB_CODE_QRY_OUT_OF_MEMORY| 0 | 0x0703 | "System out of memory"| -2147481853|
| TSDB_CODE_QRY_OUT_OF_MEMORY | 0 | 0x0703 | "System out of memory" | -2147481853 |
|TSDB_CODE_QRY_APP_ERROR| 0 | 0x0704 | "Unexpected generic error in query"| -2147481852|
| TSDB_CODE_QRY_APP_ERROR | 0 | 0x0704 | "Unexpected generic error in query" | -2147481852 |
|TSDB_CODE_QRY_DUP_JOIN_KEY| 0 | 0x0705| "Duplicated join key"| -2147481851|
| TSDB_CODE_QRY_DUP_JOIN_KEY | 0 | 0x0705 | "Duplicated join key" | -2147481851 |
|TSDB_CODE_QRY_EXCEED_TAGS_LIMIT| 0 | 0x0706 | "Tag conditon too many"| -2147481850|
| TSDB_CODE_QRY_EXCEED_TAGS_LIMIT | 0 | 0x0706 | "Tag conditon too many" | -2147481850 |
|TSDB_CODE_QRY_NOT_READY |0| 0x0707 | "Query not ready" |-2147481849|
| TSDB_CODE_QRY_NOT_READY | 0 | 0x0707 | "Query not ready" | -2147481849 |
|TSDB_CODE_QRY_HAS_RSP| 0 | 0x0708| "Query should response"| -2147481848|
| TSDB_CODE_QRY_HAS_RSP | 0 | 0x0708 | "Query should response" | -2147481848 |
|TSDB_CODE_GRANT_EXPIRED| 0 | 0x0800| "License expired"| -2147481600|
| TSDB_CODE_GRANT_EXPIRED | 0 | 0x0800 | "License expired" | -2147481600 |
|TSDB_CODE_GRANT_DNODE_LIMITED| 0 | 0x0801 | "DNode creation limited by licence"| -2147481599|
| TSDB_CODE_GRANT_DNODE_LIMITED | 0 | 0x0801 | "DNode creation limited by licence" | -2147481599 |
|TSDB_CODE_GRANT_ACCT_LIMITED |0| 0x0802 |"Account creation limited by license"| -2147481598|
| TSDB_CODE_GRANT_ACCT_LIMITED | 0 | 0x0802 | "Account creation limited by license" | -2147481598 |
|TSDB_CODE_GRANT_TIMESERIES_LIMITED| 0 | 0x0803 | "Table creation limited by license"| -2147481597|
| TSDB_CODE_GRANT_TIMESERIES_LIMITED | 0 | 0x0803 | "Table creation limited by license" | -2147481597 |
|TSDB_CODE_GRANT_DB_LIMITED| 0 | 0x0804 | "DB creation limited by license"| -2147481596|
| TSDB_CODE_GRANT_DB_LIMITED | 0 | 0x0804 | "DB creation limited by license" | -2147481596 |
|TSDB_CODE_GRANT_USER_LIMITED| 0 | 0x0805 | "User creation limited by license"| -2147481595|
| TSDB_CODE_GRANT_USER_LIMITED | 0 | 0x0805 | "User creation limited by license" | -2147481595 |
|TSDB_CODE_GRANT_CONN_LIMITED| 0| 0x0806 | "Conn creation limited by license" |-2147481594|
| TSDB_CODE_GRANT_CONN_LIMITED | 0 | 0x0806 | "Conn creation limited by license" | -2147481594 |
|TSDB_CODE_GRANT_STREAM_LIMITED| 0 | 0x0807 | "Stream creation limited by license"| -2147481593|
| TSDB_CODE_GRANT_STREAM_LIMITED | 0 | 0x0807 | "Stream creation limited by license" | -2147481593 |
|TSDB_CODE_GRANT_SPEED_LIMITED| 0 | 0x0808 | "Write speed limited by license" |-2147481592|
| TSDB_CODE_GRANT_SPEED_LIMITED | 0 | 0x0808 | "Write speed limited by license" | -2147481592 |
|TSDB_CODE_GRANT_STORAGE_LIMITED| 0 |0x0809 | "Storage capacity limited by license"| -2147481591|
| TSDB_CODE_GRANT_STORAGE_LIMITED | 0 | 0x0809 | "Storage capacity limited by license" | -2147481591 |
|TSDB_CODE_GRANT_QUERYTIME_LIMITED| 0 | 0x080A | "Query time limited by license" |-2147481590|
| TSDB_CODE_GRANT_QUERYTIME_LIMITED | 0 | 0x080A | "Query time limited by license" | -2147481590 |
|TSDB_CODE_GRANT_CPU_LIMITED| 0 |0x080B |"CPU cores limited by license"| -2147481589|
| TSDB_CODE_GRANT_CPU_LIMITED | 0 | 0x080B | "CPU cores limited by license" | -2147481589 |
|TSDB_CODE_SYN_INVALID_CONFIG| 0 | 0x0900| "Invalid Sync Configuration"| -2147481344|
| TSDB_CODE_SYN_INVALID_CONFIG | 0 | 0x0900 | "Invalid Sync Configuration" | -2147481344 |
|TSDB_CODE_SYN_NOT_ENABLED| 0 | 0x0901 | "Sync module not enabled" |-2147481343|
| TSDB_CODE_SYN_NOT_ENABLED | 0 | 0x0901 | "Sync module not enabled" | -2147481343 |
|TSDB_CODE_WAL_APP_ERROR| 0| 0x1000 | "Unexpected generic error in wal" |-2147479552|
| TSDB_CODE_WAL_APP_ERROR | 0 | 0x1000 | "Unexpected generic error in wal" | -2147479552 |
\ No newline at end of file
documentation20/cn/12.taos-sql/docs.md

...
@@ -41,9 +41,9 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
在TDengine中,普通表的数据模型中可使用以下 10 种数据类型。
| 类型 | Bytes | 说明 |
| # | **类型** | **Bytes** | **说明** |
| ---- | :-------: | ------ | ------------------------------------------------------------ |
| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。(从 2.0.18 版本开始,已经去除了这一时间范围限制) |
| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。(从 2.0.18.0 版本开始,已经去除了这一时间范围限制) |
| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31 用作 NULL |
| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL |
| 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
...
@@ -53,6 +53,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用于 NULL |
| 9 | BOOL | 1 | 布尔型,{true, false} |
| 10 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 |
<!-- REPLACE_OPEN_TO_ENTERPRISE__COLUMN_TYPE_ADDONS -->
**Tips**:
1. TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
...
@@ -63,11 +64,11 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
- **创建数据库**
```mysql
CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [UPDATE 1];
CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
```
说明:
<!-- 注意:上一行中的 SQL 语句在企业版文档中会被替换,因此修改此语句的话,需要修改企业版文档的替换字典键值!! -->
1) KEEP是该数据库的数据保留多长天数,缺省是3650天(10年),数据库会自动删除超过时限的数据;
<!-- REPLACE_OPEN_TO_ENTERPRISE__KEEP_PARAM_DESCRIPTION -->
2) UPDATE 标志数据库支持更新相同时间戳数据;
...
@@ -75,7 +76,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
4) 一条SQL 语句的最大长度为65480个字符;
5) 数据库还有更多与存储相关的配置参数,请参见系统管理。
5) 数据库还有更多与存储相关的配置参数,请参见[服务端配置](https://www.taosdata.com/cn/documentation/taos-sql#management)章节。
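As a quick, hedged illustration of the `DAYS` option this hunk adds to the `CREATE DATABASE` syntax, a hypothetical statement might look like the sketch below. The database name `power` is borrowed from the examples later in this file; the specific KEEP/DAYS values are placeholders, not part of this commit.
```mysql
-- Hypothetical sketch: keep data for 365 days, store 10 days of data per data file,
-- and allow updates to rows that reuse an existing timestamp.
CREATE DATABASE IF NOT EXISTS power KEEP 365 DAYS 10 UPDATE 1;
```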
- **显示系统当前参数**
...
@@ -167,22 +168,22 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name TAGS (tag_value1, ...);
```
以指定的超级表为模板,指定 tags 的值来创建数据表。
以指定的超级表为模板,指定 TAGS 的值来创建数据表。
- **以超级表为模板创建数据表,并指定具体的 tags 列**
- **以超级表为模板创建数据表,并指定具体的 TAGS 列**
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...);
```
以指定的超级表为模板,指定一部分 tags 列的值来创建数据表。(没被指定的 tags 列会设为空值。)
以指定的超级表为模板,指定一部分 TAGS 列的值来创建数据表(没被指定的 TAGS 列会设为空值)。
说明:从 2.0.17 版本开始支持这种方式。在之前的版本中,不允许指定 tags 列,而必须显式给出所有 tags 列的取值。
说明:从 2.0.17.0 版本开始支持这种方式。在之前的版本中,不允许指定 TAGS 列,而必须显式给出所有 TAGS 列的取值。
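To illustrate the partial-TAGS form documented above, a hedged example — assuming the `meters` super table defined later in this file, with tags `location` and `groupId` — might be:
```mysql
-- Hypothetical sketch: create child table d1001 from super table meters,
-- setting only the location tag; the unspecified groupId tag is left as NULL.
CREATE TABLE IF NOT EXISTS d1001 USING meters (location) TAGS ('Beijing.Chaoyang');
```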
- **批量创建数据表**
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
```
以更快的速度批量创建大量数据表。(服务器端 2.0.14 及以上版本)
以更快的速度批量创建大量数据表(服务器端 2.0.14 及以上版本)。
说明:
...
@@ -220,6 +221,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
```mysql
SET MAX_BINARY_DISPLAY_WIDTH <nn>;
```
如显示的内容后面以...结尾时,表示该内容已被截断,可通过本命令修改显示字符宽度以显示完整的内容。
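A hedged illustration of the command described in the newly added line above; the width value 120 is an arbitrary placeholder, not part of the commit.
```mysql
-- Hypothetical sketch: widen the display width so long BINARY/NCHAR values
-- are no longer truncated with "..." in the taos shell output.
SET MAX_BINARY_DISPLAY_WIDTH 120;
```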
- **获取表的结构信息**
...
@@ -236,14 +238,14 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
1) 列的最大个数为1024,最小个数为2;
2) 列名最大长度为64;
2) 列名最大长度为64。
- **表删除列**
```mysql
ALTER TABLE tb_name DROP COLUMN field_name;
```
如果表是通过[超级表](../super-table/)创建,更改表结构的操作只能对超级表进行。同时针对超级表的结构更改对所有通过该结构创建的表生效。对于不是通过超级表创建的表,可以直接修改表结构
如果表是通过超级表创建,更改表结构的操作只能对超级表进行。同时针对超级表的结构更改对所有通过该结构创建的表生效。对于不是通过超级表创建的表,可以直接修改表结构。
## <a class="anchor" id="super-table"></a>超级表STable管理
...
@@ -254,7 +256,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
```mysql
CREATE STABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]);
```
创建 STable,与创建表的 SQL 语法相似,但需指定 TAGS 字段的名称和类型
创建 STable,与创建表的 SQL 语法相似,但需要指定 TAGS 字段的名称和类型
说明:
...
@@ -276,7 +278,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
- **显示当前数据库下的所有超级表信息**
```mysql
SHOW STABLES [LIKE tb_name_wildcar];
SHOW STABLES [LIKE tb_name_wildcard];
```
查看数据库内全部 STable,及其相关信息,包括 STable 的名称、创建时间、列数量、标签(TAG)数量、通过该 STable 建表的数量。
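As a hedged example of the corrected `SHOW STABLES [LIKE tb_name_wildcard]` syntax, the pattern below is a placeholder chosen only for illustration:
```mysql
-- Hypothetical sketch: list only super tables whose names start with "meter".
SHOW STABLES LIKE 'meter%';
```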
...
@@ -341,7 +343,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
```mysql
INSERT INTO tb_name VALUES (field_value, ...);
```
向表tb_name中插入一条记录
向表tb_name中插入一条记录。
- **插入一条记录,数据对应到指定的列**
```mysql
...
@@ -353,42 +355,51 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
```mysql
INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
向表tb_name中插入多条记录
向表tb_name中插入多条记录。
**注意**:在使用“插入多条记录”方式写入数据时,不能把第一列的时间戳取值都设为now,否则会导致语句中的多条记录使用相同的时间戳,于是就可能出现相互覆盖以致这些数据行无法全部被正确保存。
- **按指定的列插入多条记录**
```mysql
INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
向表tb_name中按指定的列插入多条记录
向表tb_name中按指定的列插入多条记录。
- **向多个表插入多条记录**
```mysql
INSERT INTO tb1_name VALUES (field1_value1, ...) (field1_value2, ...) ...
tb2_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
同时向表tb1_name和tb2_name中分别插入多条记录
同时向表tb1_name和tb2_name中分别插入多条记录。
- **同时向多个表按列插入多条记录**
```mysql
INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...
tb2_name (tb2_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
同时向表tb1_name和tb2_name中按列分别插入多条记录
同时向表tb1_name和tb2_name中按列分别插入多条记录。
注意:允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。
注意:
1) 如果时间戳为now,系统将自动使用客户端当前时间作为该记录的时间戳;
2) 允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。
- <a class="anchor" id="auto_create_table"></a>**插入记录时自动建表**
```mysql
INSERT INTO tb_name USING stb_name TAGS (tag_value1, ...) VALUES (field_value1, ...);
```
如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 tags 取值。
如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。
- **插入记录时自动建表,并指定具体的 tags 列**
- **插入记录时自动建表,并指定具体的 TAGS 列**
```mysql
INSERT INTO tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...) VALUES (field_value1, ...);
```
在自动建表时,可以只是指定部分 tags 列的取值,未被指定的 tags 列将取为空值。
在自动建表时,可以只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将取为空值。
- **同时向多个表按列插入多条记录,自动建表**
```mysql
INSERT INTO tb1_name (tb1_field1_name, ...) [USING stb1_name TAGS (tag_value1, ...)] VALUES (field1_value1, ...) (field1_value2, ...) ...
tb2_name (tb2_field1_name, ...) [USING stb2_name TAGS (tag_value2, ...)] VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
以自动建表的方式,同时向表tb1_name和tb2_name中按列分别插入多条记录。
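A hedged sketch of the auto-create insert forms documented above, assuming the `meters` super table from the example later in this file; the table name, tag values, and row values are placeholders, not part of the commit.
```mysql
-- Hypothetical sketch: insert one row into d2001, creating the table automatically
-- from super table meters if it does not exist yet.
INSERT INTO d2001 USING meters TAGS ('Beijing.Haidian', 2) VALUES (NOW, 10.2, 219, 0.32);
```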
**历史记录写入**:可使用IMPORT或者INSERT命令,IMPORT的语法,功能与INSERT完全一样。
...
@@ -471,7 +482,7 @@ Query OK, 9 row(s) in set (0.002022s)
SELECT * FROM d1001;
SELECT d1001.* FROM d1001;
```
在Join查询中,带前缀的\*和不带前缀\*返回的结果有差别,\*返回全部表的所有列数据(不包含标签),带前缀的通配符,则只返回该表的列数据。
在JOIN查询中,带前缀的\*和不带前缀\*返回的结果有差别,\*返回全部表的所有列数据(不包含标签),带前缀的通配符,则只返回该表的列数据。
```mysql
taos> SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts;
ts | current | voltage | phase | ts | current | voltage | phase |
...
@@ -487,7 +498,7 @@ taos> SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts;
Query OK, 1 row(s) in set (0.020443s)
```
在使用SQL函数来进行查询过程中,部分SQL函数支持通配符操作。其中的区别在于:
在使用SQL函数来进行查询的过程中,部分SQL函数支持通配符操作。其中的区别在于:
```count(*)```函数只返回一列。```first```、```last```、```last_row```函数则是返回全部列。
```mysql
...
@@ -522,12 +533,12 @@ Query OK, 2 row(s) in set (0.003112s)
##### 获取标签列的去重取值
从 2.0.15 版本开始,支持在超级表查询标签列时,指定 distinct 关键字,这样将返回指定标签列的所有不重复取值。
从 2.0.15 版本开始,支持在超级表查询标签列时,指定 DISTINCT 关键字,这样将返回指定标签列的所有不重复取值。
```mysql
SELECT DISTINCT tag_name FROM stb_name;
```
注意:目前 distinct 关键字只支持对超级表的标签列进行去重,而不能用于普通列。
注意:目前 DISTINCT 关键字只支持对超级表的标签列进行去重,而不能用于普通列。
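A hedged example of the `DISTINCT` usage documented above, assuming the `meters` super table (with its `location` tag) from the example later in this file:
```mysql
-- Hypothetical sketch: return every distinct value of the location tag of super table meters.
SELECT DISTINCT location FROM meters;
```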
...
@@ -562,7 +573,7 @@ SELECT * FROM d1001;
#### 特殊功能
部分特殊的查询功能可以不使用FROM子句执行。获取当前所在的数据库 database()
部分特殊的查询功能可以不使用FROM子句执行。获取当前所在的数据库 database():
```mysql
taos> SELECT DATABASE();
database() |
...
@@ -570,7 +581,7 @@ taos> SELECT DATABASE();
power |
Query OK, 1 row(s) in set (0.000079s)
```
如果登录的时候没有指定默认数据库,且没有使用```use```命令切换数据,则返回NULL。
如果登录的时候没有指定默认数据库,且没有使用```USE```命令切换数据,则返回NULL。
```mysql
taos> SELECT DATABASE();
database() |
...
@@ -578,7 +589,7 @@ taos> SELECT DATABASE();
NULL |
Query OK, 1 row(s) in set (0.000184s)
```
获取服务器和客户端版本号:
```mysql
taos> SELECT CLIENT_VERSION();
client_version() |
...
@@ -622,7 +633,7 @@ SELECT TBNAME, location FROM meters;
```mysql
SELECT COUNT(TBNAME) FROM meters;
```
以上两个查询均只支持在Where条件子句中添加针对标签(TAGS)的过滤条件。例如:
以上两个查询均只支持在WHERE条件子句中添加针对标签(TAGS)的过滤条件。例如:
```mysql
taos> SELECT TBNAME, location FROM meters;
tbname | location |
...
@@ -648,12 +659,12 @@ Query OK, 1 row(s) in set (0.001091s)
- 参数 LIMIT 控制输出条数,OFFSET 指定从第几条开始输出。LIMIT/OFFSET 对结果集的执行顺序在 ORDER BY 之后。
* 在有 GROUP BY 子句的情况下,LIMIT 参数控制的是每个分组中至多允许输出的条数。
- 参数 SLIMIT 控制由 GROUP BY 指令划分的分组中,至多允许输出几个分组的数据。
- 通过 ">>" 输出结果可以导出到指定文件。
- 通过 “>>” 输出结果可以导出到指定文件。
### 支持的条件过滤操作
| Operation | Note | Applicable Data Types |
| **Operation** | **Note** | **Applicable Data Types** |
| ----------- | ----------------------------- | ------------------------------------- |
| --------------- | ----------------------------- | ------------------------------------- |
| > | larger than | **`timestamp`** and all numeric types |
| < | smaller than | **`timestamp`** and all numeric types |
| >= | larger than or equal to | **`timestamp`** and all numeric types |
...
@@ -669,9 +680,10 @@ Query OK, 1 row(s) in set (0.001091s)
3. 从 2.0.17 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
<!--
### <a class="anchor" id="having"></a>GROUP BY 之后的 HAVING 过滤
<a class="anchor" id="having"></a>
### GROUP BY 之后的 HAVING 过滤
从 2.0.20 版本开始,GROUP BY 之后允许再跟一个 HAVING 子句,对成组后的各组数据再做筛选。HAVING 子句可以使用聚合函数和选择函数作为过滤条件(但暂时不支持 LEASTSQUARES、TOP、BOTTOM、LAST_ROW)。
从 2.0.20.0 版本开始,GROUP BY 之后允许再跟一个 HAVING 子句,对成组后的各组数据再做筛选。HAVING 子句可以使用聚合函数和选择函数作为过滤条件(但暂时不支持 LEASTSQUARES、TOP、BOTTOM、LAST_ROW)。
例如,如下语句只会输出 `AVG(f1) > 0` 的分组:
```mysql
...
@@ -679,7 +691,8 @@ SELECT AVG(f1), SPREAD(f1, f2, st2.f1) FROM st2 WHERE f1 > 0 GROUP BY f1 HAVING
```
-->
### <a class="anchor" id="union"></a>UNION ALL 操作符
<a class="anchor" id="union"></a>
### UNION ALL 操作符
```mysql
SELECT ...
...
@@ -691,37 +704,38 @@ TDengine 支持 UNION ALL 操作符。也就是说,如果多个 SELECT 子句
### SQL 示例
- 对于下面的例子,表tb1用以下语句创建
- 对于下面的例子,表tb1用以下语句创建:
```mysql
CREATE TABLE tb1 (ts TIMESTAMP, col1 INT, col2 FLOAT, col3 BINARY(50));
```
- 查询tb1刚过去的一个小时的所有记录
- 查询tb1刚过去的一个小时的所有记录:
```mysql
SELECT * FROM tb1 WHERE ts >= NOW - 1h;
```
- 查询表tb1从2018-06-01 08:00:00.000 到2018-06-02 08:00:00.000时间范围,并且col3的字符串是'nny'结尾的记录,结果按照时间戳降序
- 查询表tb1从2018-06-01 08:00:00.000 到2018-06-02 08:00:00.000时间范围,并且col3的字符串是'nny'结尾的记录,结果按照时间戳降序:
```mysql
SELECT * FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND ts <= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC;
```
- 查询col1与col2的和,并取名complex, 时间大于2018-06-01 08:00:00.000, col2大于1.2,结果输出仅仅10条记录,从第5条开始
- 查询col1与col2的和,并取名complex, 时间大于2018-06-01 08:00:00.000, col2大于1.2,结果输出仅仅10条记录,从第5条开始:
```mysql
SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5;
```
- 查询过去10分钟的记录,col2的值大于3.14,并且将结果输出到文件 `/home/testoutpu.csv`.
- 查询过去10分钟的记录,col2的值大于3.14,并且将结果输出到文件 `/home/testoutpu.csv`:
```mysql
SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv;
```
## <a class="anchor" id="functions"></a>SQL 函数
<a class="anchor" id="functions"></a>
## SQL 函数
### 聚合函数
...
@@ -741,7 +755,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
说明:
1)可以使用星号\*来替代具体的字段,使用星号(\*)返回全部记录数量。
1)可以使用星号(\*)来替代具体的字段,使用星号(\*)返回全部记录数量。
2)针对同一表的(不包含NULL值)字段查询结果均相同。
...
@@ -1012,7 +1026,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
1)*k*值取值范围1≤*k*≤100;
2)系统同时返回该记录关联的时间戳列。
2)系统同时返回该记录关联的时间戳列;
3)限制:TOP函数不支持FILL子句。
示例:
```mysql
...
@@ -1048,7 +1064,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
1)*k*值取值范围1≤*k*≤100;
2)系统同时返回该记录关联的时间戳列。
2)系统同时返回该记录关联的时间戳列;
3)限制:BOTTOM函数不支持FILL子句。
示例:
```mysql
...
@@ -1124,7 +1142,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
适用于:**表、超级表**。
说明:与last函数不同,last_row不支持时间范围限制,强制返回最后一条记录。
说明:与LAST函数不同,LAST_ROW不支持时间范围限制,强制返回最后一条记录。
限制:LAST_ROW()不能与INTERVAL一起使用。
示例:
```mysql
...
@@ -1233,40 +1253,40 @@ SELECT function_list FROM tb_name
[WHERE where_condition]
INTERVAL (interval [, offset])
[SLIDING sliding]
[FILL ({NONE | VALUE | PREV | NULL | LINEAR})]
[FILL ({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
SELECT function_list FROM stb_name
[WHERE where_condition]
INTERVAL (interval [, offset])
[SLIDING sliding]
[FILL ({ VALUE | PREV | NULL | LINEAR})]
[FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]
[GROUP BY tags]
```
- 聚合时间段的长度由关键词INTERVAL指定,最短时间间隔10毫秒(10a),并且支持偏移(偏移必须小于间隔)。聚合查询中,能够同时执行的聚合和选择函数仅限于单个输出的函数:count、avg、sum 、stddev、leastsquares、percentile、min、max、first、last,不能使用具有多行输出结果的函数(例如:top、bottom、diff以及四则运算)。
- WHERE语句可以指定查询的起止时间和其他过滤条件
- WHERE语句可以指定查询的起止时间和其他过滤条件。
- SLIDING语句用于指定聚合时间段的前向增量
- SLIDING语句用于指定聚合时间段的前向增量。
- FILL语句指定某一时间区间数据缺失的情况下的填充模式。填充模式包括以下几种:
* 不进行填充:NONE(默认填充模式)。
1. 不进行填充:NONE(默认填充模式)。
* VALUE填充:固定值填充,此时需要指定填充的数值。例如:fill(value, 1.23)。
2. VALUE填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。
* NULL填充:使用NULL填充数据。例如:fill(null)。
3. NULL填充:使用NULL填充数据。例如:FILL(NULL)。
* PREV填充:使用前一个非NULL值填充数据。例如:fill(prev)。
4. PREV填充:使用前一个非NULL值填充数据。例如:FILL(PREV)。
5. NEXT填充:使用下一个非NULL值填充数据。例如:FILL(NEXT)。
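As a hedged illustration of the newly documented `FILL(NEXT)` mode, a hypothetical down-sampling query over the `meters` super table from the example further below might look like the following sketch; the time range and interval are placeholders, not part of the commit.
```mysql
-- Hypothetical sketch: 10-minute averages of current over the last 24 hours,
-- filling empty windows with the next non-NULL value (the new NEXT mode).
SELECT AVG(current) FROM meters WHERE ts >= NOW - 1d INTERVAL(10m) FILL(NEXT);
```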
说明:
1. 使用FILL语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过1千万条具有插值的结果。
2. 在时间维度聚合中,返回的结果中时间序列严格单调递增。
3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用group by语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了group by语句分组,则返回结果中每个group内不按照时间序列严格单调递增。
3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用GROUP BY语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了GROUP BY语句分组,则返回结果中每个GROUP内不按照时间序列严格单调递增。
时间聚合也常被用于连续查询场景,可以参考文档 [连续查询(Continuous Query)](https://www.taosdata.com/cn/documentation/advanced-features#continuous-query)。
**示例:**
**示例**:
智能电表的建表语句如下:
```mysql
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```
针对智能电表采集的数据,以10分钟为一个阶段,计算过去24小时的电流数据的平均值、最大值、电流的中位数、以及随着时间变化的电流走势拟合直线。如果没有计算值,用前一个非NULL值填充。
针对智能电表采集的数据,以10分钟为一个阶段,计算过去24小时的电流数据的平均值、最大值、电流的中位数、以及随着时间变化的电流走势拟合直线。如果没有计算值,用前一个非NULL值填充。使用的查询语句如下:
使用的查询语句如下:
```mysql
SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), PERCENTILE(current, 50) FROM meters
...
@@ -1287,15 +1307,15 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
## TAOS SQL其他约定
**group by 的限制**
**GROUP BY 的限制**
TAOS SQL支持对标签、tbname进行group by操作,也支持普通列进行group by,前提是:仅限一列且该列的唯一值小于10万个。
TAOS SQL支持对标签、TBNAME进行GROUP BY操作,也支持普通列进行GROUP BY,前提是:仅限一列且该列的唯一值小于10万个。
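A hedged example of the `GROUP BY` usage described above, assuming the `meters` super table; TBNAME is the pseudo column that carries each child table's name.
```mysql
-- Hypothetical sketch: count the rows stored in each child table of super table meters.
SELECT COUNT(*) FROM meters GROUP BY TBNAME;
```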
**join 操作的限制**
**JOIN 操作的限制**
TAOS SQL支持表之间按主键时间戳来join两张表的列,暂不支持两个表之间聚合后的四则运算。
**is not null 与不为空的表达式适用范围**
**IS NOT NULL 与不为空的表达式适用范围**
is not null 支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。
IS NOT NULL 支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。
documentation20/cn/13.faq/docs.md

...
@@ -26,17 +26,17 @@
## 2. Windows平台下JDBCDriver找不到动态链接库,怎么办?
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/jdbcdriver找不到动态链接库/)
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)
## 3. 创建数据表时提示more dnodes are needed
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/创建数据表时提示more-dnodes-are-needed/)
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)
## 4. 如何让TDengine crash时生成core文件?
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/tdengine-crash时生成core文件的方法/)
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)
## 5. 遇到错误"Unable to establish connection", 我怎么办?
## 5. 遇到错误“Unable to establish connection”, 我怎么办?
客户端遇到连接故障,请按照下面的步骤进行检查:
...
@@ -51,13 +51,13 @@
...
@@ -51,13 +51,13 @@
4. Confirm that the client connects with the correct server FQDN (Fully Qualified Domain Name; run the Linux command hostname -f on the server to obtain it). For FQDN configuration see: [一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html).
-5. Ping the server FQDN; if there is no response, check your network, the DNS settings, or the hosts file of the client machine.
+5. Ping the server FQDN; if there is no response, check your network, the DNS settings, or the hosts file of the client machine. If a TDengine cluster is deployed, the client must be able to ping the FQDN of every cluster node.
6. Check the firewall settings (Ubuntu: ufw status; CentOS: firewall-cmd --list-port) and confirm that TCP/UDP ports 6030–6042 are open.
7. For JDBC (and similarly ODBC, Python, Go) connections on Linux, make sure *libtaos.so* is in the directory */usr/local/taos/driver* and that */usr/local/taos/driver* is in the library search path *LD_LIBRARY_PATH*.
-8. For JDBC, ODBC, Python, Go connections on windows, make sure *C:\TDengine\driver\taos.dll* is in the system library search path (it is recommended to put *taos.dll* in *C:\Windows\System32*).
+8. For JDBC, ODBC, Python, Go connections on Windows, make sure *C:\TDengine\driver\taos.dll* is in the system library search path (it is recommended to put *taos.dll* in *C:\Windows\System32*).
9. If the connection failure still cannot be ruled out
...
@@ -70,7 +70,8 @@
10. You can also use the network connectivity test built into the taos program to verify that the specified ports between server and client are reachable (both TCP and UDP): [TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html).
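For an application-side version of these checks, a minimal C probe that connects to an explicit FQDN and port and prints the client-side reason on failure is often enough to tell a name-resolution problem from a blocked port. A hedged sketch — the host name and port below are placeholders, not values from this document:

```c
// Hedged sketch: probe connectivity to a TDengine server with the C client.
#include <stdio.h>
#include <taos.h>

int main(void) {
    taos_init();
    TAOS *conn = taos_connect("tdengine-server.example.com", "root", "taosdata",
                              NULL, 6030);   // placeholder FQDN and default port
    if (conn == NULL) {
        // An unresolved FQDN and a closed port report different reasons here.
        fprintf(stderr, "Unable to establish connection: %s\n", taos_errstr(NULL));
        taos_cleanup();
        return 1;
    }
    printf("connection OK\n");
    taos_close(conn);
    taos_cleanup();
    return 0;
}
```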
-## 6. What should I do about the errors “Unexpected generic error in RPC” or "TDengine Error: Unable to resolve FQDN"?
+## 6. What should I do about the errors “Unexpected generic error in RPC” or “Unable to resolve FQDN”?

This error occurs because the client or a data node cannot resolve the FQDN (Fully Qualified Domain Name). For the TAOS shell or a client application, check the following:

1. Check that the FQDN of the server you connect to is correct; for FQDN configuration see [一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html).
...
@@ -102,7 +103,7 @@ TDengine 目前尚不支持删除功能,未来根据用户需求可能会支
Batch insertion: each INSERT statement can write multiple records into one table, and can also write multiple records into several tables at once.

-## 12. Chinese characters in nchar data inserted on windows are garbled. How do I fix it?
+## 12. Chinese characters in nchar data inserted on Windows are garbled. How do I fix it?

If nchar data inserted on Windows contains Chinese characters, first make sure the system region is set to China (this can be done in Control Panel); the `taos` client in cmd should then work. If you develop a Java application in an IDE such as Eclipse or IntelliJ, set the file encoding of the IDE to GBK (Java's default encoding) and initialise the client configuration when creating the Connection, as follows:

```JAVA
...
```
@@ -115,15 +116,15 @@ Connection = DriverManager.getConnection(url, properties);
## 13. JDBC error: the excuted SQL is not a DML or a DDL?

Please upgrade to the latest JDBC driver:

```xml
<dependency>
  <groupId>com.taosdata.jdbc</groupId>
  <artifactId>taos-jdbcdriver</artifactId>
- <version>2.0.4</version>
+ <version>2.0.27</version>
</dependency>
```

## 14. taos connect failed, reason: invalid timestamp

The most common cause is that the server and client clocks are not synchronised. Synchronise them against a time server (ntpdate on Linux; on Windows enable automatic time synchronisation in the system time settings).
...
@@ -157,7 +158,8 @@
This clears all log files generated by the client on the local machine.

-## <a class="anchor" id="timezone"></a>18. How is the time zone of a timestamp handled?
+<a class="anchor" id="timezone"></a>
+## 18. How is the time zone of a timestamp handled?

In TDengine the time zone of a timestamp is always handled by the client and never by the server. Concretely, the client converts the timestamps in a SQL statement to UTC (i.e. a Unix timestamp) before handing them to the server for writes and queries; when reading, the server likewise returns raw data in UTC, and the client converts the timestamps to the time zone required by the local system before displaying them.

...
@@ -167,12 +169,13 @@
3. If a timezone is specified explicitly when the connection is created in a C/C++/Java/Python connector driver, that setting takes precedence; for example, the JDBC URL of the Java connector has a timezone parameter.
4. In SQL statements you can also write a Unix timestamp directly (e.g. `1554984068000`) or a timestamp string carrying a time zone, i.e. RFC 3339 format (e.g. `2013-04-12T15:52:01.123+08:00`) or ISO-8601 format (e.g. `2013-04-12T15:52:01.123+0800`); such timestamps are then unaffected by any other time-zone setting.

-## <a class="anchor" id="port"></a>19. Which network ports does TDengine use?
+<a class="anchor" id="port"></a>
+## 19. Which network ports does TDengine use?

In TDengine 2.0 the following network ports are used (described assuming the default port 6030; they change if the configuration file is modified). Administrators can adjust firewall settings accordingly:

| Protocol | Default port | Purpose | How to change |
-| --- | --------- | ------------------------------- | ------------------------------ |
+| :--- | :-------- | :---------------------------------- | :------------------------------ |
| TCP | 6030 | Communication between client and server. | Set by serverPort in the configuration file. |
| TCP | 6035 | Inter-node communication in a multi-node cluster. | Follows the serverPort setting. |
| TCP | 6040 | Inter-node data synchronisation in a multi-node cluster. | Follows the serverPort setting. |
...
snap/snapcraft.yaml

 name: tdengine
 base: core18
-version: '2.1.0.0'
+version: '2.1.1.0'
 icon: snap/gui/t-dengine.svg
 summary: an open-source big data platform designed and optimized for IoT.
 description: |
...
@@ -73,7 +73,7 @@ parts:
       - usr/bin/taosd
       - usr/bin/taos
       - usr/bin/taosdemo
-      - usr/lib/libtaos.so.2.1.0.0
+      - usr/lib/libtaos.so.2.1.1.0
       - usr/lib/libtaos.so.1
       - usr/lib/libtaos.so
...
grafanaplugin @ 3530c6df

-Subproject commit 32e2c97a4cf7bedaa99f5d6dd8cb036e7f4470df
+Subproject commit 3530c6df097134a410bacec6b3cd013ef38a61aa
src/connector/odbc/examples/c/main.c

...
@@ -312,11 +312,7 @@ static int test_sqls_in_stmt(SQLHENV env, SQLHDBC conn, SQLHSTMT stmt, const cha
     size_t len = 0;
     ssize_t n = 0;
-#ifdef _MSC_VER
-    n = tgetline(&line, &len, f);
-#else
-    n = getline(&line, &len, f);
-#endif
+    n = taosGetlineImp(&line, &len, f);
     if (n==-1) break;
     const char *p = NULL;
...
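The hunk above drops the `_MSC_VER` special case in favour of a single `taosGetlineImp` call. As a sketch of why such a wrapper is needed at all (MSVC has no POSIX `getline`), a minimal fallback could look like the following; `my_getline` is a hypothetical name used only for illustration and is not the helper used by this commit:

```c
// Hedged sketch of a portable getline-style helper built on fgets().
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long my_getline(char **line, size_t *len, FILE *f) {
    char buf[1024];                                      // long lines are truncated in this sketch
    if (fgets(buf, sizeof(buf), f) == NULL) return -1;   // EOF or read error
    size_t n = strlen(buf);
    if (*line == NULL || *len < n + 1) {
        char *p = realloc(*line, n + 1);
        if (p == NULL) return -1;
        *line = p;
        *len = n + 1;
    }
    memcpy(*line, buf, n + 1);
    return (long)n;                                      // bytes read, like getline()
}
```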
src/kit/taosdemo/taosdemo.c

...
@@ -19,6 +19,7 @@
  */

 #include <stdint.h>
+#include <taos.h>
 #define _GNU_SOURCE
 #define CURL_STATICLIB
...
@@ -229,13 +230,13 @@ typedef struct SArguments_S {
     uint32_t num_of_threads;
     uint64_t insert_interval;
     int64_t  query_times;
-    uint64_t interlace_rows;
-    uint64_t num_of_RPR;                  // num_of_records_per_req
+    uint32_t interlace_rows;
+    uint32_t num_of_RPR;                  // num_of_records_per_req
     uint64_t max_sql_len;
     int64_t  num_of_tables;
     int64_t  num_of_DPT;
     int      abort;
-    int      disorderRatio;               // 0: no disorder, >0: x%
+    uint32_t disorderRatio;               // 0: no disorder, >0: x%
     int      disorderRange;               // ms or us by database precision
     uint32_t method_of_delete;
     char **  arg_list;
@@ -258,12 +259,12 @@ typedef struct SSuperTable_S {
...
@@ -258,12 +259,12 @@ typedef struct SSuperTable_S {
uint8_t
autoCreateTable
;
// 0: create sub table, 1: auto create sub table
uint8_t
autoCreateTable
;
// 0: create sub table, 1: auto create sub table
char
childTblPrefix
[
MAX_TB_NAME_SIZE
];
char
childTblPrefix
[
MAX_TB_NAME_SIZE
];
char
dataSource
[
MAX_TB_NAME_SIZE
+
1
];
// rand_gen or sample
char
dataSource
[
MAX_TB_NAME_SIZE
+
1
];
// rand_gen or sample
uint16_t
i
nsertMode
;
// 0: taosc, 1: rest, 2: stmt
uint16_t
i
face
;
// 0: taosc, 1: rest, 2: stmt
int64_t
childTblLimit
;
int64_t
childTblLimit
;
uint64_t
childTblOffset
;
uint64_t
childTblOffset
;
// int multiThreadWriteOneTbl; // 0: no, 1: yes
// int multiThreadWriteOneTbl; // 0: no, 1: yes
uint
64
_t
interlaceRows
;
//
uint
32
_t
interlaceRows
;
//
int
disorderRatio
;
// 0: no disorder, >0: x%
int
disorderRatio
;
// 0: no disorder, >0: x%
int
disorderRange
;
// ms or us by database precision
int
disorderRange
;
// ms or us by database precision
uint64_t
maxSqlLen
;
//
uint64_t
maxSqlLen
;
//
...
@@ -375,7 +376,7 @@ typedef struct SDbs_S {
...
@@ -375,7 +376,7 @@ typedef struct SDbs_S {
typedef
struct
SpecifiedQueryInfo_S
{
typedef
struct
SpecifiedQueryInfo_S
{
uint64_t
queryInterval
;
// 0: unlimit > 0 loop/s
uint64_t
queryInterval
;
// 0: unlimit > 0 loop/s
uint
64
_t
concurrent
;
uint
32
_t
concurrent
;
uint64_t
sqlCount
;
uint64_t
sqlCount
;
uint32_t
asyncMode
;
// 0: sync, 1: async
uint32_t
asyncMode
;
// 0: sync, 1: async
uint64_t
subscribeInterval
;
// ms
uint64_t
subscribeInterval
;
// ms
...
@@ -386,6 +387,9 @@ typedef struct SpecifiedQueryInfo_S {
...
@@ -386,6 +387,9 @@ typedef struct SpecifiedQueryInfo_S {
char
result
[
MAX_QUERY_SQL_COUNT
][
MAX_FILE_NAME_LEN
+
1
];
char
result
[
MAX_QUERY_SQL_COUNT
][
MAX_FILE_NAME_LEN
+
1
];
int
resubAfterConsume
[
MAX_QUERY_SQL_COUNT
];
int
resubAfterConsume
[
MAX_QUERY_SQL_COUNT
];
TAOS_SUB
*
tsub
[
MAX_QUERY_SQL_COUNT
];
TAOS_SUB
*
tsub
[
MAX_QUERY_SQL_COUNT
];
char
topic
[
MAX_QUERY_SQL_COUNT
][
32
];
int
consumed
[
MAX_QUERY_SQL_COUNT
];
TAOS_RES
*
res
[
MAX_QUERY_SQL_COUNT
];
uint64_t
totalQueried
;
uint64_t
totalQueried
;
}
SpecifiedQueryInfo
;
}
SpecifiedQueryInfo
;
...
@@ -431,7 +435,8 @@ typedef struct SThreadInfo_S {
...
@@ -431,7 +435,8 @@ typedef struct SThreadInfo_S {
int
threadID
;
int
threadID
;
char
db_name
[
MAX_DB_NAME_SIZE
+
1
];
char
db_name
[
MAX_DB_NAME_SIZE
+
1
];
uint32_t
time_precision
;
uint32_t
time_precision
;
char
fp
[
4096
];
char
filePath
[
4096
];
FILE
*
fp
;
char
tb_prefix
[
MAX_TB_NAME_SIZE
];
char
tb_prefix
[
MAX_TB_NAME_SIZE
];
uint64_t
start_table_from
;
uint64_t
start_table_from
;
uint64_t
end_table_to
;
uint64_t
end_table_to
;
...
@@ -546,7 +551,9 @@ static int createDatabasesAndStables();
...
@@ -546,7 +551,9 @@ static int createDatabasesAndStables();
static
void
createChildTables
();
static
void
createChildTables
();
static
int
queryDbExec
(
TAOS
*
taos
,
char
*
command
,
QUERY_TYPE
type
,
bool
quiet
);
static
int
queryDbExec
(
TAOS
*
taos
,
char
*
command
,
QUERY_TYPE
type
,
bool
quiet
);
static
int
postProceSql
(
char
*
host
,
struct
sockaddr_in
*
pServAddr
,
static
int
postProceSql
(
char
*
host
,
struct
sockaddr_in
*
pServAddr
,
uint16_t
port
,
char
*
sqlstr
,
char
*
resultFile
);
uint16_t
port
,
char
*
sqlstr
,
threadInfo
*
pThreadInfo
);
static
int64_t
getTSRandTail
(
int64_t
timeStampStep
,
int32_t
seq
,
int
disorderRatio
,
int
disorderRange
);
/* ************ Global variables ************ */
/* ************ Global variables ************ */
...
@@ -1066,7 +1073,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
     }
     printf("# Insertion interval:         %"PRIu64"\n",
             arguments->insert_interval);
-    printf("# Number of records per req:  %"PRIu64"\n",
+    printf("# Number of records per req:  %ud\n",
             arguments->num_of_RPR);
     printf("# Max SQL length:             %"PRIu64"\n",
             arguments->max_sql_len);
@@ -1125,10 +1132,11 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
...
@@ -1125,10 +1132,11 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
}
}
}
}
verbosePrint
(
"%s() LN%d - command: %s
\n
"
,
__func__
,
__LINE__
,
command
);
if
(
code
!=
0
)
{
if
(
code
!=
0
)
{
if
(
!
quiet
)
{
if
(
!
quiet
)
{
debugPrint
(
"%s() LN%d - command: %s
\n
"
,
__func__
,
__LINE__
,
command
);
errorPrint
(
"Failed to execute %s, reason: %s
\n
"
,
errorPrint
(
"Failed to execute %s, reason: %s
\n
"
,
command
,
taos_errstr
(
res
));
command
,
taos_errstr
(
res
));
}
}
taos_free_result
(
res
);
taos_free_result
(
res
);
//taos_close(taos);
//taos_close(taos);
...
@@ -1145,24 +1153,22 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
...
@@ -1145,24 +1153,22 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
return
0
;
return
0
;
}
}
static
void
appendResultBufToFile
(
char
*
resultBuf
,
char
*
resultFile
)
static
void
appendResultBufToFile
(
char
*
resultBuf
,
threadInfo
*
pThreadInfo
)
{
{
FILE
*
fp
=
NULL
;
pThreadInfo
->
fp
=
fopen
(
pThreadInfo
->
filePath
,
"at"
);
if
(
resultFile
[
0
]
!=
0
)
{
if
(
pThreadInfo
->
fp
==
NULL
)
{
fp
=
fopen
(
resultFile
,
"at"
);
if
(
fp
==
NULL
)
{
errorPrint
(
errorPrint
(
"%s() LN%d, failed to open result file: %s, result will not save to file
\n
"
,
"%s() LN%d, failed to open result file: %s, result will not save to file
\n
"
,
__func__
,
__LINE__
,
resultFile
);
__func__
,
__LINE__
,
pThreadInfo
->
filePath
);
return
;
return
;
}
}
}
fprintf
(
fp
,
"%s"
,
resultBuf
);
fprintf
(
pThreadInfo
->
fp
,
"%s"
,
resultBuf
);
tmfclose
(
fp
);
tmfclose
(
pThreadInfo
->
fp
);
pThreadInfo
->
fp
=
NULL
;
}
}
static
void
appendResultToFile
(
TAOS_RES
*
res
,
char
*
resultFile
)
{
static
void
fetchResult
(
TAOS_RES
*
res
,
threadInfo
*
pThreadInfo
)
{
TAOS_ROW
row
=
NULL
;
TAOS_ROW
row
=
NULL
;
int
num_rows
=
0
;
int
num_rows
=
0
;
int
num_fields
=
taos_field_count
(
res
);
int
num_fields
=
taos_field_count
(
res
);
...
@@ -1180,8 +1186,9 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
...
@@ -1180,8 +1186,9 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
// fetch the records row by row
// fetch the records row by row
while
((
row
=
taos_fetch_row
(
res
)))
{
while
((
row
=
taos_fetch_row
(
res
)))
{
if
(
totalLen
>=
100
*
1024
*
1024
-
32000
)
{
if
((
strlen
(
pThreadInfo
->
filePath
)
>
0
)
appendResultBufToFile
(
databuf
,
resultFile
);
&&
(
totalLen
>=
100
*
1024
*
1024
-
32000
))
{
appendResultBufToFile
(
databuf
,
pThreadInfo
);
totalLen
=
0
;
totalLen
=
0
;
memset
(
databuf
,
0
,
100
*
1024
*
1024
);
memset
(
databuf
,
0
,
100
*
1024
*
1024
);
}
}
...
@@ -1194,8 +1201,10 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
...
@@ -1194,8 +1201,10 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
}
}
verbosePrint
(
"%s() LN%d, databuf=%s resultFile=%s
\n
"
,
verbosePrint
(
"%s() LN%d, databuf=%s resultFile=%s
\n
"
,
__func__
,
__LINE__
,
databuf
,
resultFile
);
__func__
,
__LINE__
,
databuf
,
pThreadInfo
->
filePath
);
appendResultBufToFile
(
databuf
,
resultFile
);
if
(
strlen
(
pThreadInfo
->
filePath
)
>
0
)
{
appendResultBufToFile
(
databuf
,
pThreadInfo
);
}
free
(
databuf
);
free
(
databuf
);
}
}
...
@@ -1211,16 +1220,14 @@ static void selectAndGetResult(
...
@@ -1211,16 +1220,14 @@ static void selectAndGetResult(
return
;
return
;
}
}
if
((
strlen
(
pThreadInfo
->
fp
)))
{
fetchResult
(
res
,
pThreadInfo
);
appendResultToFile
(
res
,
pThreadInfo
->
fp
);
}
taos_free_result
(
res
);
taos_free_result
(
res
);
}
else
if
(
0
==
strncasecmp
(
g_queryInfo
.
queryMode
,
"rest"
,
strlen
(
"rest"
)))
{
}
else
if
(
0
==
strncasecmp
(
g_queryInfo
.
queryMode
,
"rest"
,
strlen
(
"rest"
)))
{
int
retCode
=
postProceSql
(
int
retCode
=
postProceSql
(
g_queryInfo
.
host
,
&
(
g_queryInfo
.
serv_addr
),
g_queryInfo
.
port
,
g_queryInfo
.
host
,
&
(
g_queryInfo
.
serv_addr
),
g_queryInfo
.
port
,
command
,
command
,
pThreadInfo
->
fp
);
pThreadInfo
);
if
(
0
!=
retCode
)
{
if
(
0
!=
retCode
)
{
printf
(
"====restful return fail, threadID[%d]
\n
"
,
pThreadInfo
->
threadID
);
printf
(
"====restful return fail, threadID[%d]
\n
"
,
pThreadInfo
->
threadID
);
}
}
...
@@ -1358,7 +1365,7 @@ static int printfInsertMeta() {
...
@@ -1358,7 +1365,7 @@ static int printfInsertMeta() {
g_Dbs
.
threadCountByCreateTbl
);
g_Dbs
.
threadCountByCreateTbl
);
printf
(
"top insert interval:
\033
[33m%"
PRIu64
"
\033
[0m
\n
"
,
printf
(
"top insert interval:
\033
[33m%"
PRIu64
"
\033
[0m
\n
"
,
g_args
.
insert_interval
);
g_args
.
insert_interval
);
printf
(
"number of records per req:
\033
[33m%
"
PRIu64
"
\033
[0m
\n
"
,
printf
(
"number of records per req:
\033
[33m%
ud
\033
[0m
\n
"
,
g_args
.
num_of_RPR
);
g_args
.
num_of_RPR
);
printf
(
"max sql length:
\033
[33m%"
PRIu64
"
\033
[0m
\n
"
,
printf
(
"max sql length:
\033
[33m%"
PRIu64
"
\033
[0m
\n
"
,
g_args
.
max_sql_len
);
g_args
.
max_sql_len
);
...
@@ -1464,9 +1471,9 @@ static int printfInsertMeta() {
...
@@ -1464,9 +1471,9 @@ static int printfInsertMeta() {
g_Dbs
.
db
[
i
].
superTbls
[
j
].
childTblPrefix
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
childTblPrefix
);
printf
(
" dataSource:
\033
[33m%s
\033
[0m
\n
"
,
printf
(
" dataSource:
\033
[33m%s
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
dataSource
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
dataSource
);
printf
(
" i
nsertMode:
\033
[33m%s
\033
[0m
\n
"
,
printf
(
" i
face:
\033
[33m%s
\033
[0m
\n
"
,
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
nsertMod
e
==
TAOSC_IFACE
)
?
"taosc"
:
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
fac
e
==
TAOSC_IFACE
)
?
"taosc"
:
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
nsertMod
e
==
REST_IFACE
)
?
"rest"
:
"stmt"
);
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
fac
e
==
REST_IFACE
)
?
"rest"
:
"stmt"
);
if
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
childTblLimit
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
childTblLimit
>
0
)
{
printf
(
" childTblLimit:
\033
[33m%"
PRId64
"
\033
[0m
\n
"
,
printf
(
" childTblLimit:
\033
[33m%"
PRId64
"
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
childTblLimit
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
childTblLimit
);
...
@@ -1484,7 +1491,7 @@ static int printfInsertMeta() {
...
@@ -1484,7 +1491,7 @@ static int printfInsertMeta() {
printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
}
}
*/
*/
printf
(
" interlaceRows:
\033
[33m%
"
PRIu64
"
\033
[0m
\n
"
,
printf
(
" interlaceRows:
\033
[33m%
ud
\033
[0m
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
);
if
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
>
0
)
{
...
@@ -1562,7 +1569,7 @@ static void printfInsertMetaToFile(FILE* fp) {
...
@@ -1562,7 +1569,7 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf
(
fp
,
"resultFile: %s
\n
"
,
g_Dbs
.
resultFile
);
fprintf
(
fp
,
"resultFile: %s
\n
"
,
g_Dbs
.
resultFile
);
fprintf
(
fp
,
"thread num of insert data: %d
\n
"
,
g_Dbs
.
threadCount
);
fprintf
(
fp
,
"thread num of insert data: %d
\n
"
,
g_Dbs
.
threadCount
);
fprintf
(
fp
,
"thread num of create table: %d
\n
"
,
g_Dbs
.
threadCountByCreateTbl
);
fprintf
(
fp
,
"thread num of create table: %d
\n
"
,
g_Dbs
.
threadCountByCreateTbl
);
fprintf
(
fp
,
"number of records per req: %
"
PRIu64
"
\n
"
,
g_args
.
num_of_RPR
);
fprintf
(
fp
,
"number of records per req: %
ud
\n
"
,
g_args
.
num_of_RPR
);
fprintf
(
fp
,
"max sql length: %"
PRIu64
"
\n
"
,
g_args
.
max_sql_len
);
fprintf
(
fp
,
"max sql length: %"
PRIu64
"
\n
"
,
g_args
.
max_sql_len
);
fprintf
(
fp
,
"database count: %d
\n
"
,
g_Dbs
.
dbCount
);
fprintf
(
fp
,
"database count: %d
\n
"
,
g_Dbs
.
dbCount
);
...
@@ -1654,12 +1661,12 @@ static void printfInsertMetaToFile(FILE* fp) {
...
@@ -1654,12 +1661,12 @@ static void printfInsertMetaToFile(FILE* fp) {
g_Dbs
.
db
[
i
].
superTbls
[
j
].
childTblPrefix
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
childTblPrefix
);
fprintf
(
fp
,
" dataSource: %s
\n
"
,
fprintf
(
fp
,
" dataSource: %s
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
dataSource
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
dataSource
);
fprintf
(
fp
,
" i
nsertMode:
%s
\n
"
,
fprintf
(
fp
,
" i
face:
%s
\n
"
,
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
nsertMod
e
==
TAOSC_IFACE
)
?
"taosc"
:
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
fac
e
==
TAOSC_IFACE
)
?
"taosc"
:
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
nsertMod
e
==
REST_IFACE
)
?
"rest"
:
"stmt"
);
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
fac
e
==
REST_IFACE
)
?
"rest"
:
"stmt"
);
fprintf
(
fp
,
" insertRows: %"
PRId64
"
\n
"
,
fprintf
(
fp
,
" insertRows: %"
PRId64
"
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
insertRows
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
insertRows
);
fprintf
(
fp
,
" interlace rows: %
"
PRIu64
"
\n
"
,
fprintf
(
fp
,
" interlace rows: %
ud
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
);
if
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
>
0
)
{
if
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
>
0
)
{
fprintf
(
fp
,
" stable insert interval: %"
PRIu64
"
\n
"
,
fprintf
(
fp
,
" stable insert interval: %"
PRIu64
"
\n
"
,
...
@@ -1672,7 +1679,7 @@ static void printfInsertMetaToFile(FILE* fp) {
...
@@ -1672,7 +1679,7 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " multiThreadWriteOneTbl: yes\n");
fprintf(fp, " multiThreadWriteOneTbl: yes\n");
}
}
*/
*/
fprintf
(
fp
,
" interlaceRows: %
"
PRIu64
"
\n
"
,
fprintf
(
fp
,
" interlaceRows: %
ud
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
);
fprintf
(
fp
,
" disorderRange: %d
\n
"
,
fprintf
(
fp
,
" disorderRange: %d
\n
"
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
disorderRange
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
disorderRange
);
...
@@ -1757,7 +1764,7 @@ static void printfQueryMeta() {
...
@@ -1757,7 +1764,7 @@ static void printfQueryMeta() {
printf
(
"query interval:
\033
[33m%"
PRIu64
" ms
\033
[0m
\n
"
,
printf
(
"query interval:
\033
[33m%"
PRIu64
" ms
\033
[0m
\n
"
,
g_queryInfo
.
specifiedQueryInfo
.
queryInterval
);
g_queryInfo
.
specifiedQueryInfo
.
queryInterval
);
printf
(
"top query times:
\033
[33m%"
PRIu64
"
\033
[0m
\n
"
,
g_args
.
query_times
);
printf
(
"top query times:
\033
[33m%"
PRIu64
"
\033
[0m
\n
"
,
g_args
.
query_times
);
printf
(
"concurrent:
\033
[33m%
"
PRIu64
"
\033
[0m
\n
"
,
printf
(
"concurrent:
\033
[33m%
d
\033
[0m
\n
"
,
g_queryInfo
.
specifiedQueryInfo
.
concurrent
);
g_queryInfo
.
specifiedQueryInfo
.
concurrent
);
printf
(
"mod:
\033
[33m%s
\033
[0m
\n
"
,
printf
(
"mod:
\033
[33m%s
\033
[0m
\n
"
,
(
g_queryInfo
.
specifiedQueryInfo
.
asyncMode
)
?
"async"
:
"sync"
);
(
g_queryInfo
.
specifiedQueryInfo
.
asyncMode
)
?
"async"
:
"sync"
);
...
@@ -2054,13 +2061,13 @@ static void printfQuerySystemInfo(TAOS * taos) {
...
@@ -2054,13 +2061,13 @@ static void printfQuerySystemInfo(TAOS * taos) {
// show variables
// show variables
res
=
taos_query
(
taos
,
"show variables;"
);
res
=
taos_query
(
taos
,
"show variables;"
);
//
appendResultToFile
(res, filename);
//
fetchResult
(res, filename);
xDumpResultToFile
(
filename
,
res
);
xDumpResultToFile
(
filename
,
res
);
// show dnodes
// show dnodes
res
=
taos_query
(
taos
,
"show dnodes;"
);
res
=
taos_query
(
taos
,
"show dnodes;"
);
xDumpResultToFile
(
filename
,
res
);
xDumpResultToFile
(
filename
,
res
);
//
appendResultToFile
(res, filename);
//
fetchResult
(res, filename);
// show databases
// show databases
res
=
taos_query
(
taos
,
"show databases;"
);
res
=
taos_query
(
taos
,
"show databases;"
);
...
@@ -2096,7 +2103,7 @@ static void printfQuerySystemInfo(TAOS * taos) {
...
@@ -2096,7 +2103,7 @@ static void printfQuerySystemInfo(TAOS * taos) {
}
}
static
int
postProceSql
(
char
*
host
,
struct
sockaddr_in
*
pServAddr
,
uint16_t
port
,
static
int
postProceSql
(
char
*
host
,
struct
sockaddr_in
*
pServAddr
,
uint16_t
port
,
char
*
sqlstr
,
char
*
resultFile
)
char
*
sqlstr
,
threadInfo
*
pThreadInfo
)
{
{
char
*
req_fmt
=
"POST %s HTTP/1.1
\r\n
Host: %s:%d
\r\n
Accept: */*
\r\n
Authorization: Basic %s
\r\n
Content-Length: %d
\r\n
Content-Type: application/x-www-form-urlencoded
\r\n\r\n
%s"
;
char
*
req_fmt
=
"POST %s HTTP/1.1
\r\n
Host: %s:%d
\r\n
Accept: */*
\r\n
Authorization: Basic %s
\r\n
Content-Length: %d
\r\n
Content-Type: application/x-www-form-urlencoded
\r\n\r\n
%s"
;
...
@@ -2232,8 +2239,8 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
...
@@ -2232,8 +2239,8 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
response_buf
[
RESP_BUF_LEN
-
1
]
=
'\0'
;
response_buf
[
RESP_BUF_LEN
-
1
]
=
'\0'
;
printf
(
"Response:
\n
%s
\n
"
,
response_buf
);
printf
(
"Response:
\n
%s
\n
"
,
response_buf
);
if
(
resultFile
)
{
if
(
strlen
(
pThreadInfo
->
filePath
)
>
0
)
{
appendResultBufToFile
(
response_buf
,
resultFile
);
appendResultBufToFile
(
response_buf
,
pThreadInfo
);
}
}
free
(
request_buf
);
free
(
request_buf
);
...
@@ -2725,8 +2732,6 @@ static int createSuperTable(
...
@@ -2725,8 +2732,6 @@ static int createSuperTable(
snprintf
(
command
,
BUFFER_SIZE
,
snprintf
(
command
,
BUFFER_SIZE
,
"create table if not exists %s.%s (ts timestamp%s) tags %s"
,
"create table if not exists %s.%s (ts timestamp%s) tags %s"
,
dbName
,
superTbl
->
sTblName
,
cols
,
tags
);
dbName
,
superTbl
->
sTblName
,
cols
,
tags
);
verbosePrint
(
"%s() LN%d: %s
\n
"
,
__func__
,
__LINE__
,
command
);
if
(
0
!=
queryDbExec
(
taos
,
command
,
NO_INSERT_TYPE
,
false
))
{
if
(
0
!=
queryDbExec
(
taos
,
command
,
NO_INSERT_TYPE
,
false
))
{
errorPrint
(
"create supertable %s failed!
\n\n
"
,
errorPrint
(
"create supertable %s failed!
\n\n
"
,
superTbl
->
sTblName
);
superTbl
->
sTblName
);
...
@@ -2749,7 +2754,6 @@ static int createDatabasesAndStables() {
...
@@ -2749,7 +2754,6 @@ static int createDatabasesAndStables() {
for
(
int
i
=
0
;
i
<
g_Dbs
.
dbCount
;
i
++
)
{
for
(
int
i
=
0
;
i
<
g_Dbs
.
dbCount
;
i
++
)
{
if
(
g_Dbs
.
db
[
i
].
drop
)
{
if
(
g_Dbs
.
db
[
i
].
drop
)
{
sprintf
(
command
,
"drop database if exists %s;"
,
g_Dbs
.
db
[
i
].
dbName
);
sprintf
(
command
,
"drop database if exists %s;"
,
g_Dbs
.
db
[
i
].
dbName
);
verbosePrint
(
"%s() %d command: %s
\n
"
,
__func__
,
__LINE__
,
command
);
if
(
0
!=
queryDbExec
(
taos
,
command
,
NO_INSERT_TYPE
,
false
))
{
if
(
0
!=
queryDbExec
(
taos
,
command
,
NO_INSERT_TYPE
,
false
))
{
taos_close
(
taos
);
taos_close
(
taos
);
return
-
1
;
return
-
1
;
...
@@ -2822,7 +2826,6 @@ static int createDatabasesAndStables() {
...
@@ -2822,7 +2826,6 @@ static int createDatabasesAndStables() {
" precision
\'
%s
\'
;"
,
g_Dbs
.
db
[
i
].
dbCfg
.
precision
);
" precision
\'
%s
\'
;"
,
g_Dbs
.
db
[
i
].
dbCfg
.
precision
);
}
}
debugPrint
(
"%s() %d command: %s
\n
"
,
__func__
,
__LINE__
,
command
);
if
(
0
!=
queryDbExec
(
taos
,
command
,
NO_INSERT_TYPE
,
false
))
{
if
(
0
!=
queryDbExec
(
taos
,
command
,
NO_INSERT_TYPE
,
false
))
{
taos_close
(
taos
);
taos_close
(
taos
);
errorPrint
(
"
\n
create database %s failed!
\n\n
"
,
g_Dbs
.
db
[
i
].
dbName
);
errorPrint
(
"
\n
create database %s failed!
\n\n
"
,
g_Dbs
.
db
[
i
].
dbName
);
...
@@ -2839,8 +2842,6 @@ static int createDatabasesAndStables() {
...
@@ -2839,8 +2842,6 @@ static int createDatabasesAndStables() {
for
(
uint64_t
j
=
0
;
j
<
g_Dbs
.
db
[
i
].
superTblCount
;
j
++
)
{
for
(
uint64_t
j
=
0
;
j
<
g_Dbs
.
db
[
i
].
superTblCount
;
j
++
)
{
sprintf
(
command
,
"describe %s.%s;"
,
g_Dbs
.
db
[
i
].
dbName
,
sprintf
(
command
,
"describe %s.%s;"
,
g_Dbs
.
db
[
i
].
dbName
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
sTblName
);
g_Dbs
.
db
[
i
].
superTbls
[
j
].
sTblName
);
verbosePrint
(
"%s() %d command: %s
\n
"
,
__func__
,
__LINE__
,
command
);
ret
=
queryDbExec
(
taos
,
command
,
NO_INSERT_TYPE
,
true
);
ret
=
queryDbExec
(
taos
,
command
,
NO_INSERT_TYPE
,
true
);
if
((
ret
!=
0
)
||
(
g_Dbs
.
db
[
i
].
drop
))
{
if
((
ret
!=
0
)
||
(
g_Dbs
.
db
[
i
].
drop
))
{
...
@@ -2944,7 +2945,6 @@ static void* createTable(void *sarg)
...
@@ -2944,7 +2945,6 @@ static void* createTable(void *sarg)
}
}
len
=
0
;
len
=
0
;
verbosePrint
(
"%s() LN%d %s
\n
"
,
__func__
,
__LINE__
,
buffer
);
if
(
0
!=
queryDbExec
(
pThreadInfo
->
taos
,
buffer
,
NO_INSERT_TYPE
,
false
)){
if
(
0
!=
queryDbExec
(
pThreadInfo
->
taos
,
buffer
,
NO_INSERT_TYPE
,
false
)){
errorPrint
(
"queryDbExec() failed. buffer:
\n
%s
\n
"
,
buffer
);
errorPrint
(
"queryDbExec() failed. buffer:
\n
%s
\n
"
,
buffer
);
free
(
buffer
);
free
(
buffer
);
...
@@ -2960,7 +2960,6 @@ static void* createTable(void *sarg)
...
@@ -2960,7 +2960,6 @@ static void* createTable(void *sarg)
}
}
if
(
0
!=
len
)
{
if
(
0
!=
len
)
{
verbosePrint
(
"%s() %d buffer: %s
\n
"
,
__func__
,
__LINE__
,
buffer
);
if
(
0
!=
queryDbExec
(
pThreadInfo
->
taos
,
buffer
,
NO_INSERT_TYPE
,
false
))
{
if
(
0
!=
queryDbExec
(
pThreadInfo
->
taos
,
buffer
,
NO_INSERT_TYPE
,
false
))
{
errorPrint
(
"queryDbExec() failed. buffer:
\n
%s
\n
"
,
buffer
);
errorPrint
(
"queryDbExec() failed. buffer:
\n
%s
\n
"
,
buffer
);
}
}
...
@@ -2995,19 +2994,19 @@ static int startMultiThreadCreateChildTable(
...
@@ -2995,19 +2994,19 @@ static int startMultiThreadCreateChildTable(
int64_t
b
=
0
;
int64_t
b
=
0
;
b
=
ntables
%
threads
;
b
=
ntables
%
threads
;
for
(
int
i
=
0
;
i
<
threads
;
i
++
)
{
for
(
int
64_t
i
=
0
;
i
<
threads
;
i
++
)
{
threadInfo
*
t_i
nfo
=
infos
+
i
;
threadInfo
*
pThreadI
nfo
=
infos
+
i
;
t_i
nfo
->
threadID
=
i
;
pThreadI
nfo
->
threadID
=
i
;
tstrncpy
(
t_i
nfo
->
db_name
,
db_name
,
MAX_DB_NAME_SIZE
);
tstrncpy
(
pThreadI
nfo
->
db_name
,
db_name
,
MAX_DB_NAME_SIZE
);
t_i
nfo
->
superTblInfo
=
superTblInfo
;
pThreadI
nfo
->
superTblInfo
=
superTblInfo
;
verbosePrint
(
"%s() %d db_name: %s
\n
"
,
__func__
,
__LINE__
,
db_name
);
verbosePrint
(
"%s() %d db_name: %s
\n
"
,
__func__
,
__LINE__
,
db_name
);
t_i
nfo
->
taos
=
taos_connect
(
pThreadI
nfo
->
taos
=
taos_connect
(
g_Dbs
.
host
,
g_Dbs
.
host
,
g_Dbs
.
user
,
g_Dbs
.
user
,
g_Dbs
.
password
,
g_Dbs
.
password
,
db_name
,
db_name
,
g_Dbs
.
port
);
g_Dbs
.
port
);
if
(
t_i
nfo
->
taos
==
NULL
)
{
if
(
pThreadI
nfo
->
taos
==
NULL
)
{
errorPrint
(
"%s() LN%d, Failed to connect to TDengine, reason:%s
\n
"
,
errorPrint
(
"%s() LN%d, Failed to connect to TDengine, reason:%s
\n
"
,
__func__
,
__LINE__
,
taos_errstr
(
NULL
));
__func__
,
__LINE__
,
taos_errstr
(
NULL
));
free
(
pids
);
free
(
pids
);
...
@@ -3015,14 +3014,14 @@ static int startMultiThreadCreateChildTable(
...
@@ -3015,14 +3014,14 @@ static int startMultiThreadCreateChildTable(
return
-
1
;
return
-
1
;
}
}
t_i
nfo
->
start_table_from
=
startFrom
;
pThreadI
nfo
->
start_table_from
=
startFrom
;
t_i
nfo
->
ntables
=
i
<
b
?
a
+
1
:
a
;
pThreadI
nfo
->
ntables
=
i
<
b
?
a
+
1
:
a
;
t_i
nfo
->
end_table_to
=
i
<
b
?
startFrom
+
a
:
startFrom
+
a
-
1
;
pThreadI
nfo
->
end_table_to
=
i
<
b
?
startFrom
+
a
:
startFrom
+
a
-
1
;
startFrom
=
t_i
nfo
->
end_table_to
+
1
;
startFrom
=
pThreadI
nfo
->
end_table_to
+
1
;
t_i
nfo
->
use_metric
=
true
;
pThreadI
nfo
->
use_metric
=
true
;
t_i
nfo
->
cols
=
cols
;
pThreadI
nfo
->
cols
=
cols
;
t_i
nfo
->
minDelay
=
UINT64_MAX
;
pThreadI
nfo
->
minDelay
=
UINT64_MAX
;
pthread_create
(
pids
+
i
,
NULL
,
createTable
,
t_i
nfo
);
pthread_create
(
pids
+
i
,
NULL
,
createTable
,
pThreadI
nfo
);
}
}
for
(
int
i
=
0
;
i
<
threads
;
i
++
)
{
for
(
int
i
=
0
;
i
<
threads
;
i
++
)
{
...
@@ -3030,8 +3029,8 @@ static int startMultiThreadCreateChildTable(
...
@@ -3030,8 +3029,8 @@ static int startMultiThreadCreateChildTable(
}
}
for
(
int
i
=
0
;
i
<
threads
;
i
++
)
{
for
(
int
i
=
0
;
i
<
threads
;
i
++
)
{
threadInfo
*
t_i
nfo
=
infos
+
i
;
threadInfo
*
pThreadI
nfo
=
infos
+
i
;
taos_close
(
t_i
nfo
->
taos
);
taos_close
(
pThreadI
nfo
->
taos
);
}
}
free
(
pids
);
free
(
pids
);
...
@@ -3559,9 +3558,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
...
@@ -3559,9 +3558,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
// rows per table need be less than insert batch
// rows per table need be less than insert batch
if
(
g_args
.
interlace_rows
>
g_args
.
num_of_RPR
)
{
if
(
g_args
.
interlace_rows
>
g_args
.
num_of_RPR
)
{
printf
(
"NOTICE: interlace rows value %
"
PRIu64
" > num_of_records_per_req %"
PRIu64
"
\n\n
"
,
printf
(
"NOTICE: interlace rows value %
ud > num_of_records_per_req %ud
\n\n
"
,
g_args
.
interlace_rows
,
g_args
.
num_of_RPR
);
g_args
.
interlace_rows
,
g_args
.
num_of_RPR
);
printf
(
" interlace rows value will be set to num_of_records_per_req %
"
PRIu64
"
\n\n
"
,
printf
(
" interlace rows value will be set to num_of_records_per_req %
ud
\n\n
"
,
g_args
.
num_of_RPR
);
g_args
.
num_of_RPR
);
prompt
();
prompt
();
g_args
.
interlace_rows
=
g_args
.
num_of_RPR
;
g_args
.
interlace_rows
=
g_args
.
num_of_RPR
;
...
@@ -3876,22 +3875,22 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
...
@@ -3876,22 +3875,22 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto
PARSE_OVER
;
goto
PARSE_OVER
;
}
}
cJSON
*
insertMod
e
=
cJSON_GetObjectItem
(
stbInfo
,
"insert_mode"
);
// taosc , rest, stmt
cJSON
*
stbIfac
e
=
cJSON_GetObjectItem
(
stbInfo
,
"insert_mode"
);
// taosc , rest, stmt
if
(
insertMode
&&
insertMod
e
->
type
==
cJSON_String
if
(
stbIface
&&
stbIfac
e
->
type
==
cJSON_String
&&
insertMod
e
->
valuestring
!=
NULL
)
{
&&
stbIfac
e
->
valuestring
!=
NULL
)
{
if
(
0
==
strcasecmp
(
insertMod
e
->
valuestring
,
"taosc"
))
{
if
(
0
==
strcasecmp
(
stbIfac
e
->
valuestring
,
"taosc"
))
{
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
nsertMode
=
TAOSC_IFACE
;
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
face
=
TAOSC_IFACE
;
}
else
if
(
0
==
strcasecmp
(
insertMod
e
->
valuestring
,
"rest"
))
{
}
else
if
(
0
==
strcasecmp
(
stbIfac
e
->
valuestring
,
"rest"
))
{
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
nsertMode
=
REST_IFACE
;
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
face
=
REST_IFACE
;
}
else
if
(
0
==
strcasecmp
(
insertMod
e
->
valuestring
,
"stmt"
))
{
}
else
if
(
0
==
strcasecmp
(
stbIfac
e
->
valuestring
,
"stmt"
))
{
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
nsertMode
=
STMT_IFACE
;
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
face
=
STMT_IFACE
;
}
else
{
}
else
{
errorPrint
(
"%s() LN%d, failed to read json, insert_mode %s not recognized
\n
"
,
errorPrint
(
"%s() LN%d, failed to read json, insert_mode %s not recognized
\n
"
,
__func__
,
__LINE__
,
insertMod
e
->
valuestring
);
__func__
,
__LINE__
,
stbIfac
e
->
valuestring
);
goto
PARSE_OVER
;
goto
PARSE_OVER
;
}
}
}
else
if
(
!
insertMod
e
)
{
}
else
if
(
!
stbIfac
e
)
{
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
nsertMod
e
=
TAOSC_IFACE
;
g_Dbs
.
db
[
i
].
superTbls
[
j
].
i
fac
e
=
TAOSC_IFACE
;
}
else
{
}
else
{
errorPrint
(
"%s"
,
"failed to read json, insert_mode not found
\n
"
);
errorPrint
(
"%s"
,
"failed to read json, insert_mode not found
\n
"
);
goto
PARSE_OVER
;
goto
PARSE_OVER
;
...
@@ -4028,9 +4027,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
...
@@ -4028,9 +4027,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
=
stbInterlaceRows
->
valueint
;
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
=
stbInterlaceRows
->
valueint
;
// rows per table need be less than insert batch
// rows per table need be less than insert batch
if
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
>
g_args
.
num_of_RPR
)
{
if
(
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
>
g_args
.
num_of_RPR
)
{
printf
(
"NOTICE: db[%d].superTbl[%d]'s interlace rows value %"
PRIu64
" > num_of_records_per_req %"
PRIu64
"
\n\n
"
,
printf
(
"NOTICE: db[%d].superTbl[%d]'s interlace rows value %ud > num_of_records_per_req %ud
\n\n
"
,
i
,
j
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
,
g_args
.
num_of_RPR
);
i
,
j
,
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
,
printf
(
" interlace rows value will be set to num_of_records_per_req %"
PRIu64
"
\n\n
"
,
g_args
.
num_of_RPR
);
printf
(
" interlace rows value will be set to num_of_records_per_req %ud
\n\n
"
,
g_args
.
num_of_RPR
);
g_args
.
num_of_RPR
);
prompt
();
prompt
();
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
=
g_args
.
num_of_RPR
;
g_Dbs
.
db
[
i
].
superTbls
[
j
].
interlaceRows
=
g_args
.
num_of_RPR
;
...
@@ -4247,7 +4247,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
...
@@ -4247,7 +4247,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if
(
concurrent
&&
concurrent
->
type
==
cJSON_Number
)
{
if
(
concurrent
&&
concurrent
->
type
==
cJSON_Number
)
{
if
(
concurrent
->
valueint
<=
0
)
{
if
(
concurrent
->
valueint
<=
0
)
{
errorPrint
(
errorPrint
(
"%s() LN%d, query sqlCount %"
PRIu64
" or concurrent %
"
PRIu64
"
is not correct.
\n
"
,
"%s() LN%d, query sqlCount %"
PRIu64
" or concurrent %
d
is not correct.
\n
"
,
__func__
,
__LINE__
,
__func__
,
__LINE__
,
g_queryInfo
.
specifiedQueryInfo
.
sqlCount
,
g_queryInfo
.
specifiedQueryInfo
.
sqlCount
,
g_queryInfo
.
specifiedQueryInfo
.
concurrent
);
g_queryInfo
.
specifiedQueryInfo
.
concurrent
);
...
@@ -4314,24 +4314,28 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
...
@@ -4314,24 +4314,28 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
}
}
// sqls
// sqls
cJSON
*
s
uper
Sqls
=
cJSON_GetObjectItem
(
specifiedQuery
,
"sqls"
);
cJSON
*
s
pecified
Sqls
=
cJSON_GetObjectItem
(
specifiedQuery
,
"sqls"
);
if
(
!
s
uper
Sqls
)
{
if
(
!
s
pecified
Sqls
)
{
g_queryInfo
.
specifiedQueryInfo
.
sqlCount
=
0
;
g_queryInfo
.
specifiedQueryInfo
.
sqlCount
=
0
;
}
else
if
(
s
uper
Sqls
->
type
!=
cJSON_Array
)
{
}
else
if
(
s
pecified
Sqls
->
type
!=
cJSON_Array
)
{
errorPrint
(
"%s() LN%d, failed to read json, super sqls not found
\n
"
,
errorPrint
(
"%s() LN%d, failed to read json, super sqls not found
\n
"
,
__func__
,
__LINE__
);
__func__
,
__LINE__
);
goto
PARSE_OVER
;
goto
PARSE_OVER
;
}
else
{
}
else
{
int
superSqlSize
=
cJSON_GetArraySize
(
superSqls
);
int
superSqlSize
=
cJSON_GetArraySize
(
specifiedSqls
);
if
(
superSqlSize
>
MAX_QUERY_SQL_COUNT
)
{
if
(
superSqlSize
*
g_queryInfo
.
specifiedQueryInfo
.
concurrent
errorPrint
(
"%s() LN%d, failed to read json, query sql size overflow, max is %d
\n
"
,
>
MAX_QUERY_SQL_COUNT
)
{
__func__
,
__LINE__
,
MAX_QUERY_SQL_COUNT
);
errorPrint
(
"%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d
\n
"
,
__func__
,
__LINE__
,
superSqlSize
,
g_queryInfo
.
specifiedQueryInfo
.
concurrent
,
MAX_QUERY_SQL_COUNT
);
goto
PARSE_OVER
;
goto
PARSE_OVER
;
}
}
g_queryInfo
.
specifiedQueryInfo
.
sqlCount
=
superSqlSize
;
g_queryInfo
.
specifiedQueryInfo
.
sqlCount
=
superSqlSize
;
for
(
int
j
=
0
;
j
<
superSqlSize
;
++
j
)
{
for
(
int
j
=
0
;
j
<
superSqlSize
;
++
j
)
{
cJSON
*
sql
=
cJSON_GetArrayItem
(
s
uper
Sqls
,
j
);
cJSON
*
sql
=
cJSON_GetArrayItem
(
s
pecified
Sqls
,
j
);
if
(
sql
==
NULL
)
continue
;
if
(
sql
==
NULL
)
continue
;
cJSON
*
sqlStr
=
cJSON_GetObjectItem
(
sql
,
"sql"
);
cJSON
*
sqlStr
=
cJSON_GetObjectItem
(
sql
,
"sql"
);
...
@@ -4507,16 +4511,16 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
...
@@ -4507,16 +4511,16 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo
.
superQueryInfo
.
resubAfterConsume
=
1
;
g_queryInfo
.
superQueryInfo
.
resubAfterConsume
=
1
;
}
}
// sqls
// s
upert table s
qls
cJSON
*
su
bs
qls
=
cJSON_GetObjectItem
(
superQuery
,
"sqls"
);
cJSON
*
su
perS
qls
=
cJSON_GetObjectItem
(
superQuery
,
"sqls"
);
if
(
!
su
bs
qls
)
{
if
(
!
su
perS
qls
)
{
g_queryInfo
.
superQueryInfo
.
sqlCount
=
0
;
g_queryInfo
.
superQueryInfo
.
sqlCount
=
0
;
}
else
if
(
su
bs
qls
->
type
!=
cJSON_Array
)
{
}
else
if
(
su
perS
qls
->
type
!=
cJSON_Array
)
{
errorPrint
(
"%s() LN%d: failed to read json, super sqls not found
\n
"
,
errorPrint
(
"%s() LN%d: failed to read json, super sqls not found
\n
"
,
__func__
,
__LINE__
);
__func__
,
__LINE__
);
goto
PARSE_OVER
;
goto
PARSE_OVER
;
}
else
{
}
else
{
int
superSqlSize
=
cJSON_GetArraySize
(
su
bs
qls
);
int
superSqlSize
=
cJSON_GetArraySize
(
su
perS
qls
);
if
(
superSqlSize
>
MAX_QUERY_SQL_COUNT
)
{
if
(
superSqlSize
>
MAX_QUERY_SQL_COUNT
)
{
errorPrint
(
"%s() LN%d, failed to read json, query sql size overflow, max is %d
\n
"
,
errorPrint
(
"%s() LN%d, failed to read json, query sql size overflow, max is %d
\n
"
,
__func__
,
__LINE__
,
MAX_QUERY_SQL_COUNT
);
__func__
,
__LINE__
,
MAX_QUERY_SQL_COUNT
);
...
@@ -4525,7 +4529,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
...
@@ -4525,7 +4529,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo
.
superQueryInfo
.
sqlCount
=
superSqlSize
;
g_queryInfo
.
superQueryInfo
.
sqlCount
=
superSqlSize
;
for
(
int
j
=
0
;
j
<
superSqlSize
;
++
j
)
{
for
(
int
j
=
0
;
j
<
superSqlSize
;
++
j
)
{
cJSON
*
sql
=
cJSON_GetArrayItem
(
su
bs
qls
,
j
);
cJSON
*
sql
=
cJSON_GetArrayItem
(
su
perS
qls
,
j
);
if
(
sql
==
NULL
)
continue
;
if
(
sql
==
NULL
)
continue
;
cJSON
*
sqlStr
=
cJSON_GetObjectItem
(
sql
,
"sql"
);
cJSON
*
sqlStr
=
cJSON_GetObjectItem
(
sql
,
"sql"
);
...
@@ -4856,11 +4860,11 @@ static int64_t execInsert(threadInfo *pThreadInfo, uint64_t k)
...
@@ -4856,11 +4860,11 @@ static int64_t execInsert(threadInfo *pThreadInfo, uint64_t k)
verbosePrint
(
"[%d] %s() LN%d %s
\n
"
,
pThreadInfo
->
threadID
,
verbosePrint
(
"[%d] %s() LN%d %s
\n
"
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
pThreadInfo
->
buffer
);
__func__
,
__LINE__
,
pThreadInfo
->
buffer
);
if
(
superTblInfo
)
{
if
(
superTblInfo
)
{
if
(
superTblInfo
->
i
nsertMod
e
==
TAOSC_IFACE
)
{
if
(
superTblInfo
->
i
fac
e
==
TAOSC_IFACE
)
{
affectedRows
=
queryDbExec
(
affectedRows
=
queryDbExec
(
pThreadInfo
->
taos
,
pThreadInfo
->
taos
,
pThreadInfo
->
buffer
,
INSERT_TYPE
,
false
);
pThreadInfo
->
buffer
,
INSERT_TYPE
,
false
);
}
else
if
(
superTblInfo
->
i
nsertMod
e
==
REST_IFACE
)
{
}
else
if
(
superTblInfo
->
i
fac
e
==
REST_IFACE
)
{
if
(
0
!=
postProceSql
(
g_Dbs
.
host
,
&
g_Dbs
.
serv_addr
,
g_Dbs
.
port
,
if
(
0
!=
postProceSql
(
g_Dbs
.
host
,
&
g_Dbs
.
serv_addr
,
g_Dbs
.
port
,
pThreadInfo
->
buffer
,
NULL
/* not set result file */
))
{
pThreadInfo
->
buffer
,
NULL
/* not set result file */
))
{
affectedRows
=
-
1
;
affectedRows
=
-
1
;
...
@@ -4869,7 +4873,7 @@ static int64_t execInsert(threadInfo *pThreadInfo, uint64_t k)
...
@@ -4869,7 +4873,7 @@ static int64_t execInsert(threadInfo *pThreadInfo, uint64_t k)
}
else
{
}
else
{
affectedRows
=
k
;
affectedRows
=
k
;
}
}
}
else
if
(
superTblInfo
->
i
nsertMod
e
==
STMT_IFACE
)
{
}
else
if
(
superTblInfo
->
i
fac
e
==
STMT_IFACE
)
{
debugPrint
(
"%s() LN%d, stmt=%p"
,
__func__
,
__LINE__
,
pThreadInfo
->
stmt
);
debugPrint
(
"%s() LN%d, stmt=%p"
,
__func__
,
__LINE__
,
pThreadInfo
->
stmt
);
if
(
0
!=
taos_stmt_execute
(
pThreadInfo
->
stmt
))
{
if
(
0
!=
taos_stmt_execute
(
pThreadInfo
->
stmt
))
{
errorPrint
(
"%s() LN%d, failied to execute insert statement
\n
"
,
errorPrint
(
"%s() LN%d, failied to execute insert statement
\n
"
,
...
@@ -4880,7 +4884,7 @@ static int64_t execInsert(threadInfo *pThreadInfo, uint64_t k)
...
@@ -4880,7 +4884,7 @@ static int64_t execInsert(threadInfo *pThreadInfo, uint64_t k)
affectedRows
=
k
;
affectedRows
=
k
;
}
else
{
}
else
{
errorPrint
(
"%s() LN%d: unknown insert mode: %d
\n
"
,
errorPrint
(
"%s() LN%d: unknown insert mode: %d
\n
"
,
__func__
,
__LINE__
,
superTblInfo
->
i
nsertMod
e
);
__func__
,
__LINE__
,
superTblInfo
->
i
fac
e
);
affectedRows
=
0
;
affectedRows
=
0
;
}
}
}
else
{
}
else
{
...
@@ -4916,7 +4920,7 @@ static void getTableName(char *pTblName,
...
@@ -4916,7 +4920,7 @@ static void getTableName(char *pTblName,
}
}
static
int64_t
generateDataTailWithoutStb
(
static
int64_t
generateDataTailWithoutStb
(
uint
64
_t
batch
,
char
*
buffer
,
uint
32
_t
batch
,
char
*
buffer
,
int64_t
remainderBufLen
,
int64_t
insertRows
,
int64_t
remainderBufLen
,
int64_t
insertRows
,
uint64_t
startFrom
,
int64_t
startTime
,
uint64_t
startFrom
,
int64_t
startTime
,
/* int64_t *pSamplePos, */
int64_t
*
dataLen
)
{
/* int64_t *pSamplePos, */
int64_t
*
dataLen
)
{
...
@@ -4924,7 +4928,7 @@ static int64_t generateDataTailWithoutStb(
...
@@ -4924,7 +4928,7 @@ static int64_t generateDataTailWithoutStb(
uint64_t
len
=
0
;
uint64_t
len
=
0
;
char
*
pstr
=
buffer
;
char
*
pstr
=
buffer
;
verbosePrint
(
"%s() LN%d batch=%
"
PRIu64
"
\n
"
,
__func__
,
__LINE__
,
batch
);
verbosePrint
(
"%s() LN%d batch=%
d
\n
"
,
__func__
,
__LINE__
,
batch
);
int64_t
k
=
0
;
int64_t
k
=
0
;
for
(
k
=
0
;
k
<
batch
;)
{
for
(
k
=
0
;
k
<
batch
;)
{
...
@@ -4936,22 +4940,11 @@ static int64_t generateDataTailWithoutStb(
...
@@ -4936,22 +4940,11 @@ static int64_t generateDataTailWithoutStb(
char
**
data_type
=
g_args
.
datatype
;
char
**
data_type
=
g_args
.
datatype
;
int
lenOfBinary
=
g_args
.
len_of_binary
;
int
lenOfBinary
=
g_args
.
len_of_binary
;
int64_t
randTail
=
DEFAULT_TIMESTAMP_STEP
*
k
;
if
(
g_args
.
disorderRatio
!=
0
)
{
int
rand_num
=
taosRandom
()
%
100
;
if
(
rand_num
<
g_args
.
disorderRatio
)
{
randTail
=
(
randTail
+
(
taosRandom
()
%
g_args
.
disorderRange
+
1
))
*
(
-
1
);
debugPrint
(
"rand data generated, back %"
PRId64
"
\n
"
,
randTail
);
}
}
else
{
randTail
=
DEFAULT_TIMESTAMP_STEP
*
k
;
}
retLen
=
generateData
(
data
,
data_type
,
retLen
=
generateData
(
data
,
data_type
,
startTime
+
randTail
,
startTime
+
getTSRandTail
(
(
int64_t
)
DEFAULT_TIMESTAMP_STEP
,
k
,
g_args
.
disorderRatio
,
g_args
.
disorderRange
),
lenOfBinary
);
lenOfBinary
);
if
(
len
>
remainderBufLen
)
if
(
len
>
remainderBufLen
)
...
@@ -4976,9 +4969,25 @@ static int64_t generateDataTailWithoutStb(
...
@@ -4976,9 +4969,25 @@ static int64_t generateDataTailWithoutStb(
return
k
;
return
k
;
}
}
static
int64_t
generateStbDataTail
(
static
int64_t
getTSRandTail
(
int64_t
timeStampStep
,
int32_t
seq
,
int
disorderRatio
,
int
disorderRange
)
{
int64_t
randTail
=
timeStampStep
*
seq
;
if
(
disorderRatio
>
0
)
{
int
rand_num
=
taosRandom
()
%
100
;
if
(
rand_num
<
disorderRatio
)
{
randTail
=
(
randTail
+
(
taosRandom
()
%
disorderRange
+
1
))
*
(
-
1
);
debugPrint
(
"rand data generated, back %"
PRId64
"
\n
"
,
randTail
);
}
}
return
randTail
;
}
static
int32_t
generateStbDataTail
(
SSuperTable
*
superTblInfo
,
SSuperTable
*
superTblInfo
,
uint
64
_t
batch
,
char
*
buffer
,
uint
32
_t
batch
,
char
*
buffer
,
int64_t
remainderBufLen
,
int64_t
insertRows
,
int64_t
remainderBufLen
,
int64_t
insertRows
,
uint64_t
startFrom
,
int64_t
startTime
,
uint64_t
startFrom
,
int64_t
startTime
,
int64_t
*
pSamplePos
,
int64_t
*
dataLen
)
{
int64_t
*
pSamplePos
,
int64_t
*
dataLen
)
{
...
@@ -4986,37 +4995,35 @@ static int64_t generateStbDataTail(
...
@@ -4986,37 +4995,35 @@ static int64_t generateStbDataTail(
char
*
pstr
=
buffer
;
char
*
pstr
=
buffer
;
verbosePrint
(
"%s() LN%d batch=%"
PRIu64
"
\n
"
,
__func__
,
__LINE__
,
batch
);
bool
tsRand
;
if
(
0
==
strncasecmp
(
superTblInfo
->
dataSource
,
"rand"
,
strlen
(
"rand"
)))
{
tsRand
=
true
;
}
else
{
tsRand
=
false
;
}
verbosePrint
(
"%s() LN%d batch=%ud
\n
"
,
__func__
,
__LINE__
,
batch
);
int
64
_t
k
=
0
;
int
32
_t
k
=
0
;
for
(
k
=
0
;
k
<
batch
;)
{
for
(
k
=
0
;
k
<
batch
;)
{
char
data
[
MAX_DATA_SIZE
];
char
data
[
MAX_DATA_SIZE
];
memset
(
data
,
0
,
MAX_DATA_SIZE
);
memset
(
data
,
0
,
MAX_DATA_SIZE
);
int64_t
retLen
=
0
;
int64_t
retLen
=
0
;
if
(
0
==
strncasecmp
(
superTblInfo
->
dataSource
,
if
(
tsRand
)
{
"sample"
,
strlen
(
"sample"
)))
{
retLen
=
generateStbRowData
(
superTblInfo
,
data
,
startTime
+
getTSRandTail
(
superTblInfo
->
timeStampStep
,
k
,
superTblInfo
->
disorderRatio
,
superTblInfo
->
disorderRange
)
);
}
else
{
retLen
=
getRowDataFromSample
(
retLen
=
getRowDataFromSample
(
data
,
data
,
remainderBufLen
,
remainderBufLen
,
startTime
+
superTblInfo
->
timeStampStep
*
k
,
startTime
+
superTblInfo
->
timeStampStep
*
k
,
superTblInfo
,
superTblInfo
,
pSamplePos
);
pSamplePos
);
}
else
if
(
0
==
strncasecmp
(
superTblInfo
->
dataSource
,
"rand"
,
strlen
(
"rand"
)))
{
int64_t
randTail
=
superTblInfo
->
timeStampStep
*
k
;
if
(
superTblInfo
->
disorderRatio
>
0
)
{
int
rand_num
=
taosRandom
()
%
100
;
if
(
rand_num
<
superTblInfo
->
disorderRatio
)
{
randTail
=
(
randTail
+
(
taosRandom
()
%
superTblInfo
->
disorderRange
+
1
))
*
(
-
1
);
debugPrint
(
"rand data generated, back %"
PRId64
"
\n
"
,
randTail
);
}
}
int64_t
d
=
startTime
+
randTail
;
retLen
=
generateStbRowData
(
superTblInfo
,
data
,
d
);
}
}
if
(
retLen
>
remainderBufLen
)
{
if
(
retLen
>
remainderBufLen
)
{
...
@@ -5028,7 +5035,7 @@ static int64_t generateStbDataTail(
...
@@ -5028,7 +5035,7 @@ static int64_t generateStbDataTail(
len
+=
retLen
;
len
+=
retLen
;
remainderBufLen
-=
retLen
;
remainderBufLen
-=
retLen
;
verbosePrint
(
"%s() LN%d len=%"
PRIu64
" k=%
"
PRIu64
"
\n
buffer=%s
\n
"
,
verbosePrint
(
"%s() LN%d len=%"
PRIu64
" k=%
ud
\n
buffer=%s
\n
"
,
__func__
,
__LINE__
,
len
,
k
,
buffer
);
__func__
,
__LINE__
,
len
,
k
,
buffer
);
startFrom
++
;
startFrom
++
;
...
@@ -5125,9 +5132,11 @@ static int generateStbSQLHead(
...
@@ -5125,9 +5132,11 @@ static int generateStbSQLHead(
return
len
;
return
len
;
}
}
static
int
64
_t
generateStbInterlaceData
(
static
int
32
_t
generateStbInterlaceData
(
SSuperTable
*
superTblInfo
,
SSuperTable
*
superTblInfo
,
char
*
tableName
,
uint64_t
batchPerTbl
,
uint64_t
i
,
uint64_t
batchPerTblTimes
,
char
*
tableName
,
uint32_t
batchPerTbl
,
uint64_t
i
,
uint32_t
batchPerTblTimes
,
uint64_t
tableSeq
,
uint64_t
tableSeq
,
threadInfo
*
pThreadInfo
,
char
*
buffer
,
threadInfo
*
pThreadInfo
,
char
*
buffer
,
int64_t
insertRows
,
int64_t
insertRows
,
...
@@ -5154,7 +5163,7 @@ static int64_t generateStbInterlaceData(
...
@@ -5154,7 +5163,7 @@ static int64_t generateStbInterlaceData(
int64_t
dataLen
=
0
;
int64_t
dataLen
=
0
;
verbosePrint
(
"[%d] %s() LN%d i=%"
PRIu64
" batchPerTblTimes=%
"
PRIu64
" batchPerTbl = %"
PRIu64
"
\n
"
,
verbosePrint
(
"[%d] %s() LN%d i=%"
PRIu64
" batchPerTblTimes=%
ud batchPerTbl = %ud
\n
"
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
i
,
batchPerTblTimes
,
batchPerTbl
);
i
,
batchPerTblTimes
,
batchPerTbl
);
...
@@ -5162,7 +5171,7 @@ static int64_t generateStbInterlaceData(
...
@@ -5162,7 +5171,7 @@ static int64_t generateStbInterlaceData(
startTime
=
taosGetTimestamp
(
pThreadInfo
->
time_precision
);
startTime
=
taosGetTimestamp
(
pThreadInfo
->
time_precision
);
}
}
int
64_t
k
=
generateStbDataTail
(
int
32_t
k
=
generateStbDataTail
(
superTblInfo
,
superTblInfo
,
batchPerTbl
,
pstr
,
*
pRemainderBufLen
,
insertRows
,
0
,
batchPerTbl
,
pstr
,
*
pRemainderBufLen
,
insertRows
,
0
,
startTime
,
startTime
,
...
@@ -5172,7 +5181,7 @@ static int64_t generateStbInterlaceData(
...
@@ -5172,7 +5181,7 @@ static int64_t generateStbInterlaceData(
pstr
+=
dataLen
;
pstr
+=
dataLen
;
*
pRemainderBufLen
-=
dataLen
;
*
pRemainderBufLen
-=
dataLen
;
}
else
{
}
else
{
debugPrint
(
"%s() LN%d, generated data tail: %
"
PRIu64
", not equal batch per table: %"
PRIu64
"
\n
"
,
debugPrint
(
"%s() LN%d, generated data tail: %
ud, not equal batch per table: %ud
\n
"
,
__func__
,
__LINE__
,
k
,
batchPerTbl
);
__func__
,
__LINE__
,
k
,
batchPerTbl
);
pstr
-=
headLen
;
pstr
-=
headLen
;
pstr
[
0
]
=
'\0'
;
pstr
[
0
]
=
'\0'
;
...
@@ -5183,7 +5192,7 @@ static int64_t generateStbInterlaceData(
...
@@ -5183,7 +5192,7 @@ static int64_t generateStbInterlaceData(
}
}
static
int64_t
generateInterlaceDataWithoutStb
(
static
int64_t
generateInterlaceDataWithoutStb
(
char
*
tableName
,
uint
64
_t
batchPerTbl
,
char
*
tableName
,
uint
32
_t
batchPerTbl
,
uint64_t
tableSeq
,
uint64_t
tableSeq
,
char
*
dbName
,
char
*
buffer
,
char
*
dbName
,
char
*
buffer
,
int64_t
insertRows
,
int64_t
insertRows
,
...
@@ -5215,7 +5224,7 @@ static int64_t generateInterlaceDataWithoutStb(
...
@@ -5215,7 +5224,7 @@ static int64_t generateInterlaceDataWithoutStb(
pstr
+=
dataLen
;
pstr
+=
dataLen
;
*
pRemainderBufLen
-=
dataLen
;
*
pRemainderBufLen
-=
dataLen
;
}
else
{
}
else
{
debugPrint
(
"%s() LN%d, generated data tail: %"
PRIu64
", not equal batch per table: %
"
PRIu64
"
\n
"
,
debugPrint
(
"%s() LN%d, generated data tail: %"
PRIu64
", not equal batch per table: %
ud
\n
"
,
__func__
,
__LINE__
,
k
,
batchPerTbl
);
__func__
,
__LINE__
,
k
,
batchPerTbl
);
pstr
-=
headLen
;
pstr
-=
headLen
;
pstr
[
0
]
=
'\0'
;
pstr
[
0
]
=
'\0'
;
...
@@ -5225,7 +5234,71 @@ static int64_t generateInterlaceDataWithoutStb(
...
@@ -5225,7 +5234,71 @@ static int64_t generateInterlaceDataWithoutStb(
return
k
;
return
k
;
}
}
static
int64_t
generateStbProgressiveData
(
static
int32_t
prepareStbStmt
(
SSuperTable
*
stbInfo
,
TAOS_STMT
*
stmt
,
char
*
tableName
,
uint32_t
batch
,
uint64_t
insertRows
,
int64_t
startTime
,
char
*
buffer
)
{
uint32_t
k
;
int
ret
;
char
*
pstr
=
buffer
;
pstr
+=
sprintf
(
pstr
,
"INSERT INTO %s values(?"
,
tableName
);
for
(
int
i
=
0
;
i
<
stbInfo
->
columnCount
;
i
++
)
{
pstr
+=
sprintf
(
pstr
,
",?"
);
}
pstr
+=
sprintf
(
pstr
,
")"
);
ret
=
taos_stmt_prepare
(
stmt
,
buffer
,
0
);
if
(
ret
!=
0
){
errorPrint
(
"failed to execute taos_stmt_prepare. return 0x%x. reason: %s
\n
"
,
ret
,
taos_errstr
(
NULL
));
return
ret
;
}
void
*
bindArray
=
malloc
(
sizeof
(
TAOS_BIND
)
*
(
stbInfo
->
columnCount
+
1
));
if
(
bindArray
==
NULL
)
{
errorPrint
(
"Failed to allocate %d bind params
\n
"
,
batch
);
return
-
1
;
}
bool
tsRand
;
if
(
0
==
strncasecmp
(
stbInfo
->
dataSource
,
"rand"
,
strlen
(
"rand"
)))
{
tsRand
=
true
;
}
else
{
tsRand
=
false
;
}
for
(
k
=
0
;
k
<
batch
;
k
++
)
{
/* columnCount + 1 (ts) */
for
(
int
i
=
0
;
i
<=
stbInfo
->
columnCount
;
i
++
)
{
TAOS_BIND
*
bind
=
(
TAOS_BIND
*
)
bindArray
+
(
sizeof
(
TAOS_BIND
)
*
i
);
if
(
i
==
0
)
{
bind
->
buffer_type
=
TSDB_DATA_TYPE_TIMESTAMP
;
int64_t
ts
;
if
(
tsRand
)
{
ts
=
startTime
+
getTSRandTail
(
stbInfo
->
timeStampStep
,
k
,
stbInfo
->
disorderRatio
,
stbInfo
->
disorderRange
);
}
else
{
ts
=
startTime
+
stbInfo
->
timeStampStep
*
k
;
}
bind
->
buffer
=
&
ts
;
}
else
{
}
}
// if msg > 3MB, break
}
taos_stmt_bind_param
(
stmt
,
bindArray
);
taos_stmt_add_batch
(
stmt
);
return
k
;
}
static
int32_t
generateStbProgressiveData
(
SSuperTable
*
superTblInfo
,
SSuperTable
*
superTblInfo
,
char
*
tableName
,
char
*
tableName
,
int64_t
tableSeq
,
int64_t
tableSeq
,
...
@@ -5259,12 +5332,17 @@ static int64_t generateStbProgressiveData(
...
@@ -5259,12 +5332,17 @@ static int64_t generateStbProgressiveData(
pSamplePos
,
&
dataLen
);
pSamplePos
,
&
dataLen
);
}
}
static
int64_t
prepareStmtWithoutStb
(
char
*
tableName
)
{
return
-
1
;
}
static
int64_t
generateProgressiveDataWithoutStb
(
static
int64_t
generateProgressiveDataWithoutStb
(
char
*
tableName
,
char
*
tableName
,
int64_t
tableSeq
,
/* int64_t tableSeq, */
threadInfo
*
pThreadInfo
,
char
*
buffer
,
threadInfo
*
pThreadInfo
,
char
*
buffer
,
int64_t
insertRows
,
int64_t
insertRows
,
uint64_t
startFrom
,
int64_t
startTime
,
int64_t
*
pSamplePos
,
uint64_t
startFrom
,
int64_t
startTime
,
/*int64_t *pSamplePos, */
int64_t
*
pRemainderBufLen
)
int64_t
*
pRemainderBufLen
)
{
{
assert
(
buffer
!=
NULL
);
assert
(
buffer
!=
NULL
);
...
@@ -5305,7 +5383,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5305,7 +5383,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo
->
threadID
,
__func__
,
__LINE__
);
pThreadInfo
->
threadID
,
__func__
,
__LINE__
);
int64_t
insertRows
;
int64_t
insertRows
;
uint
64
_t
interlaceRows
;
uint
32
_t
interlaceRows
;
uint64_t
maxSqlLen
;
uint64_t
maxSqlLen
;
int64_t
nTimeStampStep
;
int64_t
nTimeStampStep
;
uint64_t
insert_interval
;
uint64_t
insert_interval
;
...
@@ -5343,8 +5421,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5343,8 +5421,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
if
(
interlaceRows
>
g_args
.
num_of_RPR
)
if
(
interlaceRows
>
g_args
.
num_of_RPR
)
interlaceRows
=
g_args
.
num_of_RPR
;
interlaceRows
=
g_args
.
num_of_RPR
;
uint
64
_t
batchPerTbl
=
interlaceRows
;
uint
32
_t
batchPerTbl
=
interlaceRows
;
uint
64
_t
batchPerTblTimes
;
uint
32
_t
batchPerTblTimes
;
if
((
interlaceRows
>
0
)
&&
(
pThreadInfo
->
ntables
>
1
))
{
if
((
interlaceRows
>
0
)
&&
(
pThreadInfo
->
ntables
>
1
))
{
batchPerTblTimes
=
batchPerTblTimes
=
...
@@ -5393,9 +5471,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5393,9 +5471,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pstr
+=
len
;
pstr
+=
len
;
remainderBufLen
-=
len
;
remainderBufLen
-=
len
;
uint
64
_t
recOfBatch
=
0
;
uint
32
_t
recOfBatch
=
0
;
for
(
uint
64
_t
i
=
0
;
i
<
batchPerTblTimes
;
i
++
)
{
for
(
uint
32
_t
i
=
0
;
i
<
batchPerTblTimes
;
i
++
)
{
char
tableName
[
TSDB_TABLE_NAME_LEN
];
char
tableName
[
TSDB_TABLE_NAME_LEN
];
getTableName
(
tableName
,
pThreadInfo
,
tableSeq
);
getTableName
(
tableName
,
pThreadInfo
,
tableSeq
);
...
@@ -5408,11 +5486,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5408,11 +5486,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
uint64_t
oldRemainderLen
=
remainderBufLen
;
uint64_t
oldRemainderLen
=
remainderBufLen
;
int
64
_t
generated
;
int
32
_t
generated
;
if
(
superTblInfo
)
{
if
(
superTblInfo
)
{
generated
=
generateStbInterlaceData
(
generated
=
generateStbInterlaceData
(
superTblInfo
,
superTblInfo
,
tableName
,
batchPerTbl
,
i
,
batchPerTblTimes
,
tableName
,
batchPerTbl
,
i
,
batchPerTblTimes
,
tableSeq
,
tableSeq
,
pThreadInfo
,
pstr
,
pThreadInfo
,
pstr
,
insertRows
,
insertRows
,
...
@@ -5427,10 +5506,10 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5427,10 +5506,10 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
&
remainderBufLen
);
&
remainderBufLen
);
}
}
debugPrint
(
"[%d] %s() LN%d, generated records is %
"
PRId64
"
\n
"
,
debugPrint
(
"[%d] %s() LN%d, generated records is %
d
\n
"
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
generated
);
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
generated
);
if
(
generated
<
0
)
{
if
(
generated
<
0
)
{
errorPrint
(
"[%d] %s() LN%d, generated records is %
"
PRId64
"
\n
"
,
errorPrint
(
"[%d] %s() LN%d, generated records is %
d
\n
"
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
generated
);
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
generated
);
goto
free_of_interlace
;
goto
free_of_interlace
;
}
else
if
(
generated
==
0
)
{
}
else
if
(
generated
==
0
)
{
...
@@ -5442,7 +5521,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5442,7 +5521,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pstr
+=
(
oldRemainderLen
-
remainderBufLen
);
pstr
+=
(
oldRemainderLen
-
remainderBufLen
);
// startTime += batchPerTbl * superTblInfo->timeStampStep;
// startTime += batchPerTbl * superTblInfo->timeStampStep;
pThreadInfo
->
totalInsertRows
+=
batchPerTbl
;
pThreadInfo
->
totalInsertRows
+=
batchPerTbl
;
verbosePrint
(
"[%d] %s() LN%d batchPerTbl=%
"
PRId64
" recOfBatch=%"
PRId64
"
\n
"
,
verbosePrint
(
"[%d] %s() LN%d batchPerTbl=%
d recOfBatch=%d
\n
"
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
batchPerTbl
,
recOfBatch
);
batchPerTbl
,
recOfBatch
);
...
@@ -5458,7 +5537,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5458,7 +5537,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
if
(
generatedRecPerTbl
>=
insertRows
)
if
(
generatedRecPerTbl
>=
insertRows
)
break
;
break
;
int
remainRows
=
insertRows
-
generatedRecPerTbl
;
int
64_t
remainRows
=
insertRows
-
generatedRecPerTbl
;
if
((
remainRows
>
0
)
&&
(
batchPerTbl
>
remainRows
))
if
((
remainRows
>
0
)
&&
(
batchPerTbl
>
remainRows
))
batchPerTbl
=
remainRows
;
batchPerTbl
=
remainRows
;
...
@@ -5474,7 +5553,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5474,7 +5553,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
break
;
break
;
}
}
verbosePrint
(
"[%d] %s() LN%d recOfBatch=%
"
PRIu64
"
totalInsertRows=%"
PRIu64
"
\n
"
,
verbosePrint
(
"[%d] %s() LN%d recOfBatch=%
d
totalInsertRows=%"
PRIu64
"
\n
"
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
recOfBatch
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
recOfBatch
,
pThreadInfo
->
totalInsertRows
);
pThreadInfo
->
totalInsertRows
);
verbosePrint
(
"[%d] %s() LN%d, buffer=%s
\n
"
,
verbosePrint
(
"[%d] %s() LN%d, buffer=%s
\n
"
,
...
@@ -5483,7 +5562,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5483,7 +5562,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
startTs
=
taosGetTimestampMs
();
startTs
=
taosGetTimestampMs
();
if
(
recOfBatch
==
0
)
{
if
(
recOfBatch
==
0
)
{
errorPrint
(
"[%d] %s() LN%d try inserting records of batch is %
"
PRIu64
"
\n
"
,
errorPrint
(
"[%d] %s() LN%d try inserting records of batch is %
d
\n
"
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
recOfBatch
);
recOfBatch
);
errorPrint
(
"%s
\n
"
,
"
\t
Please check if the batch or the buffer length is proper value!
\n
"
);
errorPrint
(
"%s
\n
"
,
"
\t
Please check if the batch or the buffer length is proper value!
\n
"
);
...
@@ -5505,7 +5584,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
...
@@ -5505,7 +5584,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo
->
totalDelay
+=
delay
;
pThreadInfo
->
totalDelay
+=
delay
;
if
(
recOfBatch
!=
affectedRows
)
{
if
(
recOfBatch
!=
affectedRows
)
{
errorPrint
(
"[%d] %s() LN%d execInsert insert %
"
PRIu64
"
, affected rows: %"
PRId64
"
\n
%s
\n
"
,
errorPrint
(
"[%d] %s() LN%d execInsert insert %
d
, affected rows: %"
PRId64
"
\n
%s
\n
"
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
pThreadInfo
->
threadID
,
__func__
,
__LINE__
,
recOfBatch
,
affectedRows
,
pThreadInfo
->
buffer
);
recOfBatch
,
affectedRows
,
pThreadInfo
->
buffer
);
goto
free_of_interlace
;
goto
free_of_interlace
;
...
@@ -5566,12 +5645,6 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
...
@@ -5566,12 +5645,6 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
uint64_t
startTs
=
taosGetTimestampMs
();
uint64_t
startTs
=
taosGetTimestampMs
();
uint64_t
endTs
;
uint64_t
endTs
;
/* int insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
uint64_t st = 0;
uint64_t et = 0xffffffff;
*/
pThreadInfo
->
totalInsertRows
=
0
;
pThreadInfo
->
totalInsertRows
=
0
;
pThreadInfo
->
totalAffectedRows
=
0
;
pThreadInfo
->
totalAffectedRows
=
0
;
...
@@ -5598,21 +5671,34 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
...
@@ -5598,21 +5671,34 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pstr
+=
len
;
pstr
+=
len
;
remainderBufLen
-=
len
;
remainderBufLen
-=
len
;
int
64
_t
generated
;
int
32
_t
generated
;
if
(
superTblInfo
)
{
if
(
superTblInfo
)
{
if
(
superTblInfo
->
iface
==
STMT_IFACE
)
{
generated
=
prepareStbStmt
(
superTblInfo
,
pThreadInfo
->
stmt
,
tableName
,
g_args
.
num_of_RPR
,
insertRows
,
start_time
,
pstr
);
}
else
{
generated
=
generateStbProgressiveData
(
generated
=
generateStbProgressiveData
(
superTblInfo
,
superTblInfo
,
tableName
,
tableSeq
,
pThreadInfo
->
db_name
,
pstr
,
insertRows
,
tableName
,
tableSeq
,
pThreadInfo
->
db_name
,
pstr
,
i
,
start_time
,
insertRows
,
i
,
start_time
,
&
(
pThreadInfo
->
samplePos
),
&
(
pThreadInfo
->
samplePos
),
&
remainderBufLen
);
&
remainderBufLen
);
}
}
else
{
if
(
g_args
.
iface
==
STMT_IFACE
)
{
generated
=
prepareStmtWithoutStb
(
tableName
);
}
else
{
}
else
{
generated
=
generateProgressiveDataWithoutStb
(
generated
=
generateProgressiveDataWithoutStb
(
tableName
,
tableSeq
,
pThreadInfo
,
pstr
,
insertRows
,
tableName
,
/* tableSeq, */
pThreadInfo
,
pstr
,
insertRows
,
i
,
start_time
,
i
,
start_time
,
&
(
pThreadInfo
->
samplePos
),
/* &(pThreadInfo->samplePos), */
&
remainderBufLen
);
&
remainderBufLen
);
}
}
}
if
(
generated
>
0
)
if
(
generated
>
0
)
i
+=
generated
;
i
+=
generated
;
else
else
...
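Note: most of the hunks above only narrow loop counters and batch counters from 64-bit to 32-bit and adjust the matching printf format strings (%"PRId64" becomes %d). A quick standalone reminder of how the <inttypes.h> macros pair with fixed-width types:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void) {
        int32_t  generated       = 128;         /* plain %d (or PRId32) is enough */
        int64_t  insertRows      = 5000000LL;   /* needs PRId64, not %d           */
        uint64_t totalInsertRows = 42ULL;       /* needs PRIu64                   */

        printf("generated=%d\n", generated);
        printf("insertRows=%" PRId64 "\n", insertRows);
        printf("totalInsertRows=%" PRIu64 "\n", totalInsertRows);
        return 0;
    }

Using %d for a 64-bit value is undefined behavior on platforms where int is 32 bits, which is why the format strings had to change together with the types.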
@@ -5679,7 +5765,7 @@ static void* syncWrite(void *sarg) {
     threadInfo *pThreadInfo = (threadInfo *)sarg;
     SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
 
-    int interlaceRows;
+    uint32_t interlaceRows;
 
     if (superTblInfo) {
         if ((superTblInfo->interlaceRows == 0)
@@ -5810,7 +5896,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
         char* precision, SSuperTable* superTblInfo) {
 
     //TAOS* taos;
-    //if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) {
+    //if (0 == strncasecmp(superTblInfo->iface, "taosc", 5)) {
     //  taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port);
     //  if (NULL == taos) {
     //    printf("connect to server fail, reason: %s\n", taos_errstr(NULL));
@@ -5955,8 +6041,9 @@ static void startMultiThreadInsertData(int threads, char* db_name,
     }
 
     if ((superTblInfo)
-            && (superTblInfo->insertMode == REST_IFACE)) {
+            && (superTblInfo->iface == REST_IFACE)) {
         if (convertHostToServAddr(g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) {
             exit(-1);
         }
     }
@@ -5971,22 +6058,22 @@ static void startMultiThreadInsertData(int threads, char* db_name,
     memset(infos, 0, threads * sizeof(threadInfo));
 
     for (int i = 0; i < threads; i++) {
-        threadInfo *t_info = infos + i;
-        t_info->threadID = i;
-        tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
-        t_info->time_precision = timePrec;
-        t_info->superTblInfo = superTblInfo;
+        threadInfo *pThreadInfo = infos + i;
+        pThreadInfo->threadID = i;
+        tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
+        pThreadInfo->time_precision = timePrec;
+        pThreadInfo->superTblInfo = superTblInfo;
 
-        t_info->start_time = start_time;
-        t_info->minDelay = UINT64_MAX;
+        pThreadInfo->start_time = start_time;
+        pThreadInfo->minDelay = UINT64_MAX;
 
         if ((NULL == superTblInfo) ||
-                (superTblInfo->insertMode != REST_IFACE)) {
+                (superTblInfo->iface != REST_IFACE)) {
             //t_info->taos = taos;
-            t_info->taos = taos_connect(
+            pThreadInfo->taos = taos_connect(
                     g_Dbs.host, g_Dbs.user,
                     g_Dbs.password, db_name, g_Dbs.port);
-            if (NULL == t_info->taos) {
+            if (NULL == pThreadInfo->taos) {
                 errorPrint(
                         "%s() LN%d, connect to server fail from insert sub thread, reason: %s\n",
                         __func__, __LINE__,
@@ -5995,9 +6082,9 @@ static void startMultiThreadInsertData(int threads, char* db_name,
                 exit(-1);
             }
 
-            if ((superTblInfo) && (superTblInfo->insertMode == STMT_IFACE)) {
-                t_info->stmt = taos_stmt_init(t_info->taos);
-                if (NULL == t_info->stmt) {
+            if ((superTblInfo) && (superTblInfo->iface == STMT_IFACE)) {
+                pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos);
+                if (NULL == pThreadInfo->stmt) {
                     errorPrint(
                             "%s() LN%d, failed init stmt, reason: %s\n",
                             __func__, __LINE__,
@@ -6008,27 +6095,27 @@ static void startMultiThreadInsertData(int threads, char* db_name,
                 }
             }
         } else {
-            t_info->taos = NULL;
+            pThreadInfo->taos = NULL;
         }
 
         /* if ((NULL == superTblInfo)
           || (0 == superTblInfo->multiThreadWriteOneTbl)) {
            */
-        t_info->start_table_from = startFrom;
-        t_info->ntables = i < b ? a + 1 : a;
-        t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
-        startFrom = t_info->end_table_to + 1;
+        pThreadInfo->start_table_from = startFrom;
+        pThreadInfo->ntables = i < b ? a + 1 : a;
+        pThreadInfo->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
+        startFrom = pThreadInfo->end_table_to + 1;
         /* } else {
-          t_info->start_table_from = 0;
-          t_info->ntables = superTblInfo->childTblCount;
-          t_info->start_time = t_info->start_time + rand_int() % 10000 - rand_tinyint();
+          pThreadInfo->start_table_from = 0;
+          pThreadInfo->ntables = superTblInfo->childTblCount;
+          pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint();
         }
         */
-        tsem_init(&(t_info->lock_sem), 0, 0);
+        tsem_init(&(pThreadInfo->lock_sem), 0, 0);
         if (ASYNC_MODE == g_Dbs.asyncMode) {
-            pthread_create(pids + i, NULL, asyncWrite, t_info);
+            pthread_create(pids + i, NULL, asyncWrite, pThreadInfo);
         } else {
-            pthread_create(pids + i, NULL, syncWrite, t_info);
+            pthread_create(pids + i, NULL, syncWrite, pThreadInfo);
         }
     }
@@ -6043,31 +6130,32 @@ static void startMultiThreadInsertData(int threads, char* db_name,
     double  avgDelay = 0;
 
     for (int i = 0; i < threads; i++) {
-        threadInfo *t_info = infos + i;
+        threadInfo *pThreadInfo = infos + i;
 
-        tsem_destroy(&(t_info->lock_sem));
-        if (t_info->stmt) {
-            taos_stmt_close(t_info->stmt);
+        tsem_destroy(&(pThreadInfo->lock_sem));
+        if (pThreadInfo->stmt) {
+            taos_stmt_close(pThreadInfo->stmt);
         }
-        taos_close(t_info->taos);
+        tsem_destroy(&(pThreadInfo->lock_sem));
+        taos_close(pThreadInfo->taos);
 
         debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n",
                 __func__, __LINE__,
-                t_info->threadID, t_info->totalInsertRows,
-                t_info->totalAffectedRows);
+                pThreadInfo->threadID, pThreadInfo->totalInsertRows,
+                pThreadInfo->totalAffectedRows);
         if (superTblInfo) {
-            superTblInfo->totalAffectedRows += t_info->totalAffectedRows;
-            superTblInfo->totalInsertRows += t_info->totalInsertRows;
+            superTblInfo->totalAffectedRows += pThreadInfo->totalAffectedRows;
+            superTblInfo->totalInsertRows += pThreadInfo->totalInsertRows;
         } else {
-            g_args.totalAffectedRows += t_info->totalAffectedRows;
-            g_args.totalInsertRows += t_info->totalInsertRows;
+            g_args.totalAffectedRows += pThreadInfo->totalAffectedRows;
+            g_args.totalInsertRows += pThreadInfo->totalInsertRows;
         }
 
-        totalDelay += t_info->totalDelay;
-        cntDelay += t_info->cntDelay;
-        if (t_info->maxDelay > maxDelay) maxDelay = t_info->maxDelay;
-        if (t_info->minDelay < minDelay) minDelay = t_info->minDelay;
+        totalDelay += pThreadInfo->totalDelay;
+        cntDelay += pThreadInfo->cntDelay;
+        if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay;
+        if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay;
     }
     cntDelay -= 1;
@@ -6123,26 +6211,26 @@ static void startMultiThreadInsertData(int threads, char* db_name,
 static void *readTable(void *sarg) {
 #if 1
-    threadInfo *rinfo = (threadInfo *)sarg;
-    TAOS *taos = rinfo->taos;
+    threadInfo *pThreadInfo = (threadInfo *)sarg;
+    TAOS *taos = pThreadInfo->taos;
     char command[BUFFER_SIZE] = "\0";
-    uint64_t sTime = rinfo->start_time;
-    char *tb_prefix = rinfo->tb_prefix;
-    FILE *fp = fopen(rinfo->fp, "a");
+    uint64_t sTime = pThreadInfo->start_time;
+    char *tb_prefix = pThreadInfo->tb_prefix;
+    FILE *fp = fopen(pThreadInfo->filePath, "a");
     if (NULL == fp) {
-        errorPrint("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
+        errorPrint("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
         return NULL;
     }
 
     int64_t num_of_DPT;
-    /* if (rinfo->superTblInfo) {
-      num_of_DPT = rinfo->superTblInfo->insertRows; // nrecords_per_table;
+    /* if (pThreadInfo->superTblInfo) {
+      num_of_DPT = pThreadInfo->superTblInfo->insertRows; // nrecords_per_table;
     } else {
      */
     num_of_DPT = g_args.num_of_DPT;
     //  }
 
-    int64_t num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
+    int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
     int64_t totalData = num_of_DPT * num_of_tables;
     bool do_aggreFunc = g_Dbs.do_aggreFunc;
@@ -6195,17 +6283,17 @@ static void *readTable(void *sarg) {
 static void *readMetric(void *sarg) {
 #if 1
-    threadInfo *rinfo = (threadInfo *)sarg;
-    TAOS *taos = rinfo->taos;
+    threadInfo *pThreadInfo = (threadInfo *)sarg;
+    TAOS *taos = pThreadInfo->taos;
     char command[BUFFER_SIZE] = "\0";
-    FILE *fp = fopen(rinfo->fp, "a");
+    FILE *fp = fopen(pThreadInfo->filePath, "a");
     if (NULL == fp) {
-        printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
+        printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
         return NULL;
     }
 
-    int64_t num_of_DPT = rinfo->superTblInfo->insertRows;
-    int64_t num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
+    int64_t num_of_DPT = pThreadInfo->superTblInfo->insertRows;
+    int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
     int64_t totalData = num_of_DPT * num_of_tables;
     bool do_aggreFunc = g_Dbs.do_aggreFunc;
@@ -6404,8 +6492,8 @@ static void *specifiedTableQuery(void *sarg) {
     uint64_t lastPrintTime = taosGetTimestampMs();
     uint64_t startTs = taosGetTimestampMs();
 
-    if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
-        sprintf(pThreadInfo->fp, "%s-%d",
+    if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq] != NULL) {
+        sprintf(pThreadInfo->filePath, "%s-%d",
                 g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
                 pThreadInfo->threadID);
     }
@@ -6505,8 +6593,8 @@ static void *superTableQuery(void *sarg) {
         for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
             memset(sqlstr, 0, sizeof(sqlstr));
             replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i);
-            if (g_queryInfo.superQueryInfo.result[j][0] != 0) {
-                sprintf(pThreadInfo->fp, "%s-%d",
+            if (g_queryInfo.superQueryInfo.result[j] != NULL) {
+                sprintf(pThreadInfo->filePath, "%s-%d",
                         g_queryInfo.superQueryInfo.result[j],
                         pThreadInfo->threadID);
             }
@@ -6596,15 +6684,14 @@ static int queryTestProcess() {
         for (uint64_t i = 0; i < nSqlCount; i++) {
             for (int j = 0; j < nConcurrent; j++) {
                 uint64_t seq = i * nConcurrent + j;
-                threadInfo *t_info = infos + seq;
-                t_info->threadID = seq;
-                t_info->querySeq = i;
+                threadInfo *pThreadInfo = infos + seq;
+                pThreadInfo->threadID = seq;
+                pThreadInfo->querySeq = i;
 
                 if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
 
                     char sqlStr[MAX_TB_NAME_SIZE*2];
                     sprintf(sqlStr, "use %s", g_queryInfo.dbName);
-                    verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr);
                     if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
                         taos_close(taos);
                         free(infos);
@@ -6615,10 +6702,10 @@ static int queryTestProcess() {
                     }
                 }
 
-                t_info->taos = NULL;// TODO: workaround to use separate taos connection;
+                pThreadInfo->taos = NULL;// TODO: workaround to use separate taos connection;
 
                 pthread_create(pids + seq, NULL, specifiedTableQuery,
-                        t_info);
+                        pThreadInfo);
             }
         }
     } else {
@@ -6658,15 +6745,15 @@ static int queryTestProcess() {
         uint64_t startFrom = 0;
         for (int i = 0; i < threads; i++) {
-            threadInfo *t_info = infosOfSub + i;
-            t_info->threadID = i;
+            threadInfo *pThreadInfo = infosOfSub + i;
+            pThreadInfo->threadID = i;
 
-            t_info->start_table_from = startFrom;
-            t_info->ntables = i < b ? a + 1 : a;
-            t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
-            startFrom = t_info->end_table_to + 1;
-            t_info->taos = NULL; // TODO: workaround to use separate taos connection;
-            pthread_create(pidsOfSub + i, NULL, superTableQuery, t_info);
+            pThreadInfo->start_table_from = startFrom;
+            pThreadInfo->ntables = i < b ? a + 1 : a;
+            pThreadInfo->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
+            startFrom = pThreadInfo->end_table_to + 1;
+            pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+            pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo);
         }
 
         g_queryInfo.superQueryInfo.threadCnt = threads;
@@ -6713,7 +6800,7 @@ static void stable_sub_callback(
     }
 
     if (param)
-        appendResultToFile(res, ((threadInfo *)param)->fp);
+        fetchResult(res, (threadInfo *)param);
     // tao_unscribe() will free result.
 }
@@ -6726,7 +6813,7 @@ static void specified_sub_callback(
     }
 
     if (param)
-        appendResultToFile(res, ((threadInfo *)param)->fp);
+        fetchResult(res, (threadInfo *)param);
     // tao_unscribe() will free result.
 }
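Note: the two callbacks above now hand the whole threadInfo to fetchResult() instead of a bare file path. Below is a minimal sketch of what such a result drain can look like with the public C API; the file path argument and the 4 KB line buffer are illustrative choices, not taosdemo's.

    #include <stdio.h>
    #include <taos.h>

    /* Drain a query or subscription result set and append each row,
     * printed as text, to a file. Sketch only: error handling and
     * buffering are kept minimal. */
    static void fetch_result_to_file(TAOS_RES *res, const char *path) {
        if (res == NULL) return;

        FILE *fp = fopen(path, "a");
        if (fp == NULL) return;

        int         num_fields = taos_num_fields(res);
        TAOS_FIELD *fields     = taos_fetch_fields(res);
        TAOS_ROW    row;
        char        line[4096];

        while ((row = taos_fetch_row(res)) != NULL) {
            int len = taos_print_row(line, row, fields, num_fields);
            line[len] = '\0';
            fprintf(fp, "%s\n", line);
        }

        fclose(fp);
    }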
@@ -6779,28 +6866,16 @@ static void *superSubscribe(void *sarg) {
         exit(-1);
     }
 
-    if (g_queryInfo.superQueryInfo.sqlCount * pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) {
-        errorPrint("The number %"PRId64" of sql count(%"PRIu64") multiple the table number(%"PRId64") of the thread is more than max query sql count: %d\n",
-                g_queryInfo.superQueryInfo.sqlCount * pThreadInfo->ntables,
-                g_queryInfo.superQueryInfo.sqlCount,
-                pThreadInfo->ntables,
-                MAX_QUERY_SQL_COUNT);
-        exit(-1);
-    }
-
     if (pThreadInfo->taos == NULL) {
-        pThreadInfo->taos = taos_connect(g_queryInfo.host,
+        TAOS * taos = NULL;
+        taos = taos_connect(g_queryInfo.host,
                 g_queryInfo.user,
                 g_queryInfo.password,
                 g_queryInfo.dbName,
                 g_queryInfo.port);
-        if (taos == NULL) {
+        if (pThreadInfo->taos == NULL) {
             errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
                     pThreadInfo->threadID, taos_errstr(NULL));
             return NULL;
+        } else {
+            pThreadInfo->taos = taos;
         }
     }
@@ -6830,7 +6905,7 @@ static void *superSubscribe(void *sarg) {
                 g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq],
                 subSqlstr, i);
         if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
-            sprintf(pThreadInfo->fp, "%s-%d",
+            sprintf(pThreadInfo->filePath, "%s-%d",
                     g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
                     pThreadInfo->threadID);
         }
@@ -6873,16 +6948,16 @@ static void *superSubscribe(void *sarg) {
             if (res) {
                 if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
-                    sprintf(pThreadInfo->fp, "%s-%d",
+                    sprintf(pThreadInfo->filePath, "%s-%d",
                             g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
                             pThreadInfo->threadID);
-                    appendResultToFile(res, pThreadInfo->fp);
+                    fetchResult(res, pThreadInfo);
                 }
                 if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
-                    sprintf(pThreadInfo->fp, "%s-%d",
+                    sprintf(pThreadInfo->filePath, "%s-%d",
                             g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
                             pThreadInfo->threadID);
-                    appendResultToFile(res, pThreadInfo->fp);
+                    fetchResult(res, pThreadInfo);
                 }
 
                 consumed[tsubSeq]++;
@@ -6923,95 +6998,92 @@ static void *superSubscribe(void *sarg) {
 static void *specifiedSubscribe(void *sarg) {
     threadInfo *pThreadInfo = (threadInfo *)sarg;
-    TAOS_SUB*  tsub = NULL;
+//  TAOS_SUB*  tsub = NULL;
 
     if (pThreadInfo->taos == NULL) {
-        pThreadInfo->taos = taos_connect(g_queryInfo.host,
+        TAOS * taos = NULL;
+        taos = taos_connect(g_queryInfo.host,
                 g_queryInfo.user,
                 g_queryInfo.password,
                 g_queryInfo.dbName,
                 g_queryInfo.port);
-        if (pThreadInfo->taos == NULL) {
+        if (taos == NULL) {
             errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
                     pThreadInfo->threadID, taos_errstr(NULL));
             return NULL;
+        } else {
+            pThreadInfo->taos = taos;
         }
     }
 
     char sqlStr[MAX_TB_NAME_SIZE*2];
     sprintf(sqlStr, "use %s", g_queryInfo.dbName);
-    debugPrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr);
     if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
         taos_close(pThreadInfo->taos);
         return NULL;
     }
 
-    char topic[32] = {0};
-    sprintf(topic, "taosdemo-subscribe-%"PRIu64"", pThreadInfo->querySeq);
-    if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
-        sprintf(pThreadInfo->fp, "%s-%d",
+    sprintf(g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
+            "taosdemo-subscribe-%"PRIu64"-%d",
+            pThreadInfo->querySeq,
+            pThreadInfo->threadID);
+    if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq] != NULL) {
+        sprintf(pThreadInfo->filePath, "%s-%d",
                 g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
                 pThreadInfo->threadID);
     }
 
-    tsub = subscribeImpl(
+    g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl(
             SPECIFIED_CLASS, pThreadInfo,
             g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
-            topic,
+            g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
             g_queryInfo.specifiedQueryInfo.subscribeRestart,
             g_queryInfo.specifiedQueryInfo.subscribeInterval);
-    if (NULL == tsub) {
+    if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
         taos_close(pThreadInfo->taos);
         return NULL;
     }
 
     // start loop to consume result
-    TAOS_RES* res = NULL;
-    int consumed;
+    g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
     while(1) {
         if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
             continue;
         }
 
-        res = taos_consume(tsub);
-        if (res) {
+        g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = taos_consume(
+                g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]);
+        if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) {
             if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
-                sprintf(pThreadInfo->fp, "%s-%d",
+                sprintf(pThreadInfo->filePath, "%s-%d",
                         g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
                         pThreadInfo->threadID);
-                appendResultToFile(res, pThreadInfo->fp);
+                fetchResult(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], pThreadInfo);
            }
 
-            consumed++;
+            g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID]++;
            if ((g_queryInfo.specifiedQueryInfo.subscribeKeepProgress)
-                    && (consumed >=
+                    && (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >=
                        g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) {
                printf("keepProgress:%d, resub specified query: %"PRIu64"\n",
                        g_queryInfo.specifiedQueryInfo.subscribeKeepProgress,
                        pThreadInfo->querySeq);
-                consumed = 0;
+                g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
-                taos_unsubscribe(tsub,
+                taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID],
                        g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
-                tsub = subscribeImpl(
+                g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl(
                        SPECIFIED_CLASS,
                        pThreadInfo,
                        g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
-                        topic,
+                        g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
                        g_queryInfo.specifiedQueryInfo.subscribeRestart,
                        g_queryInfo.specifiedQueryInfo.subscribeInterval);
-                if (NULL == tsub) {
+                if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
                    taos_close(pThreadInfo->taos);
                    return NULL;
                }
            }
        }
    }
-    taos_free_result(res);
-    taos_unsubscribe(tsub, 0);
+    taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]);
+    taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->querySeq], 0);
    taos_close(pThreadInfo->taos);
    return NULL;
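Note: specifiedSubscribe() above moves its subscription handle, consumed counter and last result into per-thread slots of g_queryInfo so a thread can resubscribe. Stripped of that bookkeeping, the underlying subscribe/consume/unsubscribe cycle of the C API looks roughly like the sketch below; the topic name, SQL statement and the 10-iteration limit are placeholder values, not taosdemo's configuration.

    #include <stdio.h>
    #include <taos.h>

    /* Synchronous subscribe loop: poll with taos_consume(), process each
     * batch, then unsubscribe. */
    static void subscribe_sketch(TAOS *taos) {
        TAOS_SUB *tsub = taos_subscribe(taos,
                1,                          /* restart: ignore saved progress */
                "taosdemo-subscribe-demo",  /* topic, also the progress key   */
                "select * from test.meters;",
                NULL, NULL,                 /* no callback: synchronous mode  */
                1000);                      /* polling interval in ms         */
        if (tsub == NULL) return;

        for (int i = 0; i < 10; i++) {
            TAOS_RES *res = taos_consume(tsub);  /* next batch of new rows */
            if (res == NULL) continue;
            printf("got a batch, rows would be drained with taos_fetch_row()\n");
            /* do not free res here; taos_unsubscribe() releases it */
        }

        taos_unsubscribe(tsub, 0 /* do not keep progress on disk */);
    }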
@@ -7081,18 +7153,18 @@ static int subscribeTestProcess() {
         for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
             for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
                 uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
-                threadInfo *t_info = infos + seq;
-                t_info->threadID = seq;
-                t_info->querySeq = i;
-                t_info->taos = NULL;  // TODO: workaround to use separate taos connection;
-                pthread_create(pids + seq, NULL, specifiedSubscribe, t_info);
+                threadInfo *pThreadInfo = infos + seq;
+                pThreadInfo->threadID = seq;
+                pThreadInfo->querySeq = i;
+                pThreadInfo->taos = NULL;  // TODO: workaround to use separate taos connection;
+                pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo);
             }
         }
 
     //==== create threads for super table query
     if (g_queryInfo.superQueryInfo.sqlCount <= 0) {
-        printf("%s() LN%d, super table query sqlCount %"PRIu64".\n",
+        debugPrint("%s() LN%d, super table query sqlCount %"PRIu64".\n",
                 __func__, __LINE__,
                 g_queryInfo.superQueryInfo.sqlCount);
     } else {
@@ -7131,17 +7203,17 @@ static int subscribeTestProcess() {
         uint64_t startFrom = 0;
         for (int j = 0; j < threads; j++) {
             uint64_t seq = i * threads + j;
-            threadInfo *t_info = infosOfStable + seq;
-            t_info->threadID = seq;
-            t_info->querySeq = i;
+            threadInfo *pThreadInfo = infosOfStable + seq;
+            pThreadInfo->threadID = seq;
+            pThreadInfo->querySeq = i;
 
-            t_info->start_table_from = startFrom;
-            t_info->ntables = j < b ? a + 1 : a;
-            t_info->end_table_to = j < b ? startFrom + a : startFrom + a - 1;
-            startFrom = t_info->end_table_to + 1;
-            t_info->taos = NULL; // TODO: workaround to use separate taos connection;
+            pThreadInfo->start_table_from = startFrom;
+            pThreadInfo->ntables = j < b ? a + 1 : a;
+            pThreadInfo->end_table_to = j < b ? startFrom + a : startFrom + a - 1;
+            startFrom = pThreadInfo->end_table_to + 1;
+            pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
             pthread_create(pidsOfStable + seq,
-                    NULL, superSubscribe, t_info);
+                    NULL, superSubscribe, pThreadInfo);
         }
     }
@@ -7257,7 +7329,7 @@ static void setParaFromArg(){
     tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix,
             g_args.tb_prefix, MAX_TB_NAME_SIZE);
     tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
-    g_Dbs.db[0].superTbls[0].insertMode = g_args.iface;
+    g_Dbs.db[0].superTbls[0].iface = g_args.iface;
     tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp,
             "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE);
     g_Dbs.db[0].superTbls[0].timeStampStep = DEFAULT_TIMESTAMP_STEP;
@@ -7370,7 +7442,6 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
         }
 
         memcpy(cmd + cmd_len, line, read_len);
-        verbosePrint("%s() LN%d cmd: %s\n", __func__, __LINE__, cmd);
         if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) {
             errorPrint("%s() LN%d, queryDbExec %s failed!\n",
                     __func__, __LINE__, cmd);
@@ -7420,47 +7491,47 @@ static void queryResult() {
     // query data
 
     pthread_t read_id;
-    threadInfo *rInfo = malloc(sizeof(threadInfo));
-    assert(rInfo);
-    rInfo->start_time = 1500000000000;  // 2017-07-14 10:40:00.000
-    rInfo->start_table_from = 0;
+    threadInfo *pThreadInfo = malloc(sizeof(threadInfo));
+    assert(pThreadInfo);
+    pThreadInfo->start_time = 1500000000000;  // 2017-07-14 10:40:00.000
+    pThreadInfo->start_table_from = 0;
 
-    //rInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
+    //pThreadInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
     if (g_args.use_metric) {
-        rInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
-        rInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
-        rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
-        tstrncpy(rInfo->tb_prefix,
+        pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
+        pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
+        pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
+        tstrncpy(pThreadInfo->tb_prefix,
                 g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE);
     } else {
-        rInfo->ntables = g_args.num_of_tables;
-        rInfo->end_table_to = g_args.num_of_tables - 1;
-        tstrncpy(rInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
+        pThreadInfo->ntables = g_args.num_of_tables;
+        pThreadInfo->end_table_to = g_args.num_of_tables - 1;
+        tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
     }
 
-    rInfo->taos = taos_connect(
+    pThreadInfo->taos = taos_connect(
             g_Dbs.host,
             g_Dbs.user,
             g_Dbs.password,
             g_Dbs.db[0].dbName,
             g_Dbs.port);
-    if (rInfo->taos == NULL) {
+    if (pThreadInfo->taos == NULL) {
         errorPrint("Failed to connect to TDengine, reason:%s\n",
                 taos_errstr(NULL));
-        free(rInfo);
+        free(pThreadInfo);
         exit(-1);
     }
 
-    tstrncpy(rInfo->fp, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
+    tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
 
     if (!g_Dbs.use_metric) {
-        pthread_create(&read_id, NULL, readTable, rInfo);
+        pthread_create(&read_id, NULL, readTable, pThreadInfo);
     } else {
-        pthread_create(&read_id, NULL, readMetric, rInfo);
+        pthread_create(&read_id, NULL, readMetric, pThreadInfo);
     }
     pthread_join(read_id, NULL);
-    taos_close(rInfo->taos);
-    free(rInfo);
+    taos_close(pThreadInfo->taos);
+    free(pThreadInfo);
 }
 
 static void testCmdLine() {
src/os/src/detail/osMemory.c    View file @ b9d4476b

@@ -62,7 +62,7 @@ static void* taosRandomRealloc(void* ptr, size_t size, const char* file, uint32_
 static char* taosRandomStrdup(const char* str, const char* file, uint32_t line) {
   size_t len = strlen(str);
-  return taosRandomAllocFail(len + 1, file, line) ? NULL : taosStrdupImp(str);
+  return taosRandomAllocFail(len + 1, file, line) ? NULL : tstrdup(str);
 }
 
 static char* taosRandomStrndup(const char* str, size_t size, const char* file, uint32_t line) {
@@ -70,11 +70,11 @@ static char* taosRandomStrndup(const char* str, size_t size, const char* file, u
   if (len > size) {
     len = size;
   }
-  return taosRandomAllocFail(len + 1, file, line) ? NULL : taosStrndupImp(str, len);
+  return taosRandomAllocFail(len + 1, file, line) ? NULL : tstrndup(str, len);
 }
 
 static ssize_t taosRandomGetline(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) {
-  return taosRandomAllocFail(*n, file, line) ? -1 : taosGetlineImp(lineptr, n, stream);
+  return taosRandomAllocFail(*n, file, line) ? -1 : tgetline(lineptr, n, stream);
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -242,7 +242,7 @@ static char* taosStrndupDetectLeak(const char* str, size_t size, const char* fil
 static ssize_t taosGetlineDetectLeak(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) {
   char* buf = NULL;
   size_t bufSize = 0;
-  ssize_t size = taosGetlineImp(&buf, &bufSize, stream);
+  ssize_t size = tgetline(&buf, &bufSize, stream);
   if (size != -1) {
     if (*n < size + 1) {
       void* p = taosReallocDetectLeak(*lineptr, size + 1, file, line);
@@ -372,7 +372,7 @@ void taosFreeMem(void* ptr, const char* file, uint32_t line) {
 char* taosStrdupMem(const char* str, const char* file, uint32_t line) {
   switch (allocMode) {
     case TAOS_ALLOC_MODE_DEFAULT:
-      return taosStrdupImp(str);
+      return tstrdup(str);
 
     case TAOS_ALLOC_MODE_RANDOM_FAIL:
       return taosRandomStrdup(str, file, line);
@@ -380,13 +380,13 @@ char* taosStrdupMem(const char* str, const char* file, uint32_t line) {
     case TAOS_ALLOC_MODE_DETECT_LEAK:
      return taosStrdupDetectLeak(str, file, line);
   }
-  return taosStrdupImp(str);
+  return tstrdup(str);
 }
 
 char* taosStrndupMem(const char* str, size_t size, const char* file, uint32_t line) {
   switch (allocMode) {
     case TAOS_ALLOC_MODE_DEFAULT:
-      return taosStrndupImp(str, size);
+      return tstrndup(str, size);
 
     case TAOS_ALLOC_MODE_RANDOM_FAIL:
       return taosRandomStrndup(str, size, file, line);
@@ -394,13 +394,13 @@ char* taosStrndupMem(const char* str, size_t size, const char* file, uint32_t li
     case TAOS_ALLOC_MODE_DETECT_LEAK:
      return taosStrndupDetectLeak(str, size, file, line);
   }
-  return taosStrndupImp(str, size);
+  return tstrndup(str, size);
 }
 
 ssize_t taosGetlineMem(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) {
   switch (allocMode) {
     case TAOS_ALLOC_MODE_DEFAULT:
-      return taosGetlineImp(lineptr, n, stream);
+      return tgetline(lineptr, n, stream);
 
     case TAOS_ALLOC_MODE_RANDOM_FAIL:
      return taosRandomGetline(lineptr, n, stream, file, line);
@@ -408,7 +408,7 @@ ssize_t taosGetlineMem(char **lineptr, size_t *n, FILE *stream, const char* file
     case TAOS_ALLOC_MODE_DETECT_LEAK:
      return taosGetlineDetectLeak(lineptr, n, stream, file, line);
   }
-  return taosGetlineImp(lineptr, n, stream);
+  return tgetline(lineptr, n, stream);
 }
 
 static void taosCloseAllocLog() {
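Note: the helpers above dispatch on allocMode so that allocation failures can be injected at random when testing out-of-memory paths; the change only swaps the underlying implementation names (taosStrdupImp becomes tstrdup, and so on). A reduced sketch of that wrapper pattern, with invented names (alloc_mode, my_strdup) rather than TDengine's:

    #include <stdlib.h>
    #include <string.h>

    /* One entry point that either calls the real strdup or injects a
     * random failure, controlled by a process-wide mode flag. */
    enum { ALLOC_DEFAULT, ALLOC_RANDOM_FAIL };
    static int alloc_mode = ALLOC_DEFAULT;

    static int random_alloc_fail(size_t size) {
        if (size == 0) return 0;
        return (rand() % 20) == 0;   /* fail roughly 5% of the time */
    }

    static char *my_strdup(const char *str) {
        switch (alloc_mode) {
            case ALLOC_RANDOM_FAIL:
                return random_alloc_fail(strlen(str) + 1) ? NULL : strdup(str);
            case ALLOC_DEFAULT:
            default:
                return strdup(str);
        }
    }

Callers must already handle a NULL return, so flipping alloc_mode in tests exercises every error branch without changing call sites.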
src/os/src/windows/wSemphone.c    View file @ b9d4476b

@@ -14,6 +14,7 @@
  */
 
 #define _DEFAULT_SOURCE
+
 #include "os.h"
 #include "taosdef.h"
 #include "tglobal.h"
@@ -24,7 +25,7 @@
 bool taosCheckPthreadValid(pthread_t thread) { return thread.p != NULL; }
 
 void taosResetPthread(pthread_t *thread) { thread->p = 0; }
 
 int64_t taosGetPthreadId(pthread_t thread) {
 #ifdef PTW32_VERSION
@@ -34,27 +35,24 @@ int64_t taosGetPthreadId(pthread_t thread) {
 #endif
 }
 
-int64_t taosGetSelfPthreadId() {
-  return GetCurrentThreadId();
-}
+int64_t taosGetSelfPthreadId() { return GetCurrentThreadId(); }
 
-bool taosComparePthread(pthread_t first, pthread_t second) {
-  return first.p == second.p;
-}
+bool taosComparePthread(pthread_t first, pthread_t second) { return first.p == second.p; }
 
-int32_t taosGetPId() {
-  return GetCurrentProcessId();
-}
+int32_t taosGetPId() { return GetCurrentProcessId(); }
 
 int32_t taosGetCurrentAPPName(char *name, int32_t* len) {
   char filepath[1024] = {0};
 
   GetModuleFileName(NULL, filepath, MAX_PATH);
-  *strrchr(filepath,'.') = '\0';
+  char* sub = strrchr(filepath, '.');
+  if (sub != NULL) {
+    *sub = '\0';
+  }
  strcpy(name, filepath);
 
  if (len != NULL) {
    *len = (int32_t) strlen(filepath);
  }
 
  return 0;
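Note: the wSemphone.c change above stops dereferencing the result of strrchr() blindly; when the module path contains no '.', strrchr() returns NULL and the old line would dereference a null pointer. The same defensive pattern in a standalone form:

    #include <stdio.h>
    #include <string.h>

    /* Cut the extension off a path if it has one; leave it alone otherwise. */
    static void strip_extension(char *filepath) {
        char *sub = strrchr(filepath, '.');
        if (sub != NULL) {
            *sub = '\0';
        }
    }

    int main(void) {
        char a[] = "C:\\TDengine\\taosd.exe";
        char b[] = "taosd";              /* no '.' at all, must not crash */
        strip_extension(a);
        strip_extension(b);
        printf("%s\n%s\n", a, b);        /* C:\TDengine\taosd and taosd */
        return 0;
    }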
tests/Jenkinsfile    View file @ b9d4476b

@@ -37,7 +37,7 @@ pipeline {
     stage('Parallel test stage') {
       parallel {
         stage('pytest') {
-          agent{label '184'}
+          agent{label 'slad1'}
          steps {
            pre_test()
            sh '''
@@ -62,7 +62,7 @@ pipeline {
        }
        stage('test_crash_gen') {
-          agent{label "185"}
+          agent{label "slad2"}
          steps {
            pre_test()
            sh '''
@@ -149,7 +149,7 @@ pipeline {
        }
        stage('test_valgrind') {
-          agent{label "186"}
+          agent{label "slad3"}
          steps {
            pre_test()
tests/script/unique/cluster/cache.sim    View file @ b9d4476b

@@ -41,7 +41,7 @@ sql create dnode $hostname2
 sleep 10000
 
 sql show log.tables;
-if $rows != 5 then
+if $rows > 6 then
  return -1
endi
tests/script/unique/dnode/monitor.sim    View file @ b9d4476b

@@ -56,7 +56,7 @@ print $data30
 print $data40
 print $data50
 
-if $rows != 5 then
+if $rows > 6 then
  return -1
endi
tests/script/unique/dnode/monitor_bug.sim    View file @ b9d4476b

@@ -19,7 +19,7 @@ sleep 3000
 sql show dnodes
 print dnode1 openVnodes $data2_1
-if $data2_1 != 1 then
+if $data2_1 > 2 then
  return -1
endi
@@ -41,7 +41,7 @@ print dnode2 openVnodes $data2_2
 if $data2_1 != 0 then
  goto show2
endi
-if $data2_2 != 1 then
+if $data2_2 > 2 then
  goto show2
endi
@@ -55,7 +55,7 @@ print $data30
 print $data40
 print $data50
 
-if $rows != 4 then
+if $rows > 5 then
  return -1
endi