提交 eecd5c94,作者:Haojun Liao

[td-225] merge develop

# Use the latest 2.1 version of CircleCI pipeline process engine. See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1
# Use a package of configuration called an orb.
orbs:
  # Declare a dependency on the welcome-orb
  welcome: circleci/welcome-orb@0.4.1
# Orchestrate or schedule a set of jobs
workflows:
  # Name the workflow "welcome"
  welcome:
    # Run the welcome/run job in its own container
    jobs:
      - welcome/run
[submodule "src/connector/go"]
path = src/connector/go
url = https://github.com/taosdata/driver-go
url = git@github.com:taosdata/driver-go.git
[submodule "src/connector/grafanaplugin"]
path = src/connector/grafanaplugin
url = https://github.com/taosdata/grafanaplugin
url = git@github.com:taosdata/grafanaplugin.git
[submodule "src/connector/hivemq-tdengine-extension"]
path = src/connector/hivemq-tdengine-extension
url = https://github.com/huskar-t/hivemq-tdengine-extension.git
url = git@github.com:taosdata/hivemq-tdengine-extension.git
[submodule "tests/examples/rust"]
path = tests/examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git
......@@ -116,7 +116,7 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
在X86-64、X86、arm64 和 arm32 平台上,TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型,如 aarch64 或 aarch32 等。
在X86-64、X86、arm64、arm32 和 mips64 平台上,TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型,如 aarch64 或 aarch32 等。
aarch64:
......@@ -130,6 +130,12 @@ aarch32:
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
mips64:
```bash
cmake .. -DCPUTYPE=mips64 && cmake --build .
```
### Windows 系统
如果你使用的是 Visual Studio 2013 版本:
......
......@@ -110,7 +110,7 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
TDengine build script can detect the host machine's architecture on X86-64, X86, arm64 and arm32 platform.
TDengine build script can detect the host machine's architecture on X86-64, X86, arm64, arm32 and mips64 platform.
You can also specify CPUTYPE option like aarch64 or aarch32 too if the detection result is not correct:
aarch64:
......@@ -123,6 +123,11 @@ aarch32:
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
mips64:
```bash
cmake .. -DCPUTYPE=mips64 && cmake --build .
```
### On Windows platform
If you use the Visual Studio 2013, please open a command window by executing "cmd.exe".
......
......@@ -102,6 +102,12 @@ IF ("${CPUTYPE}" STREQUAL "")
SET(TD_LINUX TRUE)
SET(TD_LINUX_64 FALSE)
SET(TD_ARM_64 TRUE)
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "mips64")
SET(CPUTYPE "mips64")
MESSAGE(STATUS "Set CPUTYPE to mips64")
SET(TD_LINUX TRUE)
SET(TD_LINUX_64 FALSE)
SET(TD_MIPS_64 TRUE)
ENDIF ()
ELSE ()
......
......@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.1.0.0")
SET(TD_VER_NUMBER "2.1.1.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
# TDengine 2.0 错误码以及对应的十进制码
| 状态码 | 模 | 错误码(十六进制) | 错误描述 | 错误码(十进制) |
| :-------------------------------------- | :--: | :----------------: | :------------------------------------------- | :--------------- |
| TSDB_CODE_RPC_ACTION_IN_PROGRESS | 0 | 0x0001 | "Action in progress" | -2147483647 |
| TSDB_CODE_RPC_AUTH_REQUIRED | 0 | 0x0002 | "Authentication required" | -2147483646 |
| TSDB_CODE_RPC_AUTH_FAILURE | 0 | 0x0003 | "Authentication failure" | -2147483645 |
| TSDB_CODE_RPC_REDIRECT | 0 | 0x0004 | "Redirect" | -2147483644 |
| TSDB_CODE_RPC_NOT_READY | 0 | 0x0005 | "System not ready" | -2147483643 |
| TSDB_CODE_RPC_ALREADY_PROCESSED | 0 | 0x0006 | "Message already processed" | -2147483642 |
| TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED | 0 | 0x0007 | "Last session not finished" | -2147483641 |
| TSDB_CODE_RPC_MISMATCHED_LINK_ID | 0 | 0x0008 | "Mismatched meter id" | -2147483640 |
| TSDB_CODE_RPC_TOO_SLOW | 0 | 0x0009 | "Processing of request timed out" | -2147483639 |
| TSDB_CODE_RPC_MAX_SESSIONS | 0 | 0x000A | "Number of sessions reached limit" | -2147483638 |
| TSDB_CODE_RPC_NETWORK_UNAVAIL | 0 | 0x000B | "Unable to establish connection" | -2147483637 |
| TSDB_CODE_RPC_APP_ERROR | 0 | 0x000C | "Unexpected generic error in RPC" | -2147483636 |
| TSDB_CODE_RPC_UNEXPECTED_RESPONSE | 0 | 0x000D | "Unexpected response" | -2147483635 |
| TSDB_CODE_RPC_INVALID_VALUE | 0 | 0x000E | "Invalid value" | -2147483634 |
| TSDB_CODE_RPC_INVALID_TRAN_ID | 0 | 0x000F | "Invalid transaction id" | -2147483633 |
| TSDB_CODE_RPC_INVALID_SESSION_ID | 0 | 0x0010 | "Invalid session id" | -2147483632 |
| TSDB_CODE_RPC_INVALID_MSG_TYPE | 0 | 0x0011 | "Invalid message type" | -2147483631 |
| TSDB_CODE_RPC_INVALID_RESPONSE_TYPE | 0 | 0x0012 | "Invalid response type" | -2147483630 |
| TSDB_CODE_RPC_INVALID_TIME_STAMP | 0 | 0x0013 | "Invalid timestamp" | -2147483629 |
| TSDB_CODE_COM_OPS_NOT_SUPPORT | 0 | 0x0100 | "Operation not supported" | -2147483392 |
| TSDB_CODE_COM_MEMORY_CORRUPTED | 0 | 0x0101 | "Memory corrupted" | -2147483391 |
| TSDB_CODE_COM_OUT_OF_MEMORY | 0 | 0x0102 | "Out of memory" | -2147483390 |
| TSDB_CODE_COM_INVALID_CFG_MSG | 0 | 0x0103 | "Invalid config message" | -2147483389 |
| TSDB_CODE_COM_FILE_CORRUPTED | 0 | 0x0104 | "Data file corrupted" | -2147483388 |
| TSDB_CODE_TSC_INVALID_SQL | 0 | 0x0200 | "Invalid SQL statement" | -2147483136 |
| TSDB_CODE_TSC_INVALID_QHANDLE | 0 | 0x0201 | "Invalid qhandle" | -2147483135 |
| TSDB_CODE_TSC_INVALID_TIME_STAMP | 0 | 0x0202 | "Invalid combination of client/service time" | -2147483134 |
| TSDB_CODE_TSC_INVALID_VALUE | 0 | 0x0203 | "Invalid value in client" | -2147483133 |
| TSDB_CODE_TSC_INVALID_VERSION | 0 | 0x0204 | "Invalid client version" | -2147483132 |
| TSDB_CODE_TSC_INVALID_IE | 0 | 0x0205 | "Invalid client ie" | -2147483131 |
| TSDB_CODE_TSC_INVALID_FQDN | 0 | 0x0206 | "Invalid host name" | -2147483130 |
| TSDB_CODE_TSC_INVALID_USER_LENGTH | 0 | 0x0207 | "Invalid user name" | -2147483129 |
| TSDB_CODE_TSC_INVALID_PASS_LENGTH | 0 | 0x0208 | "Invalid password" | -2147483128 |
| TSDB_CODE_TSC_INVALID_DB_LENGTH | 0 | 0x0209 | "Database name too long" | -2147483127 |
| TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH | 0 | 0x020A | "Table name too long" | -2147483126 |
| TSDB_CODE_TSC_INVALID_CONNECTION | 0 | 0x020B | "Invalid connection" | -2147483125 |
| TSDB_CODE_TSC_OUT_OF_MEMORY | 0 | 0x020C | "System out of memory" | -2147483124 |
| TSDB_CODE_TSC_NO_DISKSPACE | 0 | 0x020D | "System out of disk space" | -2147483123 |
| TSDB_CODE_TSC_QUERY_CACHE_ERASED | 0 | 0x020E | "Query cache erased" | -2147483122 |
| TSDB_CODE_TSC_QUERY_CANCELLED | 0 | 0x020F | "Query terminated" | -2147483121 |
| TSDB_CODE_TSC_SORTED_RES_TOO_MANY | 0 | 0x0210 | "Result set too large to be sorted" | -2147483120 |
| TSDB_CODE_TSC_APP_ERROR | 0 | 0x0211 | "Application error" | -2147483119 |
| TSDB_CODE_TSC_ACTION_IN_PROGRESS | 0 | 0x0212 | "Action in progress" | -2147483118 |
| TSDB_CODE_TSC_DISCONNECTED | 0 | 0x0213 | "Disconnected from service" | -2147483117 |
| TSDB_CODE_TSC_NO_WRITE_AUTH | 0 | 0x0214 | "No write permission" | -2147483116 |
| TSDB_CODE_MND_MSG_NOT_PROCESSED | 0 | 0x0300 | "Message not processed" | -2147482880 |
| TSDB_CODE_MND_ACTION_IN_PROGRESS | 0 | 0x0301 | "Message is progressing" | -2147482879 |
| TSDB_CODE_MND_ACTION_NEED_REPROCESSED | 0 | 0x0302 | "Messag need to be reprocessed" | -2147482878 |
| TSDB_CODE_MND_NO_RIGHTS | 0 | 0x0303 | "Insufficient privilege for operation" | -2147482877 |
| TSDB_CODE_MND_APP_ERROR | 0 | 0x0304 | "Unexpected generic error in mnode" | -2147482876 |
| TSDB_CODE_MND_INVALID_CONNECTION | 0 | 0x0305 | "Invalid message connection" | -2147482875 |
| TSDB_CODE_MND_INVALID_MSG_VERSION | 0 | 0x0306 | "Incompatible protocol version" | -2147482874 |
| TSDB_CODE_MND_INVALID_MSG_LEN | 0 | 0x0307 | "Invalid message length" | -2147482873 |
| TSDB_CODE_MND_INVALID_MSG_TYPE | 0 | 0x0308 | "Invalid message type" | -2147482872 |
| TSDB_CODE_MND_TOO_MANY_SHELL_CONNS | 0 | 0x0309 | "Too many connections" | -2147482871 |
| TSDB_CODE_MND_OUT_OF_MEMORY | 0 | 0x030A | "Out of memory in mnode" | -2147482870 |
| TSDB_CODE_MND_INVALID_SHOWOBJ | 0 | 0x030B | "Data expired" | -2147482869 |
| TSDB_CODE_MND_INVALID_QUERY_ID | 0 | 0x030C | "Invalid query id" | -2147482868 |
| TSDB_CODE_MND_INVALID_STREAM_ID | 0 | 0x030D | "Invalid stream id" | -2147482867 |
| TSDB_CODE_MND_INVALID_CONN_ID | 0 | 0x030E | "Invalid connection id" | -2147482866 |
| TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE | 0 | 0x0320 | "Object already there" | -2147482848 |
| TSDB_CODE_MND_SDB_ERROR | 0 | 0x0321 | "Unexpected generic error in sdb" | -2147482847 |
| TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE | 0 | 0x0322 | "Invalid table type" | -2147482846 |
| TSDB_CODE_MND_SDB_OBJ_NOT_THERE | 0 | 0x0323 | "Object not there" | -2147482845 |
| TSDB_CODE_MND_SDB_INVAID_META_ROW | 0 | 0x0324 | "Invalid meta row" | -2147482844 |
| TSDB_CODE_MND_SDB_INVAID_KEY_TYPE | 0 | 0x0325 | "Invalid key type" | -2147482843 |
| TSDB_CODE_MND_DNODE_ALREADY_EXIST | 0 | 0x0330 | "DNode already exists" | -2147482832 |
| TSDB_CODE_MND_DNODE_NOT_EXIST | 0 | 0x0331 | "DNode does not exist" | -2147482831 |
| TSDB_CODE_MND_VGROUP_NOT_EXIST | 0 | 0x0332 | "VGroup does not exist" | -2147482830 |
| TSDB_CODE_MND_NO_REMOVE_MASTER | 0 | 0x0333 | "Master DNode cannot be removed" | -2147482829 |
| TSDB_CODE_MND_NO_ENOUGH_DNODES | 0 | 0x0334 | "Out of DNodes" | -2147482828 |
| TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT | 0 | 0x0335 | "Cluster cfg inconsistent" | -2147482827 |
| TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION | 0 | 0x0336 | "Invalid dnode cfg option" | -2147482826 |
| TSDB_CODE_MND_BALANCE_ENABLED | 0 | 0x0337 | "Balance already enabled" | -2147482825 |
| TSDB_CODE_MND_VGROUP_NOT_IN_DNODE | 0 | 0x0338 | "Vgroup not in dnode" | -2147482824 |
| TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE | 0 | 0x0339 | "Vgroup already in dnode" | -2147482823 |
| TSDB_CODE_MND_DNODE_NOT_FREE | 0 | 0x033A | "Dnode not avaliable" | -2147482822 |
| TSDB_CODE_MND_INVALID_CLUSTER_ID | 0 | 0x033B | "Cluster id not match" | -2147482821 |
| TSDB_CODE_MND_NOT_READY | 0 | 0x033C | "Cluster not ready" | -2147482820 |
| TSDB_CODE_MND_ACCT_ALREADY_EXIST | 0 | 0x0340 | "Account already exists" | -2147482816 |
| TSDB_CODE_MND_INVALID_ACCT | 0 | 0x0341 | "Invalid account" | -2147482815 |
| TSDB_CODE_MND_INVALID_ACCT_OPTION | 0 | 0x0342 | "Invalid account options" | -2147482814 |
| TSDB_CODE_MND_USER_ALREADY_EXIST | 0 | 0x0350 | "User already exists" | -2147482800 |
| TSDB_CODE_MND_INVALID_USER | 0 | 0x0351 | "Invalid user" | -2147482799 |
| TSDB_CODE_MND_INVALID_USER_FORMAT | 0 | 0x0352 | "Invalid user format" | -2147482798 |
| TSDB_CODE_MND_INVALID_PASS_FORMAT | 0 | 0x0353 | "Invalid password format" | -2147482797 |
| TSDB_CODE_MND_NO_USER_FROM_CONN | 0 | 0x0354 | "Can not get user from conn" | -2147482796 |
| TSDB_CODE_MND_TOO_MANY_USERS | 0 | 0x0355 | "Too many users" | -2147482795 |
| TSDB_CODE_MND_TABLE_ALREADY_EXIST | 0 | 0x0360 | "Table already exists" | -2147482784 |
| TSDB_CODE_MND_INVALID_TABLE_ID | 0 | 0x0361 | "Table name too long" | -2147482783 |
| TSDB_CODE_MND_INVALID_TABLE_NAME | 0 | 0x0362 | "Table does not exist" | -2147482782 |
| TSDB_CODE_MND_INVALID_TABLE_TYPE | 0 | 0x0363 | "Invalid table type in tsdb" | -2147482781 |
| TSDB_CODE_MND_TOO_MANY_TAGS | 0 | 0x0364 | "Too many tags" | -2147482780 |
| TSDB_CODE_MND_TOO_MANY_TIMESERIES | 0 | 0x0366 | "Too many time series" | -2147482778 |
| TSDB_CODE_MND_NOT_SUPER_TABLE | 0 | 0x0367 | "Not super table" | -2147482777 |
| TSDB_CODE_MND_COL_NAME_TOO_LONG | 0 | 0x0368 | "Tag name too long" | -2147482776 |
| TSDB_CODE_MND_TAG_ALREAY_EXIST | 0 | 0x0369 | "Tag already exists" | -2147482775 |
| TSDB_CODE_MND_TAG_NOT_EXIST | 0 | 0x036A | "Tag does not exist" | -2147482774 |
| TSDB_CODE_MND_FIELD_ALREAY_EXIST | 0 | 0x036B | "Field already exists" | -2147482773 |
| TSDB_CODE_MND_FIELD_NOT_EXIST | 0 | 0x036C | "Field does not exist" | -2147482772 |
| TSDB_CODE_MND_INVALID_STABLE_NAME | 0 | 0x036D | "Super table does not exist" | -2147482771 |
| TSDB_CODE_MND_DB_NOT_SELECTED | 0 | 0x0380 | "Database not specified or available" | -2147482752 |
| TSDB_CODE_MND_DB_ALREADY_EXIST | 0 | 0x0381 | "Database already exists" | -2147482751 |
| TSDB_CODE_MND_INVALID_DB_OPTION | 0 | 0x0382 | "Invalid database options" | -2147482750 |
| TSDB_CODE_MND_INVALID_DB | 0 | 0x0383 | "Invalid database name" | -2147482749 |
| TSDB_CODE_MND_MONITOR_DB_FORBIDDEN | 0 | 0x0384 | "Cannot delete monitor database" | -2147482748 |
| TSDB_CODE_MND_TOO_MANY_DATABASES | 0 | 0x0385 | "Too many databases for account" | -2147482747 |
| TSDB_CODE_MND_DB_IN_DROPPING | 0 | 0x0386 | "Database not available" | -2147482746 |
| TSDB_CODE_DND_MSG_NOT_PROCESSED | 0 | 0x0400 | "Message not processed" | -2147482624 |
| TSDB_CODE_DND_OUT_OF_MEMORY | 0 | 0x0401 | "Dnode out of memory" | -2147482623 |
| TSDB_CODE_DND_NO_WRITE_ACCESS | 0 | 0x0402 | "No permission for disk files in dnode" | -2147482622 |
| TSDB_CODE_DND_INVALID_MSG_LEN | 0 | 0x0403 | "Invalid message length" | -2147482621 |
| TSDB_CODE_VND_ACTION_IN_PROGRESS | 0 | 0x0500 | "Action in progress" | -2147482368 |
| TSDB_CODE_VND_MSG_NOT_PROCESSED | 0 | 0x0501 | "Message not processed" | -2147482367 |
| TSDB_CODE_VND_ACTION_NEED_REPROCESSED | 0 | 0x0502 | "Action need to be reprocessed" | -2147482366 |
| TSDB_CODE_VND_INVALID_VGROUP_ID | 0 | 0x0503 | "Invalid Vgroup ID" | -2147482365 |
| TSDB_CODE_VND_INIT_FAILED | 0 | 0x0504 | "Vnode initialization failed" | -2147482364 |
| TSDB_CODE_VND_NO_DISKSPACE | 0 | 0x0505 | "System out of disk space" | -2147482363 |
| TSDB_CODE_VND_NO_DISK_PERMISSIONS | 0 | 0x0506 | "No write permission for disk files" | -2147482362 |
| TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR | 0 | 0x0507 | "Missing data file" | -2147482361 |
| TSDB_CODE_VND_OUT_OF_MEMORY | 0 | 0x0508 | "Out of memory" | -2147482360 |
| TSDB_CODE_VND_APP_ERROR | 0 | 0x0509 | "Unexpected generic error in vnode" | -2147482359 |
| TSDB_CODE_VND_INVALID_STATUS | 0 | 0x0510 | "Database not ready" | -2147482352 |
| TSDB_CODE_VND_NOT_SYNCED | 0 | 0x0511 | "Database suspended" | -2147482351 |
| TSDB_CODE_VND_NO_WRITE_AUTH | 0 | 0x0512 | "Write operation denied" | -2147482350 |
| TSDB_CODE_TDB_INVALID_TABLE_ID | 0 | 0x0600 | "Invalid table ID" | -2147482112 |
| TSDB_CODE_TDB_INVALID_TABLE_TYPE | 0 | 0x0601 | "Invalid table type" | -2147482111 |
| TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION | 0 | 0x0602 | "Invalid table schema version" | -2147482110 |
| TSDB_CODE_TDB_TABLE_ALREADY_EXIST | 0 | 0x0603 | "Table already exists" | -2147482109 |
| TSDB_CODE_TDB_INVALID_CONFIG | 0 | 0x0604 | "Invalid configuration" | -2147482108 |
| TSDB_CODE_TDB_INIT_FAILED | 0 | 0x0605 | "Tsdb init failed" | -2147482107 |
| TSDB_CODE_TDB_NO_DISKSPACE | 0 | 0x0606 | "No diskspace for tsdb" | -2147482106 |
| TSDB_CODE_TDB_NO_DISK_PERMISSIONS | 0 | 0x0607 | "No permission for disk files" | -2147482105 |
| TSDB_CODE_TDB_FILE_CORRUPTED | 0 | 0x0608 | "Data file(s) corrupted" | -2147482104 |
| TSDB_CODE_TDB_OUT_OF_MEMORY | 0 | 0x0609 | "Out of memory" | -2147482103 |
| TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE | 0 | 0x060A | "Tag too old" | -2147482102 |
| TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE | 0 | 0x060B | "Timestamp data out of range" | -2147482101 |
| TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP | 0 | 0x060C | "Submit message is messed up" | -2147482100 |
| TSDB_CODE_TDB_INVALID_ACTION | 0 | 0x060D | "Invalid operation" | -2147482099 |
| TSDB_CODE_TDB_INVALID_CREATE_TB_MSG | 0 | 0x060E | "Invalid creation of table" | -2147482098 |
| TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM | 0 | 0x060F | "No table data in memory skiplist" | -2147482097 |
| TSDB_CODE_TDB_FILE_ALREADY_EXISTS | 0 | 0x0610 | "File already exists" | -2147482096 |
| TSDB_CODE_TDB_TABLE_RECONFIGURE | 0 | 0x0611 | "Need to reconfigure table" | -2147482095 |
| TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO | 0 | 0x0612 | "Invalid information to create table" | -2147482094 |
| TSDB_CODE_QRY_INVALID_QHANDLE | 0 | 0x0700 | "Invalid handle" | -2147481856 |
| TSDB_CODE_QRY_INVALID_MSG | 0 | 0x0701 | "Invalid message" | -2147481855 |
| TSDB_CODE_QRY_NO_DISKSPACE | 0 | 0x0702 | "No diskspace for query" | -2147481854 |
| TSDB_CODE_QRY_OUT_OF_MEMORY | 0 | 0x0703 | "System out of memory" | -2147481853 |
| TSDB_CODE_QRY_APP_ERROR | 0 | 0x0704 | "Unexpected generic error in query" | -2147481852 |
| TSDB_CODE_QRY_DUP_JOIN_KEY | 0 | 0x0705 | "Duplicated join key" | -2147481851 |
| TSDB_CODE_QRY_EXCEED_TAGS_LIMIT | 0 | 0x0706 | "Tag conditon too many" | -2147481850 |
| TSDB_CODE_QRY_NOT_READY | 0 | 0x0707 | "Query not ready" | -2147481849 |
| TSDB_CODE_QRY_HAS_RSP | 0 | 0x0708 | "Query should response" | -2147481848 |
| TSDB_CODE_GRANT_EXPIRED | 0 | 0x0800 | "License expired" | -2147481600 |
| TSDB_CODE_GRANT_DNODE_LIMITED | 0 | 0x0801 | "DNode creation limited by licence" | -2147481599 |
| TSDB_CODE_GRANT_ACCT_LIMITED | 0 | 0x0802 | "Account creation limited by license" | -2147481598 |
| TSDB_CODE_GRANT_TIMESERIES_LIMITED | 0 | 0x0803 | "Table creation limited by license" | -2147481597 |
| TSDB_CODE_GRANT_DB_LIMITED | 0 | 0x0804 | "DB creation limited by license" | -2147481596 |
| TSDB_CODE_GRANT_USER_LIMITED | 0 | 0x0805 | "User creation limited by license" | -2147481595 |
| TSDB_CODE_GRANT_CONN_LIMITED | 0 | 0x0806 | "Conn creation limited by license" | -2147481594 |
| TSDB_CODE_GRANT_STREAM_LIMITED | 0 | 0x0807 | "Stream creation limited by license" | -2147481593 |
| TSDB_CODE_GRANT_SPEED_LIMITED | 0 | 0x0808 | "Write speed limited by license" | -2147481592 |
| TSDB_CODE_GRANT_STORAGE_LIMITED | 0 | 0x0809 | "Storage capacity limited by license" | -2147481591 |
| TSDB_CODE_GRANT_QUERYTIME_LIMITED | 0 | 0x080A | "Query time limited by license" | -2147481590 |
| TSDB_CODE_GRANT_CPU_LIMITED | 0 | 0x080B | "CPU cores limited by license" | -2147481589 |
| TSDB_CODE_SYN_INVALID_CONFIG | 0 | 0x0900 | "Invalid Sync Configuration" | -2147481344 |
| TSDB_CODE_SYN_NOT_ENABLED | 0 | 0x0901 | "Sync module not enabled" | -2147481343 |
| TSDB_CODE_WAL_APP_ERROR | 0 | 0x1000 | "Unexpected generic error in wal" | -2147479552 |
......@@ -41,9 +41,9 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
在TDengine中,普通表的数据模型中可使用以下 10 种数据类型。
| | 类型 | Bytes | 说明 |
| # | **类型** | **Bytes** | **说明** |
| ---- | :-------: | ------ | ------------------------------------------------------------ |
| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。(从 2.0.18 版本开始,已经去除了这一时间范围限制) |
| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。(从 2.0.18.0 版本开始,已经去除了这一时间范围限制) |
| 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31 用作 NULL |
| 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL |
| 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
......@@ -53,6 +53,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
| 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用于 NULL |
| 9 | BOOL | 1 | 布尔型,{true, false} |
| 10 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 |
<!-- REPLACE_OPEN_TO_ENTERPRISE__COLUMN_TYPE_ADDONS -->
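作为参考,下面给出一个示意性的建表语句,演示上述部分数据类型的用法(表名 sensor_demo 与各列名均为假设的示例,并非本文档后续示例所用的表):

```mysql
-- 示意:第一列必须为 TIMESTAMP 类型,其余列类型仅为演示
CREATE TABLE IF NOT EXISTS sensor_demo (ts TIMESTAMP, speed INT, temperature FLOAT, ok BOOL, note NCHAR(10));
```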
**Tips**:
1. TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
......@@ -63,11 +64,11 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
- **创建数据库**
```mysql
CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [UPDATE 1];
CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [DAYS days] [UPDATE 1];
```
说明:
说明:<!-- 注意:上一行中的 SQL 语句在企业版文档中会被替换,因此修改此语句的话,需要修改企业版文档的替换字典键值!! -->
1) KEEP是该数据库的数据保留多长天数,缺省是3650天(10年),数据库会自动删除超过时限的数据;
1) KEEP是该数据库的数据保留多长天数,缺省是3650天(10年),数据库会自动删除超过时限的数据;<!-- REPLACE_OPEN_TO_ENTERPRISE__KEEP_PARAM_DESCRIPTION -->
2) UPDATE 标志数据库支持更新相同时间戳数据;
......@@ -75,7 +76,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
4) 一条SQL 语句的最大长度为65480个字符;
5) 数据库还有更多与存储相关的配置参数,请参见系统管理
5) 数据库还有更多与存储相关的配置参数,请参见 [服务端配置](https://www.taosdata.com/cn/documentation/taos-sql#management) 章节
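例如,下面是一个示意性的建库语句(库名 power 与各参数取值仅为示例):

```mysql
-- 示意:数据保留 365 天,每个数据文件存储 10 天数据,并允许更新相同时间戳的数据
CREATE DATABASE IF NOT EXISTS power KEEP 365 DAYS 10 UPDATE 1;
```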
- **显示系统当前参数**
......@@ -167,22 +168,22 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name TAGS (tag_value1, ...);
```
以指定的超级表为模板,指定 tags 的值来创建数据表。
以指定的超级表为模板,指定 TAGS 的值来创建数据表。
- **以超级表为模板创建数据表,并指定具体的 tags 列**
- **以超级表为模板创建数据表,并指定具体的 TAGS 列**
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...);
```
以指定的超级表为模板,指定一部分 tags 列的值来创建数据表。(没被指定的 tags 列会设为空值。)
说明:从 2.0.17 版本开始支持这种方式。在之前的版本中,不允许指定 tags 列,而必须显式给出所有 tags 列的取值。
以指定的超级表为模板,指定一部分 TAGS 列的值来创建数据表(没被指定的 TAGS 列会设为空值)。
说明:从 2.0.17.0 版本开始支持这种方式。在之前的版本中,不允许指定 TAGS 列,而必须显式给出所有 TAGS 列的取值。
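例如,下面的示意语句以超级表 meters(其定义见本文档末尾的建表示例)为模板,仅指定 location 标签创建子表,未指定的 groupId 标签将被置为空值(子表名 d1002 为假设):

```mysql
-- 示意:仅指定部分 TAGS 列建表,需要 2.0.17.0 及以上版本
CREATE TABLE IF NOT EXISTS d1002 USING meters (location) TAGS ('Beijing.Haidian');
```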
- **批量创建数据表**
```mysql
CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
```
以更快的速度批量创建大量数据表。(服务器端 2.0.14 及以上版本)
以更快的速度批量创建大量数据表(服务器端 2.0.14 及以上版本)。
说明:
......@@ -220,6 +221,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
```mysql
SET MAX_BINARY_DISPLAY_WIDTH <nn>;
```
如显示的内容后面以...结尾时,表示该内容已被截断,可通过本命令修改显示字符宽度以显示完整的内容。
- **获取表的结构信息**
......@@ -236,14 +238,14 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
1) 列的最大个数为1024,最小个数为2;
2) 列名最大长度为64
2) 列名最大长度为64
- **表删除列**
```mysql
ALTER TABLE tb_name DROP COLUMN field_name;
```
如果表是通过[超级表](../super-table/)创建,更改表结构的操作只能对超级表进行。同时针对超级表的结构更改对所有通过该结构创建的表生效。对于不是通过超级表创建的表,可以直接修改表结构
如果表是通过超级表创建,更改表结构的操作只能对超级表进行。同时针对超级表的结构更改对所有通过该结构创建的表生效。对于不是通过超级表创建的表,可以直接修改表结构。
## <a class="anchor" id="super-table"></a>超级表STable管理
......@@ -254,7 +256,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
```mysql
CREATE STABLE [IF NOT EXISTS] stb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...]) TAGS (tag1_name tag_type1, tag2_name tag_type2 [, tag3_name tag_type3]);
```
创建 STable,与创建表的 SQL 语法相似,但需指定 TAGS 字段的名称和类型
创建 STable,与创建表的 SQL 语法相似,但需指定 TAGS 字段的名称和类型
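例如,下面的示意语句创建本文档后续示例中使用的智能电表超级表 meters(列与标签定义与文末的建表示例一致):

```mysql
-- 示意:第一列为时间戳,TAGS 中定义地点和分组两个标签
CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```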
说明:
......@@ -276,7 +278,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
- **显示当前数据库下的所有超级表信息**
```mysql
SHOW STABLES [LIKE tb_name_wildcar];
SHOW STABLES [LIKE tb_name_wildcard];
```
查看数据库内全部 STable,及其相关信息,包括 STable 的名称、创建时间、列数量、标签(TAG)数量、通过该 STable 建表的数量。
......@@ -341,7 +343,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
```mysql
INSERT INTO tb_name VALUES (field_value, ...);
```
向表tb_name中插入一条记录
向表tb_name中插入一条记录
- **插入一条记录,数据对应到指定的列**
```mysql
......@@ -353,42 +355,51 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM
```mysql
INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
向表tb_name中插入多条记录
向表tb_name中插入多条记录
**注意**:在使用“插入多条记录”方式写入数据时,不能把第一列的时间戳取值都设为now,否则会导致语句中的多条记录使用相同的时间戳,于是就可能出现相互覆盖以致这些数据行无法全部被正确保存。
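例如,下面的示意写法使用显式时间戳一次写入两条记录,以避免上述 now 带来的覆盖问题(表 d1001 的结构与本文档后面的查询示例一致):

```mysql
-- 示意:显式给出各条记录的时间戳,一次写入两条记录
INSERT INTO d1001 VALUES ('2018-10-03 14:38:05.000', 10.3, 219, 0.31) ('2018-10-03 14:38:15.000', 12.6, 218, 0.33);
```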
- **按指定的列插入多条记录**
```mysql
INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
向表tb_name中按指定的列插入多条记录
向表tb_name中按指定的列插入多条记录
- **向多个表插入多条记录**
```mysql
INSERT INTO tb1_name VALUES (field1_value1, ...) (field1_value2, ...) ...
tb2_name VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
同时向表tb1_name和tb2_name中分别插入多条记录
同时向表tb1_name和tb2_name中分别插入多条记录
- **同时向多个表按列插入多条记录**
```mysql
INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...
tb2_name (tb2_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
同时向表tb1_name和tb2_name中按列分别插入多条记录
同时向表tb1_name和tb2_name中按列分别插入多条记录
注意:允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。
注意:
1) 如果时间戳为now,系统将自动使用客户端当前时间作为该记录的时间戳;
2) 允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。
- <a class="anchor" id="auto_create_table"></a>**插入记录时自动建表**
```mysql
INSERT INTO tb_name USING stb_name TAGS (tag_value1, ...) VALUES (field_value1, ...);
```
如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 tags 取值。
如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。
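例如,下面是一个示意性的自动建表写入语句(子表名 d2001 为假设,超级表 meters 的定义见文末建表示例,标签值需按 location、groupId 的顺序给出):

```mysql
-- 示意:若 d2001 不存在则自动以 meters 为模板创建,然后写入一条记录
INSERT INTO d2001 USING meters TAGS ('Beijing.Haidian', 2) VALUES (now, 10.2, 220, 0.23);
```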
- **插入记录时自动建表,并指定具体的 tags 列**
- **插入记录时自动建表,并指定具体的 TAGS 列**
```mysql
INSERT INTO tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...) VALUES (field_value1, ...);
```
在自动建表时,可以只是指定部分 tags 列的取值,未被指定的 tags 列将取为空值。
在自动建表时,可以只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将取为空值。
- **同时向多个表按列插入多条记录,自动建表**
```mysql
INSERT INTO tb1_name (tb1_field1_name, ...) [USING stb1_name TAGS (tag_value1, ...)] VALUES (field1_value1, ...) (field1_value2, ...) ...
tb2_name (tb2_field1_name, ...) [USING stb2_name TAGS (tag_value2, ...)] VALUES (field1_value1, ...) (field1_value2, ...) ...;
```
以自动建表的方式,同时向表tb1_name和tb2_name中按列分别插入多条记录。
**历史记录写入**:可使用 IMPORT 或者 INSERT 命令,IMPORT 的语法、功能与 INSERT 完全一样。
......@@ -471,7 +482,7 @@ Query OK, 9 row(s) in set (0.002022s)
SELECT * FROM d1001;
SELECT d1001.* FROM d1001;
```
在Join查询中,带前缀的\*和不带前缀\*返回的结果有差别, \*返回全部表的所有列数据(不包含标签),带前缀的通配符,则只返回该表的列数据。
在JOIN查询中,带前缀的\*和不带前缀\*返回的结果有差别, \*返回全部表的所有列数据(不包含标签),带前缀的通配符,则只返回该表的列数据。
```mysql
taos> SELECT * FROM d1001, d1003 WHERE d1001.ts=d1003.ts;
ts | current | voltage | phase | ts | current | voltage | phase |
......@@ -487,7 +498,7 @@ taos> SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts;
Query OK, 1 row(s) in set (0.020443s)
```
在使用SQL函数来进行查询过程中,部分SQL函数支持通配符操作。其中的区别在于:
在使用SQL函数来进行查询过程中,部分SQL函数支持通配符操作。其中的区别在于:
```count(*)```函数只返回一列。```first```、```last```、```last_row```函数则是返回全部列。
```mysql
......@@ -522,12 +533,12 @@ Query OK, 2 row(s) in set (0.003112s)
##### 获取标签列的去重取值
从 2.0.15 版本开始,支持在超级表查询标签列时,指定 distinct 关键字,这样将返回指定标签列的所有不重复取值。
从 2.0.15 版本开始,支持在超级表查询标签列时,指定 DISTINCT 关键字,这样将返回指定标签列的所有不重复取值。
```mysql
SELECT DISTINCT tag_name FROM stb_name;
```
注意:目前 distinct 关键字只支持对超级表的标签列进行去重,而不能用于普通列。
注意:目前 DISTINCT 关键字只支持对超级表的标签列进行去重,而不能用于普通列。
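例如,针对文末示例中的超级表 meters,下面的示意查询返回 location 标签的全部不重复取值:

```mysql
-- 示意:DISTINCT 仅适用于超级表的标签列
SELECT DISTINCT location FROM meters;
```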
......@@ -562,7 +573,7 @@ SELECT * FROM d1001;
#### 特殊功能
部分特殊的查询功能可以不使用FROM子句执行。获取当前所在的数据库 database()
部分特殊的查询功能可以不使用FROM子句执行。获取当前所在的数据库 database()
```mysql
taos> SELECT DATABASE();
database() |
......@@ -570,7 +581,7 @@ taos> SELECT DATABASE();
power |
Query OK, 1 row(s) in set (0.000079s)
```
如果登录的时候没有指定默认数据库,且没有使用```use```命令切换数据,则返回NULL。
如果登录的时候没有指定默认数据库,且没有使用```USE```命令切换数据,则返回NULL。
```mysql
taos> SELECT DATABASE();
database() |
......@@ -578,7 +589,7 @@ taos> SELECT DATABASE();
NULL |
Query OK, 1 row(s) in set (0.000184s)
```
获取服务器和客户端版本号:
获取服务器和客户端版本号
```mysql
taos> SELECT CLIENT_VERSION();
client_version() |
......@@ -622,7 +633,7 @@ SELECT TBNAME, location FROM meters;
```mysql
SELECT COUNT(TBNAME) FROM meters;
```
以上两个查询均只支持在Where条件子句中添加针对标签(TAGS)的过滤条件。例如:
以上两个查询均只支持在WHERE条件子句中添加针对标签(TAGS)的过滤条件。例如:
```mysql
taos> SELECT TBNAME, location FROM meters;
tbname | location |
......@@ -648,30 +659,31 @@ Query OK, 1 row(s) in set (0.001091s)
- 参数 LIMIT 控制输出条数,OFFSET 指定从第几条开始输出。LIMIT/OFFSET 对结果集的执行顺序在 ORDER BY 之后。
* 在有 GROUP BY 子句的情况下,LIMIT 参数控制的是每个分组中至多允许输出的条数。
- 参数 SLIMIT 控制由 GROUP BY 指令划分的分组中,至多允许输出几个分组的数据。
- 通过 ">>" 输出结果可以导出到指定文件。
- 通过 “>>” 输出结果可以导出到指定文件。
### 支持的条件过滤操作
| Operation | Note | Applicable Data Types |
| ----------- | ----------------------------- | ------------------------------------- |
| > | larger than | **`timestamp`** and all numeric types |
| < | smaller than | **`timestamp`** and all numeric types |
| >= | larger than or equal to | **`timestamp`** and all numeric types |
| <= | smaller than or equal to | **`timestamp`** and all numeric types |
| = | equal to | all types |
| <> | not equal to | all types |
| between and | within a certain range | **`timestamp`** and all numeric types |
| % | match with any char sequences | **`binary`** **`nchar`** |
| _ | match with a single char | **`binary`** **`nchar`** |
| **Operation** | **Note** | **Applicable Data Types** |
| --------------- | ----------------------------- | ------------------------------------- |
| > | larger than | **`timestamp`** and all numeric types |
| < | smaller than | **`timestamp`** and all numeric types |
| >= | larger than or equal to | **`timestamp`** and all numeric types |
| <= | smaller than or equal to | **`timestamp`** and all numeric types |
| = | equal to | all types |
| <> | not equal to | all types |
| between and | within a certain range | **`timestamp`** and all numeric types |
| % | match with any char sequences | **`binary`** **`nchar`** |
| _ | match with a single char | **`binary`** **`nchar`** |
1. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。
2. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如:((value > 20 AND value < 30) OR (value < 12)) 。
3. 从 2.0.17 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
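结合以上规则,下面给出一个示意性的过滤查询(以文末示例中的超级表 meters 为例,BETWEEN AND 需要 2.0.17 及以上版本;同一列 current 上使用 OR,不同列之间使用 AND 连接):

```mysql
-- 示意:不同列用 AND 连接,同一列可用 OR 组合,BETWEEN AND 指定取值范围
SELECT * FROM meters WHERE voltage BETWEEN 215 AND 225 AND (current > 10 OR current < 3);
```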
<!--
### <a class="anchor" id="having"></a>GROUP BY 之后的 HAVING 过滤
<a class="anchor" id="having"></a>
### GROUP BY 之后的 HAVING 过滤
从 2.0.20 版本开始,GROUP BY 之后允许再跟一个 HAVING 子句,对成组后的各组数据再做筛选。HAVING 子句可以使用聚合函数和选择函数作为过滤条件(但暂时不支持 LEASTSQUARES、TOP、BOTTOM、LAST_ROW)。
从 2.0.20.0 版本开始,GROUP BY 之后允许再跟一个 HAVING 子句,对成组后的各组数据再做筛选。HAVING 子句可以使用聚合函数和选择函数作为过滤条件(但暂时不支持 LEASTSQUARES、TOP、BOTTOM、LAST_ROW)。
例如,如下语句只会输出 `AVG(f1) > 0` 的分组:
```mysql
......@@ -679,7 +691,8 @@ SELECT AVG(f1), SPREAD(f1, f2, st2.f1) FROM st2 WHERE f1 > 0 GROUP BY f1 HAVING
```
-->
### <a class="anchor" id="union"></a>UNION ALL 操作符
<a class="anchor" id="union"></a>
### UNION ALL 操作符
```mysql
SELECT ...
......@@ -691,37 +704,38 @@ TDengine 支持 UNION ALL 操作符。也就是说,如果多个 SELECT 子句
### SQL 示例
- 对于下面的例子,表tb1用以下语句创建
- 对于下面的例子,表tb1用以下语句创建
```mysql
CREATE TABLE tb1 (ts TIMESTAMP, col1 INT, col2 FLOAT, col3 BINARY(50));
```
- 查询tb1刚过去的一个小时的所有记录
- 查询tb1刚过去的一个小时的所有记录
```mysql
SELECT * FROM tb1 WHERE ts >= NOW - 1h;
```
- 查询表tb1从2018-06-01 08:00:00.000 到2018-06-02 08:00:00.000时间范围,并且col3的字符串是'nny'结尾的记录,结果按照时间戳降序
- 查询表tb1从2018-06-01 08:00:00.000 到2018-06-02 08:00:00.000时间范围,并且col3的字符串是'nny'结尾的记录,结果按照时间戳降序
```mysql
SELECT * FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND ts <= '2018-06-02 08:00:00.000' AND col3 LIKE '%nny' ORDER BY ts DESC;
```
- 查询col1与col2的和,并取名complex, 时间大于2018-06-01 08:00:00.000, col2大于1.2,结果输出仅仅10条记录,从第5条开始
- 查询col1与col2的和,并取名complex, 时间大于2018-06-01 08:00:00.000, col2大于1.2,结果输出仅仅10条记录,从第5条开始
```mysql
SELECT (col1 + col2) AS 'complex' FROM tb1 WHERE ts > '2018-06-01 08:00:00.000' AND col2 > 1.2 LIMIT 10 OFFSET 5;
```
- 查询过去10分钟的记录,col2的值大于3.14,并且将结果输出到文件 `/home/testoutpu.csv`.
- 查询过去10分钟的记录,col2的值大于3.14,并且将结果输出到文件 `/home/testoutpu.csv`
```mysql
SELECT COUNT(*) FROM tb1 WHERE ts >= NOW - 10m AND col2 > 3.14 >> /home/testoutpu.csv;
```
## <a class="anchor" id="functions"></a>SQL 函数
<a class="anchor" id="functions"></a>
## SQL 函数
### 聚合函数
......@@ -741,7 +755,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
说明:
1)可以使用星号\*来替代具体的字段,使用星号(\*)返回全部记录数量。
1)可以使用星号(\*)来替代具体的字段,使用星号(\*)返回全部记录数量。
2)针对同一表的(不包含NULL值)字段查询结果均相同。
......@@ -1012,7 +1026,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
1)*k*值取值范围1≤*k*≤100;
2)系统同时返回该记录关联的时间戳列。
2)系统同时返回该记录关联的时间戳列;
3)限制:TOP函数不支持FILL子句。
示例:
```mysql
......@@ -1048,7 +1064,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
1)*k*值取值范围1≤*k*≤100;
2)系统同时返回该记录关联的时间戳列。
2)系统同时返回该记录关联的时间戳列;
3)限制:BOTTOM函数不支持FILL子句。
示例:
```mysql
......@@ -1124,7 +1142,9 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数
适用于:**表、超级表**。
说明:与last函数不同,last_row不支持时间范围限制,强制返回最后一条记录。
说明:与LAST函数不同,LAST_ROW不支持时间范围限制,强制返回最后一条记录。
限制:LAST_ROW()不能与INTERVAL一起使用。
示例:
```mysql
......@@ -1233,40 +1253,40 @@ SELECT function_list FROM tb_name
[WHERE where_condition]
INTERVAL (interval [, offset])
[SLIDING sliding]
[FILL ({NONE | VALUE | PREV | NULL | LINEAR})]
[FILL ({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
SELECT function_list FROM stb_name
[WHERE where_condition]
INTERVAL (interval [, offset])
[SLIDING sliding]
[FILL ({ VALUE | PREV | NULL | LINEAR})]
[FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})]
[GROUP BY tags]
```
- 聚合时间段的长度由关键词INTERVAL指定,最短时间间隔10毫秒(10a),并且支持偏移(偏移必须小于间隔)。聚合查询中,能够同时执行的聚合和选择函数仅限于单个输出的函数:count、avg、sum 、stddev、leastsquares、percentile、min、max、first、last,不能使用具有多行输出结果的函数(例如:top、bottom、diff以及四则运算)。
- WHERE语句可以指定查询的起止时间和其他过滤条件
- SLIDING语句用于指定聚合时间段的前向增量
- WHERE语句可以指定查询的起止时间和其他过滤条件
- SLIDING语句用于指定聚合时间段的前向增量
- FILL语句指定某一时间区间数据缺失的情况下的填充模式。填充模式包括以下几种:
* 不进行填充:NONE(默认填充模式)。
* VALUE填充:固定值填充,此时需要指定填充的数值。例如:fill(value, 1.23)。
* NULL填充:使用NULL填充数据。例如:fill(null)。
* PREV填充:使用前一个非NULL值填充数据。例如:fill(prev)。
1. 不进行填充:NONE(默认填充模式)。
2. VALUE填充:固定值填充,此时需要指定填充的数值。例如:FILL(VALUE, 1.23)。
3. NULL填充:使用NULL填充数据。例如:FILL(NULL)。
4. PREV填充:使用前一个非NULL值填充数据。例如:FILL(PREV)。
5. NEXT填充:使用下一个非NULL值填充数据。例如:FILL(NEXT)。
说明:
1. 使用FILL语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过1千万条具有插值的结果。
2. 在时间维度聚合中,返回的结果中时间序列严格单调递增。
3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用group by语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了group by语句分组,则返回结果中每个group内不按照时间序列严格单调递增。
3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用GROUP BY语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了GROUP BY语句分组,则返回结果中每个GROUP内不按照时间序列严格单调递增。
时间聚合也常被用于连续查询场景,可以参考文档 [连续查询(Continuous Query)](https://www.taosdata.com/cn/documentation/advanced-features#continuous-query)。
**示例:** 智能电表的建表语句如下:
**示例**: 智能电表的建表语句如下:
```mysql
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```
针对智能电表采集的数据,以10分钟为一个阶段,计算过去24小时的电流数据的平均值、最大值、电流的中位数、以及随着时间变化的电流走势拟合直线。如果没有计算值,用前一个非NULL值填充。
使用的查询语句如下:
针对智能电表采集的数据,以10分钟为一个阶段,计算过去24小时的电流数据的平均值、最大值、电流的中位数、以及随着时间变化的电流走势拟合直线。如果没有计算值,用前一个非NULL值填充。使用的查询语句如下:
```mysql
SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), PERCENTILE(current, 50) FROM meters
......@@ -1287,15 +1307,15 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P
## TAOS SQL其他约定
**group by的限制**
**GROUP BY的限制**
TAOS SQL支持对标签、tbname进行group by操作,也支持普通列进行group by,前提是:仅限一列且该列的唯一值小于10万个。
TAOS SQL支持对标签、TBNAME进行GROUP BY操作,也支持普通列进行GROUP BY,前提是:仅限一列且该列的唯一值小于10万个。
**join操作的限制**
**JOIN操作的限制**
TAOS SQL支持表之间按主键时间戳来join两张表的列,暂不支持两个表之间聚合后的四则运算。
**is not null与不为空的表达式适用范围**
**IS NOT NULL与不为空的表达式适用范围**
is not null支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。
IS NOT NULL支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。
......@@ -26,17 +26,17 @@
## 2. Windows平台下JDBCDriver找不到动态链接库,怎么办?
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/jdbcdriver找不到动态链接库/)
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)
## 3. 创建数据表时提示more dnodes are needed
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/创建数据表时提示more-dnodes-are-needed/)
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)
## 4. 如何让TDengine crash时生成core文件?
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/tdengine-crash时生成core文件的方法/)
请看为此问题撰写的[技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)
## 5. 遇到错误"Unable to establish connection", 我怎么办?
## 5. 遇到错误“Unable to establish connection”, 我怎么办?
客户端遇到连接故障,请按照下面的步骤进行检查:
......@@ -51,13 +51,13 @@
4. 确认客户端连接时指定了正确的服务器FQDN (Fully Qualified Domain Name(可在服务器上执行Linux命令hostname -f获得)),FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)
5. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件
5. ping服务器FQDN,如果没有反应,请检查你的网络,DNS设置,或客户端所在计算机的系统hosts文件。如果部署的是TDengine集群,客户端需要能ping通所有集群节点的FQDN。
6. 检查防火墙设置(Ubuntu 使用 ufw status,CentOS 使用 firewall-cmd --list-port),确认TCP/UDP 端口6030-6042 是打开的
7. 对于Linux上的JDBC(ODBC, Python, Go等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*
8. 对于windows上的JDBC, ODBC, Python, Go等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*)
8. 对于Windows上的JDBC, ODBC, Python, Go等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 *C:\Windows\System32*)
9. 如果仍不能排除连接故障
......@@ -70,7 +70,8 @@
10. 也可以使用taos程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)
## 6. 遇到错误“Unexpected generic error in RPC”或者"TDengine Error: Unable to resolve FQDN", 我怎么办?
## 6. 遇到错误“Unexpected generic error in RPC”或者“Unable to resolve FQDN”,我怎么办?
产生这个错误,是由于客户端或数据节点无法解析FQDN(Fully Qualified Domain Name)导致。对于TAOS Shell或客户端应用,请做如下检查:
1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)
......@@ -102,7 +103,7 @@ TDengine 目前尚不支持删除功能,未来根据用户需求可能会支
批量插入。每条写入语句可以一张表同时插入多条记录,也可以同时插入多张表的多条记录。
## 12. windows系统下插入的nchar类数据中的汉字被解析成了乱码如何解决?
## 12. Windows系统下插入的nchar类数据中的汉字被解析成了乱码如何解决?
Windows下插入nchar类的数据中如果有中文,请先确认系统的地区设置成了中国(在Control Panel里可以设置),这时cmd中的`taos`客户端应该已经可以正常工作了;如果是在IDE里开发Java应用,比如Eclipse, Intellij,请确认IDE里的文件编码为GBK(这是Java默认的编码类型),然后在生成Connection时,初始化客户端的配置,具体语句如下:
```JAVA
......@@ -115,15 +116,15 @@ Connection = DriverManager.getConnection(url, properties);
## 13. JDBC报错: the excuted SQL is not a DML or a DDL?
请更新至最新的JDBC驱动
```JAVA
```xml
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.4</version>
<version>2.0.27</version>
</dependency>
```
## 14. taos connect failed, reason: invalid timestamp
## 14. taos connect failed, reason&#58; invalid timestamp
常见原因是服务器和客户端时间没有校准,可以通过和时间服务器同步的方式(Linux 下使用 ntpdate 命令,Windows 在系统时间设置中选择自动同步)校准。
......@@ -157,7 +158,8 @@ ALTER LOCAL RESETLOG;
其含义是,清空本机所有由客户端生成的日志文件。
## <a class="anchor" id="timezone"></a>18. 时间戳的时区信息是怎样处理的?
<a class="anchor" id="timezone"></a>
## 18. 时间戳的时区信息是怎样处理的?
TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。
......@@ -167,12 +169,13 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端
3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。
4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。
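例如,下面是一个示意性的写入语句,直接使用带时区的 RFC 3339 时间戳字符串(假设已存在一张包含 ts、current、voltage、phase 四列的表 d1001,表名与结构均为示例假设):

```mysql
-- 示意:时间戳字符串自带时区信息,不受本地时区设置影响
INSERT INTO d1001 VALUES ('2013-04-12T15:52:01.123+08:00', 10.3, 219, 0.31);
```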
## <a class="anchor" id="port"></a>19. TDengine 都会用到哪些网络端口?
<a class="anchor" id="port"></a>
## 19. TDengine 都会用到哪些网络端口?
在 TDengine 2.0 版本中,会用到以下这些网络端口(以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么这里列举的端口都会出现变化),管理员可以参考这里的信息调整防火墙设置:
| 协议 | 默认端口 | 用途说明 | 修改方法 |
| --- | --------- | ------------------------------- | ------------------------------ |
| :--- | :-------- | :---------------------------------- | :------------------------------- |
| TCP | 6030 | 客户端与服务端之间通讯。 | 由配置文件设置 serverPort 决定。 |
| TCP | 6035 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 |
| TCP | 6040 | 多节点集群的节点间数据同步。 | 随 serverPort 端口变化。 |
......
name: tdengine
base: core18
version: '2.1.0.0'
version: '2.1.1.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
......@@ -73,7 +73,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.1.0.0
- usr/lib/libtaos.so.2.1.1.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
......
......@@ -318,7 +318,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf);
int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, void* buf);
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo);
......
......@@ -62,14 +62,16 @@ typedef struct CChildTableMeta {
int32_t vgId;
STableId id;
uint8_t tableType;
char sTableName[TSDB_TABLE_FNAME_LEN]; //super table name, not full name
char sTableName[TSDB_TABLE_FNAME_LEN]; // TODO: refactor super table name, not full name
uint64_t suid; // super table id
} CChildTableMeta;
typedef struct STableMeta {
int32_t vgId;
STableId id;
uint8_t tableType;
char sTableName[TSDB_TABLE_FNAME_LEN];
char sTableName[TSDB_TABLE_FNAME_LEN]; // super table name
uint64_t suid; // super table id
int16_t sversion;
int16_t tversion;
STableComInfo tableInfo;
......
......@@ -94,6 +94,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
pTableMeta->tableType = pTableMetaMsg->tableType;
pTableMeta->vgId = pTableMetaMsg->vgroup.vgId;
pTableMeta->suid = pTableMetaMsg->suid;
pTableMeta->tableInfo = (STableComInfo) {
.numOfTags = pTableMetaMsg->numOfTags,
......
......@@ -2488,13 +2488,11 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pTableMetaInfo->pTableMeta = (STableMeta *)tmp;
memset(pTableMetaInfo->pTableMeta, 0, size);
pTableMetaInfo->tableMetaSize = size;
} else {
memset(pTableMetaInfo->pTableMeta, 0, size);
pTableMetaInfo->tableMetaSize = size;
}
memset(pTableMetaInfo->pTableMeta, 0, size);
pTableMetaInfo->tableMetaSize = size;
pTableMetaInfo->pTableMeta->tableType = -1;
pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
......@@ -2510,8 +2508,9 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
if (pMeta->id.uid > 0) {
// in case of child table, here only get the
if (pMeta->tableType == TSDB_CHILD_TABLE) {
int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, name, buf);
int32_t code = tscCreateTableMetaFromSTableMeta(pTableMetaInfo->pTableMeta, name, buf);
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate);
}
......
......@@ -3862,20 +3862,25 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
CChildTableMeta* cMeta = calloc(1, sizeof(CChildTableMeta));
cMeta->tableType = TSDB_CHILD_TABLE;
cMeta->vgId = pTableMeta->vgId;
cMeta->id = pTableMeta->id;
cMeta->vgId = pTableMeta->vgId;
cMeta->id = pTableMeta->id;
cMeta->suid = pTableMeta->suid;
tstrncpy(cMeta->sTableName, pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
return cMeta;
}
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name, void* buf) {
int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, void* buf) {
assert(pChild != NULL && buf != NULL);
STableMeta* p = buf;
STableMeta* p = buf;
taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1);
if (p->id.uid > 0) { // tableMeta exists, build child table meta and return
// tableMeta exists, build child table meta according to the super table meta
// the uid need to be checked in addition to the general name of the super table.
if (p->id.uid > 0 && pChild->suid == p->id.uid) {
pChild->sversion = p->sversion;
pChild->tversion = p->tversion;
......
......@@ -142,12 +142,15 @@ extern int32_t tsMonitorInterval;
extern int8_t tsEnableStream;
// internal
extern int8_t tsCompactMnodeWal;
extern int8_t tsPrintAuth;
extern int8_t tscEmbedded;
extern char configDir[];
extern char tsVnodeDir[];
extern char tsDnodeDir[];
extern char tsMnodeDir[];
extern char tsMnodeBakDir[];
extern char tsMnodeTmpDir[];
extern char tsDataDir[];
extern char tsLogDir[];
extern char tsScriptDir[];
......
......@@ -176,12 +176,15 @@ int32_t tsMonitorInterval = 30; // seconds
int8_t tsEnableStream = 1;
// internal
int8_t tsCompactMnodeWal = 0;
int8_t tsPrintAuth = 0;
int8_t tscEmbedded = 0;
char configDir[TSDB_FILENAME_LEN] = {0};
char tsVnodeDir[TSDB_FILENAME_LEN] = {0};
char tsDnodeDir[TSDB_FILENAME_LEN] = {0};
char tsMnodeDir[TSDB_FILENAME_LEN] = {0};
char tsMnodeTmpDir[TSDB_FILENAME_LEN] = {0};
char tsMnodeBakDir[TSDB_FILENAME_LEN] = {0};
char tsDataDir[TSDB_FILENAME_LEN] = {0};
char tsScriptDir[TSDB_FILENAME_LEN] = {0};
char tsTempDir[TSDB_FILENAME_LEN] = "/tmp/";
......
......@@ -207,10 +207,69 @@ public class TSDBPreparedStatementTest {
while(rs.next()) {
rows++;
}
Assert.assertEquals(numOfRows, rows);
Assert.assertEquals(numOfRows, rows);
}
}
@Test
public void bindDataSelectColumnTest() throws SQLException {
Statement stmt = conn.createStatement();
int numOfRows = 1000;
for (int loop = 0; loop < 10; loop++){
stmt.execute("drop table if exists weather_test");
stmt.execute("create table weather_test(ts timestamp, f1 nchar(4), f2 float, f3 double, f4 timestamp, f5 int, f6 bool, f7 binary(10))");
TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? (ts, f1, f7) values(?, ?, ?)");
Random r = new Random();
s.setTableName("weather_test");
ArrayList<Long> ts = new ArrayList<Long>();
for(int i = 0; i < numOfRows; i++) {
ts.add(System.currentTimeMillis() + i);
}
s.setTimestamp(0, ts);
int random = 10 + r.nextInt(5);
ArrayList<String> s2 = new ArrayList<String>();
for(int i = 0; i < numOfRows; i++) {
if(i % random == 0) {
s2.add(null);
}else{
s2.add("分支" + i % 4);
}
}
s.setNString(1, s2, 4);
random = 10 + r.nextInt(5);
ArrayList<String> s5 = new ArrayList<String>();
for(int i = 0; i < numOfRows; i++) {
if(i % random == 0) {
s5.add(null);
}else{
s5.add("test" + i % 10);
}
}
s.setString(2, s5, 10);
s.columnDataAddBatch();
s.columnDataExecuteBatch();
s.columnDataCloseBatch();
String sql = "select * from weather_test";
PreparedStatement statement = conn.prepareStatement(sql);
ResultSet rs = statement.executeQuery();
int rows = 0;
while(rs.next()) {
rows++;
}
Assert.assertEquals(numOfRows, rows);
}
}
@Test
public void setBoolean() throws SQLException {
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
......
......@@ -312,11 +312,7 @@ static int test_sqls_in_stmt(SQLHENV env, SQLHDBC conn, SQLHSTMT stmt, const cha
size_t len = 0;
ssize_t n = 0;
#ifdef _MSC_VER
n = taosGetlineImp(&line, &len, f);
#else
n = getline(&line, &len, f);
#endif
n = tgetline(&line, &len, f);
if (n==-1) break;
const char *p = NULL;
......
......@@ -40,6 +40,7 @@
#include "dnodeShell.h"
#include "dnodeTelemetry.h"
#include "module.h"
#include "mnode.h"
#if !defined(_MODULE) || !defined(_TD_LINUX)
int32_t moduleStart() { return 0; }
......@@ -216,6 +217,17 @@ static int32_t dnodeInitStorage() {
sprintf(tsDnodeDir, "%s/dnode", tsDataDir);
// sprintf(tsVnodeBakDir, "%s/vnode_bak", tsDataDir);
if (tsCompactMnodeWal == 1) {
sprintf(tsMnodeTmpDir, "%s/mnode_tmp", tsDataDir);
tfsRmdir(tsMnodeTmpDir);
if (dnodeCreateDir(tsMnodeTmpDir) < 0) {
dError("failed to create dir: %s, reason: %s", tsMnodeTmpDir, strerror(errno));
return -1;
}
sprintf(tsMnodeBakDir, "%s/mnode_bak", tsDataDir);
//tfsRmdir(tsMnodeBakDir);
}
//TODO(dengyihao): no need to init here
if (dnodeCreateDir(tsMnodeDir) < 0) {
dError("failed to create dir: %s, reason: %s", tsMnodeDir, strerror(errno));
......
......@@ -42,6 +42,8 @@ int32_t main(int32_t argc, char *argv[]) {
}
} else if (strcmp(argv[i], "-C") == 0) {
dump_config = 1;
} else if (strcmp(argv[i], "--compact-mnode-wal") == 0) {
tsCompactMnodeWal = 1;
} else if (strcmp(argv[i], "-V") == 0) {
#ifdef _ACCT
char *versionStr = "enterprise";
......
......@@ -73,6 +73,9 @@ int32_t mnodeProcessPeerReq(SMnodeMsg *pMsg);
void mnodeProcessPeerRsp(SRpcMsg *pMsg);
int32_t mnodeRetriveAuth(char *user, char *spi, char *encrypt, char *secret, char *ckey);
int32_t mnodeCompactWal();
int32_t mnodeCompactComponents();
#ifdef __cplusplus
}
#endif
......
......@@ -54,7 +54,8 @@ void monCleanupSystem();
void monSaveAcctLog(SAcctMonitorObj *pMonObj);
void monSaveLog(int32_t level, const char *const format, ...);
void monExecuteSQL(char *sql);
typedef void (*MonExecuteSQLCbFP)(void *param, TAOS_RES *, int code);
void monExecuteSQLWithResultCallback(char *sql, MonExecuteSQLCbFP callback, void* param);
#ifdef __cplusplus
}
#endif
......
......@@ -427,6 +427,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_FS_INVLD_LEVEL TAOS_DEF_ERROR_CODE(0, 0x2207) //"tfs invalid level")
#define TSDB_CODE_FS_NO_VALID_DISK TAOS_DEF_ERROR_CODE(0, 0x2208) //"tfs no valid disk")
// monitor
#define TSDB_CODE_MON_CONNECTION_INVALID TAOS_DEF_ERROR_CODE(0, 0x2300) //"monitor invalid monitor db connection")
#ifdef __cplusplus
}
#endif
......
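The new monitor error code is declared through TAOS_DEF_ERROR_CODE(0, 0x2300). The sketch below shows how such a code would read as a signed 32-bit value, assuming the macro simply sets the high bit of the code word; the macro definition used here is an assumption for illustration, not the one from taoserror.h.

```c
/*
 * Hedged sketch: how a code like 0x2300 would read as a signed 32-bit value,
 * assuming TAOS_DEF_ERROR_CODE(mod, code) sets the high bit (assumption for
 * illustration only).
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_DEF_ERROR_CODE(mod, code) ((int32_t)(0x80000000 | ((mod) << 16) | (code)))

int main(void) {
    int32_t monInvalid = SKETCH_DEF_ERROR_CODE(0, 0x2300); /* TSDB_CODE_MON_CONNECTION_INVALID */
    printf("0x2300 -> %d (0x%08X)\n", monInvalid, (uint32_t)monInvalid);
    return 0;
}
```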
{
"filetype":"subscribe",
"filetype": "subscribe",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
......
......@@ -19,6 +19,7 @@
*/
#include <stdint.h>
#include <taos.h>
#define _GNU_SOURCE
#define CURL_STATICLIB
......@@ -61,6 +62,8 @@ extern char configDir[];
#define QUERY_JSON_NAME "query.json"
#define SUBSCRIBE_JSON_NAME "subscribe.json"
#define STR_INSERT_INTO "INSERT INTO "
enum TEST_MODE {
INSERT_TEST, // 0
QUERY_TEST, // 1
......@@ -198,6 +201,8 @@ typedef struct {
} SColDes;
/* Used by main to communicate with parse_opt. */
static char *g_dupstr = NULL;
typedef struct SArguments_S {
char * metaFile;
uint32_t test_mode;
......@@ -225,13 +230,13 @@ typedef struct SArguments_S {
uint32_t num_of_threads;
uint64_t insert_interval;
int64_t query_times;
uint64_t interlace_rows;
uint64_t num_of_RPR; // num_of_records_per_req
uint32_t interlace_rows;
uint32_t num_of_RPR; // num_of_records_per_req
uint64_t max_sql_len;
int64_t num_of_tables;
int64_t num_of_DPT;
int abort;
int disorderRatio; // 0: no disorder, >0: x%
uint32_t disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms or us by database precision
uint32_t method_of_delete;
char ** arg_list;
......@@ -243,7 +248,7 @@ typedef struct SColumn_S {
char field[TSDB_COL_NAME_LEN + 1];
char dataType[MAX_TB_NAME_SIZE];
uint32_t dataLen;
char note[128];
char note[128];
} StrColumn;
typedef struct SSuperTable_S {
......@@ -254,12 +259,12 @@ typedef struct SSuperTable_S {
uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
char childTblPrefix[MAX_TB_NAME_SIZE];
char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample
uint16_t insertMode; // 0: taosc, 1: rest, 2: stmt
uint16_t iface; // 0: taosc, 1: rest, 2: stmt
int64_t childTblLimit;
uint64_t childTblOffset;
// int multiThreadWriteOneTbl; // 0: no, 1: yes
uint64_t interlaceRows; //
uint32_t interlaceRows; //
int disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms or us by database precision
uint64_t maxSqlLen; //
......@@ -371,17 +376,20 @@ typedef struct SDbs_S {
typedef struct SpecifiedQueryInfo_S {
uint64_t queryInterval; // 0: unlimit > 0 loop/s
uint64_t concurrent;
uint32_t concurrent;
uint64_t sqlCount;
uint32_t asyncMode; // 0: sync, 1: async
uint64_t subscribeInterval; // ms
uint64_t queryTimes;
int subscribeRestart;
bool subscribeRestart;
int subscribeKeepProgress;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
int resubAfterConsume[MAX_QUERY_SQL_COUNT];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
char topic[MAX_QUERY_SQL_COUNT][32];
int consumed[MAX_QUERY_SQL_COUNT];
TAOS_RES* res[MAX_QUERY_SQL_COUNT];
uint64_t totalQueried;
} SpecifiedQueryInfo;
......@@ -391,7 +399,7 @@ typedef struct SuperQueryInfo_S {
uint32_t threadCnt;
uint32_t asyncMode; // 0: sync, 1: async
uint64_t subscribeInterval; // ms
int subscribeRestart;
bool subscribeRestart;
int subscribeKeepProgress;
uint64_t queryTimes;
int64_t childTblCount;
......@@ -399,7 +407,7 @@ typedef struct SuperQueryInfo_S {
uint64_t sqlCount;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
int resubAfterConsume[MAX_QUERY_SQL_COUNT];
int resubAfterConsume;
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
char* childTblName;
......@@ -427,7 +435,8 @@ typedef struct SThreadInfo_S {
int threadID;
char db_name[MAX_DB_NAME_SIZE+1];
uint32_t time_precision;
char fp[4096];
char filePath[4096];
FILE *fp;
char tb_prefix[MAX_TB_NAME_SIZE];
uint64_t start_table_from;
uint64_t end_table_to;
......@@ -537,11 +546,14 @@ static int taosRandom()
#endif // ifdef Windows
static void prompt();
static int createDatabasesAndStables();
static void createChildTables();
static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet);
static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
char* sqlstr, char *resultFile);
static int postProceSql(char *host, struct sockaddr_in *pServAddr,
uint16_t port, char* sqlstr, threadInfo *pThreadInfo);
static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq,
int disorderRatio, int disorderRange);
/* ************ Global variables ************ */
......@@ -692,8 +704,9 @@ static void printHelp() {
"The data_type of columns, default: INT,INT,INT,INT.");
printf("%s%s%s%s\n", indent, "-w", indent,
"The length of data_type 'BINARY' or 'NCHAR'. Default is 16");
printf("%s%s%s%s\n", indent, "-l", indent,
"The number of columns per record. Default is 4.");
printf("%s%s%s%s%d\n", indent, "-l", indent,
"The number of columns per record. Default is 4. Max values is ",
MAX_NUM_DATATYPE);
printf("%s%s%s%s\n", indent, "-T", indent,
"The number of threads. Default is 10.");
printf("%s%s%s%s\n", indent, "-i", indent,
......@@ -737,7 +750,6 @@ static bool isStringNumber(char *input)
}
static void parse_args(int argc, char *argv[], SArguments *arguments) {
char **sptr;
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-f") == 0) {
......@@ -881,20 +893,31 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
arguments->database = argv[++i];
} else if (strcmp(argv[i], "-l") == 0) {
if ((argc == i+1) ||
(!isStringNumber(argv[i+1]))) {
printHelp();
errorPrint("%s", "\n\t-l need a number following!\n");
exit(EXIT_FAILURE);
if (argc == i+1) {
if (!isStringNumber(argv[i+1])) {
printHelp();
errorPrint("%s", "\n\t-l need a number following!\n");
exit(EXIT_FAILURE);
}
}
arguments->num_of_CPR = atoi(argv[++i]);
if (arguments->num_of_CPR > MAX_NUM_DATATYPE) {
printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_DATATYPE);
prompt();
arguments->num_of_CPR = MAX_NUM_DATATYPE;
}
for (int col = arguments->num_of_CPR; col < MAX_NUM_DATATYPE; col++) {
arguments->datatype[col] = NULL;
}
} else if (strcmp(argv[i], "-b") == 0) {
if (argc == i+1) {
printHelp();
errorPrint("%s", "\n\t-b need valid string following!\n");
exit(EXIT_FAILURE);
}
sptr = arguments->datatype;
++i;
if (strstr(argv[i], ",") == NULL) {
// only one col
......@@ -911,12 +934,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
errorPrint("%s", "-b: Invalid data_type!\n");
exit(EXIT_FAILURE);
}
sptr[0] = argv[i];
arguments->datatype[0] = argv[i];
} else {
// more than one col
int index = 0;
char *dupstr = strdup(argv[i]);
char *running = dupstr;
g_dupstr = strdup(argv[i]);
char *running = g_dupstr;
char *token = strsep(&running, ",");
while(token != NULL) {
if (strcasecmp(token, "INT")
......@@ -929,16 +952,15 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
&& strcasecmp(token, "BINARY")
&& strcasecmp(token, "NCHAR")) {
printHelp();
free(dupstr);
free(g_dupstr);
errorPrint("%s", "-b: Invalid data_type!\n");
exit(EXIT_FAILURE);
}
sptr[index++] = token;
arguments->datatype[index++] = token;
token = strsep(&running, ",");
if (index >= MAX_NUM_DATATYPE) break;
}
free(dupstr);
sptr[index] = NULL;
arguments->datatype[index] = NULL;
}
} else if (strcmp(argv[i], "-w") == 0) {
if ((argc == i+1) ||
......@@ -1051,7 +1073,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
printf("# Insertion interval: %"PRIu64"\n",
arguments->insert_interval);
printf("# Number of records per req: %"PRIu64"\n",
printf("# Number of records per req: %ud\n",
arguments->num_of_RPR);
printf("# Max SQL length: %"PRIu64"\n",
arguments->max_sql_len);
......@@ -1073,10 +1095,8 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
printf("# Print debug info: %d\n", arguments->debug_print);
printf("# Print verbose info: %d\n", arguments->verbose_print);
printf("###################################################################\n");
if (!arguments->answer_yes) {
printf("Press enter key to continue\n\n");
(void) getchar();
}
prompt();
}
}
......@@ -1115,7 +1135,8 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
if (code != 0) {
if (!quiet) {
debugPrint("%s() LN%d - command: %s\n", __func__, __LINE__, command);
errorPrint("Failed to execute %s, reason: %s\n", command, taos_errstr(res));
errorPrint("Failed to execute %s, command length: %d, reason: %s\n",
command, (int)strlen(command), taos_errstr(res));
}
taos_free_result(res);
//taos_close(taos);
......@@ -1132,24 +1153,22 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) {
return 0;
}
static void appendResultBufToFile(char *resultBuf, char *resultFile)
static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo)
{
FILE *fp = NULL;
if (resultFile[0] != 0) {
fp = fopen(resultFile, "at");
if (fp == NULL) {
pThreadInfo->fp = fopen(pThreadInfo->filePath, "at");
if (pThreadInfo->fp == NULL) {
errorPrint(
"%s() LN%d, failed to open result file: %s, result will not save to file\n",
__func__, __LINE__, resultFile);
__func__, __LINE__, pThreadInfo->filePath);
return;
}
}
fprintf(fp, "%s", resultBuf);
tmfclose(fp);
fprintf(pThreadInfo->fp, "%s", resultBuf);
tmfclose(pThreadInfo->fp);
pThreadInfo->fp = NULL;
}
static void appendResultToFile(TAOS_RES *res, char* resultFile) {
static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
TAOS_ROW row = NULL;
int num_rows = 0;
int num_fields = taos_field_count(res);
......@@ -1167,10 +1186,11 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
// fetch the records row by row
while((row = taos_fetch_row(res))) {
if (totalLen >= 100*1024*1024 - 32000) {
appendResultBufToFile(databuf, resultFile);
totalLen = 0;
memset(databuf, 0, 100*1024*1024);
if ((strlen(pThreadInfo->filePath) > 0)
&& (totalLen >= 100*1024*1024 - 32000)) {
appendResultBufToFile(databuf, pThreadInfo);
totalLen = 0;
memset(databuf, 0, 100*1024*1024);
}
num_rows++;
int len = taos_print_row(temp, row, fields, num_fields);
......@@ -1181,8 +1201,10 @@ static void appendResultToFile(TAOS_RES *res, char* resultFile) {
}
verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n",
__func__, __LINE__, databuf, resultFile);
appendResultBufToFile(databuf, resultFile);
__func__, __LINE__, databuf, pThreadInfo->filePath);
if (strlen(pThreadInfo->filePath) > 0) {
appendResultBufToFile(databuf, pThreadInfo);
}
free(databuf);
}
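The refactor above replaces appendResultToFile() with fetchResult(), which drains the query result row by row and only touches the file system when pThreadInfo->filePath is set. The sketch below walks the same fetch loop against a live connection; taos_fetch_fields() and taos_errno() are assumed from the C client API (they are elided in the hunks above), and the host, credentials, and output path are illustrative.

```c
/*
 * Hedged sketch of the fetchResult() loop: drain a query result row by row
 * with the TDengine C client and optionally append the text form to a file.
 */
#include <stdio.h>
#include <taos.h>

static void fetchToFileSketch(TAOS_RES *res, const char *filePath) {
    int         numFields = taos_field_count(res);
    TAOS_FIELD *fields    = taos_fetch_fields(res);   /* assumed API, see lead-in */
    TAOS_ROW    row;
    char        line[4096];

    FILE *fp = (filePath && filePath[0]) ? fopen(filePath, "at") : NULL;

    while ((row = taos_fetch_row(res)) != NULL) {
        int len = taos_print_row(line, row, fields, numFields);
        line[len] = '\0';
        if (fp) {
            fprintf(fp, "%s\n", line);
        }
    }

    if (fp) fclose(fp);
}

int main(void) {
    /* connection parameters and output path are illustrative */
    TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 6030);
    if (taos == NULL) return 1;

    TAOS_RES *res = taos_query(taos, "show databases;");
    if (taos_errno(res) == 0) {
        fetchToFileSketch(res, "/tmp/show_databases.txt");
    }
    taos_free_result(res);
    taos_close(taos);
    return 0;
}
```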
......@@ -1198,16 +1220,14 @@ static void selectAndGetResult(
return;
}
if ((strlen(pThreadInfo->fp))) {
appendResultToFile(res, pThreadInfo->fp);
}
fetchResult(res, pThreadInfo);
taos_free_result(res);
} else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) {
int retCode = postProceSql(
g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port,
command,
pThreadInfo->fp);
pThreadInfo);
if (0 != retCode) {
printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID);
}
......@@ -1334,16 +1354,18 @@ static int printfInsertMeta() {
printf("interface: \033[33m%s\033[0m\n",
(g_args.iface==TAOSC_IFACE)?"taosc":(g_args.iface==REST_IFACE)?"rest":"stmt");
printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port);
printf("host: \033[33m%s:%u\033[0m\n",
g_Dbs.host, g_Dbs.port);
printf("user: \033[33m%s\033[0m\n", g_Dbs.user);
printf("password: \033[33m%s\033[0m\n", g_Dbs.password);
printf("configDir: \033[33m%s\033[0m\n", configDir);
printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl);
printf("thread num of create table: \033[33m%d\033[0m\n",
g_Dbs.threadCountByCreateTbl);
printf("top insert interval: \033[33m%"PRIu64"\033[0m\n",
g_args.insert_interval);
printf("number of records per req: \033[33m%"PRIu64"\033[0m\n",
printf("number of records per req: \033[33m%ud\033[0m\n",
g_args.num_of_RPR);
printf("max sql length: \033[33m%"PRIu64"\033[0m\n",
g_args.max_sql_len);
......@@ -1352,7 +1374,8 @@ static int printfInsertMeta() {
for (int i = 0; i < g_Dbs.dbCount; i++) {
printf("database[\033[33m%d\033[0m]:\n", i);
printf(" database[%d] name: \033[33m%s\033[0m\n", i, g_Dbs.db[i].dbName);
printf(" database[%d] name: \033[33m%s\033[0m\n",
i, g_Dbs.db[i].dbName);
if (0 == g_Dbs.db[i].drop) {
printf(" drop: \033[33mno\033[0m\n");
} else {
......@@ -1360,40 +1383,51 @@ static int printfInsertMeta() {
}
if (g_Dbs.db[i].dbCfg.blocks > 0) {
printf(" blocks: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.blocks);
printf(" blocks: \033[33m%d\033[0m\n",
g_Dbs.db[i].dbCfg.blocks);
}
if (g_Dbs.db[i].dbCfg.cache > 0) {
printf(" cache: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.cache);
printf(" cache: \033[33m%d\033[0m\n",
g_Dbs.db[i].dbCfg.cache);
}
if (g_Dbs.db[i].dbCfg.days > 0) {
printf(" days: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.days);
printf(" days: \033[33m%d\033[0m\n",
g_Dbs.db[i].dbCfg.days);
}
if (g_Dbs.db[i].dbCfg.keep > 0) {
printf(" keep: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.keep);
printf(" keep: \033[33m%d\033[0m\n",
g_Dbs.db[i].dbCfg.keep);
}
if (g_Dbs.db[i].dbCfg.replica > 0) {
printf(" replica: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.replica);
printf(" replica: \033[33m%d\033[0m\n",
g_Dbs.db[i].dbCfg.replica);
}
if (g_Dbs.db[i].dbCfg.update > 0) {
printf(" update: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.update);
printf(" update: \033[33m%d\033[0m\n",
g_Dbs.db[i].dbCfg.update);
}
if (g_Dbs.db[i].dbCfg.minRows > 0) {
printf(" minRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.minRows);
printf(" minRows: \033[33m%d\033[0m\n",
g_Dbs.db[i].dbCfg.minRows);
}
if (g_Dbs.db[i].dbCfg.maxRows > 0) {
printf(" maxRows: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.maxRows);
printf(" maxRows: \033[33m%d\033[0m\n",
g_Dbs.db[i].dbCfg.maxRows);
}
if (g_Dbs.db[i].dbCfg.comp > 0) {
printf(" comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp);
}
if (g_Dbs.db[i].dbCfg.walLevel > 0) {
printf(" walLevel: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.walLevel);
printf(" walLevel: \033[33m%d\033[0m\n",
g_Dbs.db[i].dbCfg.walLevel);
}
if (g_Dbs.db[i].dbCfg.fsync > 0) {
printf(" fsync: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.fsync);
printf(" fsync: \033[33m%d\033[0m\n",
g_Dbs.db[i].dbCfg.fsync);
}
if (g_Dbs.db[i].dbCfg.quorum > 0) {
printf(" quorum: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.quorum);
printf(" quorum: \033[33m%d\033[0m\n",
g_Dbs.db[i].dbCfg.quorum);
}
if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
......@@ -1437,9 +1471,9 @@ static int printfInsertMeta() {
g_Dbs.db[i].superTbls[j].childTblPrefix);
printf(" dataSource: \033[33m%s\033[0m\n",
g_Dbs.db[i].superTbls[j].dataSource);
printf(" insertMode: \033[33m%s\033[0m\n",
(g_Dbs.db[i].superTbls[j].insertMode==TAOSC_IFACE)?"taosc":
(g_Dbs.db[i].superTbls[j].insertMode==REST_IFACE)?"rest":"stmt");
printf(" iface: \033[33m%s\033[0m\n",
(g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc":
(g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt");
if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) {
printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblLimit);
......@@ -1457,7 +1491,7 @@ static int printfInsertMeta() {
printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
}
*/
printf(" interlaceRows: \033[33m%"PRIu64"\033[0m\n",
printf(" interlaceRows: \033[33m%ud\033[0m\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
......@@ -1535,7 +1569,7 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
fprintf(fp, "number of records per req: %"PRIu64"\n", g_args.num_of_RPR);
fprintf(fp, "number of records per req: %ud\n", g_args.num_of_RPR);
fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len);
fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
......@@ -1587,21 +1621,26 @@ static void printfInsertMetaToFile(FILE* fp) {
if (g_Dbs.db[i].dbCfg.precision[0] != 0) {
if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2))
|| (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) {
fprintf(fp, " precision: %s\n", g_Dbs.db[i].dbCfg.precision);
fprintf(fp, " precision: %s\n",
g_Dbs.db[i].dbCfg.precision);
} else {
fprintf(fp, " precision error: %s\n", g_Dbs.db[i].dbCfg.precision);
fprintf(fp, " precision error: %s\n",
g_Dbs.db[i].dbCfg.precision);
}
}
fprintf(fp, " super table count: %"PRIu64"\n", g_Dbs.db[i].superTblCount);
for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
fprintf(fp, " super table[%"PRIu64"]:\n", j);
fprintf(fp, " super table count: %"PRIu64"\n",
g_Dbs.db[i].superTblCount);
for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
fprintf(fp, " super table[%d]:\n", j);
fprintf(fp, " stbName: %s\n", g_Dbs.db[i].superTbls[j].sTblName);
fprintf(fp, " stbName: %s\n",
g_Dbs.db[i].superTbls[j].sTblName);
if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
fprintf(fp, " autoCreateTable: %s\n", "no");
} else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) {
} else if (AUTO_CREATE_SUBTBL
== g_Dbs.db[i].superTbls[j].autoCreateTable) {
fprintf(fp, " autoCreateTable: %s\n", "yes");
} else {
fprintf(fp, " autoCreateTable: %s\n", "error");
......@@ -1609,7 +1648,8 @@ static void printfInsertMetaToFile(FILE* fp) {
if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
fprintf(fp, " childTblExists: %s\n", "no");
} else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) {
} else if (TBL_ALREADY_EXISTS
== g_Dbs.db[i].superTbls[j].childTblExists) {
fprintf(fp, " childTblExists: %s\n", "yes");
} else {
fprintf(fp, " childTblExists: %s\n", "error");
......@@ -1621,12 +1661,12 @@ static void printfInsertMetaToFile(FILE* fp) {
g_Dbs.db[i].superTbls[j].childTblPrefix);
fprintf(fp, " dataSource: %s\n",
g_Dbs.db[i].superTbls[j].dataSource);
fprintf(fp, " insertMode: %s\n",
(g_Dbs.db[i].superTbls[j].insertMode==TAOSC_IFACE)?"taosc":
(g_Dbs.db[i].superTbls[j].insertMode==REST_IFACE)?"rest":"stmt");
fprintf(fp, " iface: %s\n",
(g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc":
(g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt");
fprintf(fp, " insertRows: %"PRId64"\n",
g_Dbs.db[i].superTbls[j].insertRows);
fprintf(fp, " interlace rows: %"PRIu64"\n",
fprintf(fp, " interlace rows: %ud\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
fprintf(fp, " stable insert interval: %"PRIu64"\n",
......@@ -1639,7 +1679,7 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " multiThreadWriteOneTbl: yes\n");
}
*/
fprintf(fp, " interlaceRows: %"PRIu64"\n",
fprintf(fp, " interlaceRows: %ud\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
fprintf(fp, " disorderRange: %d\n",
g_Dbs.db[i].superTbls[j].disorderRange);
......@@ -1666,7 +1706,8 @@ static void printfInsertMetaToFile(FILE* fp) {
if ((0 == strncasecmp(
g_Dbs.db[i].superTbls[j].columns[k].dataType,
"binary", strlen("binary")))
|| (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType,
|| (0 == strncasecmp(
g_Dbs.db[i].superTbls[j].columns[k].dataType,
"nchar", strlen("nchar")))) {
fprintf(fp, "column[%d]:%s(%d) ", k,
g_Dbs.db[i].superTbls[j].columns[k].dataType,
......@@ -1686,7 +1727,8 @@ static void printfInsertMetaToFile(FILE* fp) {
"binary", strlen("binary")))
|| (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType,
"nchar", strlen("nchar")))) {
fprintf(fp, "tag[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType,
fprintf(fp, "tag[%d]:%s(%d) ",
k, g_Dbs.db[i].superTbls[j].tags[k].dataType,
g_Dbs.db[i].superTbls[j].tags[k].dataLen);
} else {
fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType);
......@@ -1722,7 +1764,7 @@ static void printfQueryMeta() {
printf("query interval: \033[33m%"PRIu64" ms\033[0m\n",
g_queryInfo.specifiedQueryInfo.queryInterval);
printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times);
printf("concurrent: \033[33m%"PRIu64"\033[0m\n",
printf("concurrent: \033[33m%d\033[0m\n",
g_queryInfo.specifiedQueryInfo.concurrent);
printf("mod: \033[33m%s\033[0m\n",
(g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync");
......@@ -2019,13 +2061,13 @@ static void printfQuerySystemInfo(TAOS * taos) {
// show variables
res = taos_query(taos, "show variables;");
//appendResultToFile(res, filename);
//fetchResult(res, filename);
xDumpResultToFile(filename, res);
// show dnodes
res = taos_query(taos, "show dnodes;");
xDumpResultToFile(filename, res);
//appendResultToFile(res, filename);
//fetchResult(res, filename);
// show databases
res = taos_query(taos, "show databases;");
......@@ -2061,7 +2103,7 @@ static void printfQuerySystemInfo(TAOS * taos) {
}
static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port,
char* sqlstr, char *resultFile)
char* sqlstr, threadInfo *pThreadInfo)
{
char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s";
......@@ -2197,8 +2239,8 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port
response_buf[RESP_BUF_LEN - 1] = '\0';
printf("Response:\n%s\n", response_buf);
if (resultFile) {
appendResultBufToFile(response_buf, resultFile);
if (strlen(pThreadInfo->filePath) > 0) {
appendResultBufToFile(response_buf, pThreadInfo);
}
free(request_buf);
......@@ -2960,19 +3002,19 @@ static int startMultiThreadCreateChildTable(
int64_t b = 0;
b = ntables % threads;
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infos + i;
t_info->threadID = i;
tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
t_info->superTblInfo = superTblInfo;
for (int64_t i = 0; i < threads; i++) {
threadInfo *pThreadInfo = infos + i;
pThreadInfo->threadID = i;
tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
pThreadInfo->superTblInfo = superTblInfo;
verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name);
t_info->taos = taos_connect(
pThreadInfo->taos = taos_connect(
g_Dbs.host,
g_Dbs.user,
g_Dbs.password,
db_name,
g_Dbs.port);
if (t_info->taos == NULL) {
if (pThreadInfo->taos == NULL) {
errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n",
__func__, __LINE__, taos_errstr(NULL));
free(pids);
......@@ -2980,14 +3022,14 @@ static int startMultiThreadCreateChildTable(
return -1;
}
t_info->start_table_from = startFrom;
t_info->ntables = i<b?a+1:a;
t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
startFrom = t_info->end_table_to + 1;
t_info->use_metric = true;
t_info->cols = cols;
t_info->minDelay = UINT64_MAX;
pthread_create(pids + i, NULL, createTable, t_info);
pThreadInfo->start_table_from = startFrom;
pThreadInfo->ntables = i<b?a+1:a;
pThreadInfo->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
startFrom = pThreadInfo->end_table_to + 1;
pThreadInfo->use_metric = true;
pThreadInfo->cols = cols;
pThreadInfo->minDelay = UINT64_MAX;
pthread_create(pids + i, NULL, createTable, pThreadInfo);
}
for (int i = 0; i < threads; i++) {
......@@ -2995,8 +3037,8 @@ static int startMultiThreadCreateChildTable(
}
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infos + i;
taos_close(t_info->taos);
threadInfo *pThreadInfo = infos + i;
taos_close(pThreadInfo->taos);
}
free(pids);
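The renamed pThreadInfo loop splits ntables evenly across the creator threads: a = ntables / threads, b = ntables % threads, and the first b threads take one extra table. The following standalone sketch reproduces just that partitioning arithmetic with made-up counts so the start_table_from / end_table_to assignments are easy to verify.

```c
/*
 * Standalone sketch of the table partitioning used when spawning the
 * create-table threads: ntables is split into `threads` contiguous ranges,
 * with the first (ntables % threads) ranges getting one extra table.
 */
#include <inttypes.h>
#include <stdio.h>

int main(void) {
    int64_t ntables = 10;   /* illustrative values */
    int     threads = 4;

    int64_t a = ntables / threads;
    int64_t b = ntables % threads;
    int64_t startFrom = 0;

    for (int i = 0; i < threads; i++) {
        int64_t nThisThread = (i < b) ? a + 1 : a;
        int64_t endTo = (i < b) ? startFrom + a : startFrom + a - 1;
        printf("thread %d: tables [%" PRId64 ", %" PRId64 "] (%" PRId64 " tables)\n",
               i, startFrom, endTo, nThisThread);
        startFrom = endTo + 1;
    }
    return 0;
}
```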
......@@ -3457,19 +3499,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
}
g_args.interlace_rows = interlaceRows->valueint;
// rows per table need be less than insert batch
if (g_args.interlace_rows > g_args.num_of_RPR) {
printf("NOTICE: interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
g_args.interlace_rows, g_args.num_of_RPR);
printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
g_args.num_of_RPR);
if (!g_args.answer_yes) {
printf(" press Enter key to continue or Ctrl-C to stop.");
(void)getchar();
}
g_args.interlace_rows = g_args.num_of_RPR;
}
} else if (!interlaceRows) {
g_args.interlace_rows = 0; // 0 means progressive mode, > 0 means interlace mode; max value is less than or equal to num_of_records_per_req
} else {
......@@ -3501,6 +3530,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
__func__, __LINE__);
goto PARSE_OVER;
} else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) {
printf("NOTICE: number of records per request value %"PRIu64" > %d\n\n",
numRecPerReq->valueint, MAX_RECORDS_PER_REQ);
printf(" number of records per request value will be set to %d\n\n",
MAX_RECORDS_PER_REQ);
prompt();
numRecPerReq->valueint = MAX_RECORDS_PER_REQ;
}
g_args.num_of_RPR = numRecPerReq->valueint;
......@@ -3524,12 +3558,22 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
g_args.answer_yes = false;
}
} else if (!answerPrompt) {
g_args.answer_yes = false;
g_args.answer_yes = true; // default: no prompt, i.e. answer_yes.
} else {
printf("ERROR: failed to read json, confirm_parameter_prompt not found\n");
errorPrint("%s", "failed to read json, confirm_parameter_prompt input mistake\n");
goto PARSE_OVER;
}
// rows per table need be less than insert batch
if (g_args.interlace_rows > g_args.num_of_RPR) {
printf("NOTICE: interlace rows value %ud > num_of_records_per_req %ud\n\n",
g_args.interlace_rows, g_args.num_of_RPR);
printf(" interlace rows value will be set to num_of_records_per_req %ud\n\n",
g_args.num_of_RPR);
prompt();
g_args.interlace_rows = g_args.num_of_RPR;
}
cJSON* dbs = cJSON_GetObjectItem(root, "databases");
if (!dbs || dbs->type != cJSON_Array) {
printf("ERROR: failed to read json, databases not found\n");
......@@ -3839,22 +3883,22 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , rest, stmt
if (insertMode && insertMode->type == cJSON_String
&& insertMode->valuestring != NULL) {
if (0 == strcasecmp(insertMode->valuestring, "taosc")) {
g_Dbs.db[i].superTbls[j].insertMode = TAOSC_IFACE;
} else if (0 == strcasecmp(insertMode->valuestring, "rest")) {
g_Dbs.db[i].superTbls[j].insertMode = REST_IFACE;
} else if (0 == strcasecmp(insertMode->valuestring, "stmt")) {
g_Dbs.db[i].superTbls[j].insertMode = STMT_IFACE;
cJSON *stbIface = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , rest, stmt
if (stbIface && stbIface->type == cJSON_String
&& stbIface->valuestring != NULL) {
if (0 == strcasecmp(stbIface->valuestring, "taosc")) {
g_Dbs.db[i].superTbls[j].iface= TAOSC_IFACE;
} else if (0 == strcasecmp(stbIface->valuestring, "rest")) {
g_Dbs.db[i].superTbls[j].iface= REST_IFACE;
} else if (0 == strcasecmp(stbIface->valuestring, "stmt")) {
g_Dbs.db[i].superTbls[j].iface= STMT_IFACE;
} else {
errorPrint("%s() LN%d, failed to read json, insert_mode %s not recognized\n",
__func__, __LINE__, insertMode->valuestring);
__func__, __LINE__, stbIface->valuestring);
goto PARSE_OVER;
}
} else if (!insertMode) {
g_Dbs.db[i].superTbls[j].insertMode = TAOSC_IFACE;
} else if (!stbIface) {
g_Dbs.db[i].superTbls[j].iface = TAOSC_IFACE;
} else {
errorPrint("%s", "failed to read json, insert_mode not found\n");
goto PARSE_OVER;
......@@ -3991,14 +4035,12 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
g_Dbs.db[i].superTbls[j].interlaceRows = stbInterlaceRows->valueint;
// rows per table need be less than insert batch
if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) {
printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR);
printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %ud > num_of_records_per_req %ud\n\n",
i, j, g_Dbs.db[i].superTbls[j].interlaceRows,
g_args.num_of_RPR);
if (!g_args.answer_yes) {
printf(" press Enter key to continue or Ctrl-C to stop.");
(void)getchar();
}
printf(" interlace rows value will be set to num_of_records_per_req %ud\n\n",
g_args.num_of_RPR);
prompt();
g_Dbs.db[i].superTbls[j].interlaceRows = g_args.num_of_RPR;
}
} else if (!stbInterlaceRows) {
......@@ -4194,7 +4236,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
"query_times");
if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) {
if (specifiedQueryTimes->valueint <= 0) {
errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
errorPrint(
"%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n",
__func__, __LINE__, specifiedQueryTimes->valueint);
goto PARSE_OVER;
......@@ -4211,7 +4254,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent");
if (concurrent && concurrent->type == cJSON_Number) {
if (concurrent->valueint <= 0) {
errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n",
errorPrint(
"%s() LN%d, query sqlCount %"PRIu64" or concurrent %d is not correct.\n",
__func__, __LINE__,
g_queryInfo.specifiedQueryInfo.sqlCount,
g_queryInfo.specifiedQueryInfo.concurrent);
......@@ -4250,15 +4294,15 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* restart = cJSON_GetObjectItem(specifiedQuery, "restart");
if (restart && restart->type == cJSON_String && restart->valuestring != NULL) {
if (0 == strcmp("yes", restart->valuestring)) {
g_queryInfo.specifiedQueryInfo.subscribeRestart = 1;
g_queryInfo.specifiedQueryInfo.subscribeRestart = true;
} else if (0 == strcmp("no", restart->valuestring)) {
g_queryInfo.specifiedQueryInfo.subscribeRestart = 0;
g_queryInfo.specifiedQueryInfo.subscribeRestart = false;
} else {
printf("ERROR: failed to read json, subscribe restart error\n");
goto PARSE_OVER;
}
} else {
g_queryInfo.specifiedQueryInfo.subscribeRestart = 1;
g_queryInfo.specifiedQueryInfo.subscribeRestart = true;
}
cJSON* keepProgress = cJSON_GetObjectItem(specifiedQuery, "keepProgress");
......@@ -4278,24 +4322,28 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
}
// sqls
cJSON* superSqls = cJSON_GetObjectItem(specifiedQuery, "sqls");
if (!superSqls) {
cJSON* specifiedSqls = cJSON_GetObjectItem(specifiedQuery, "sqls");
if (!specifiedSqls) {
g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (superSqls->type != cJSON_Array) {
} else if (specifiedSqls->type != cJSON_Array) {
errorPrint("%s() LN%d, failed to read json, super sqls not found\n",
__func__, __LINE__);
goto PARSE_OVER;
} else {
int superSqlSize = cJSON_GetArraySize(superSqls);
if (superSqlSize > MAX_QUERY_SQL_COUNT) {
errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n",
__func__, __LINE__, MAX_QUERY_SQL_COUNT);
int superSqlSize = cJSON_GetArraySize(specifiedSqls);
if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent
> MAX_QUERY_SQL_COUNT) {
errorPrint("%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n",
__func__, __LINE__,
superSqlSize,
g_queryInfo.specifiedQueryInfo.concurrent,
MAX_QUERY_SQL_COUNT);
goto PARSE_OVER;
}
g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize;
for (int j = 0; j < superSqlSize; ++j) {
cJSON* sql = cJSON_GetArrayItem(superSqls, j);
cJSON* sql = cJSON_GetArrayItem(specifiedSqls, j);
if (sql == NULL) continue;
cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
......@@ -4303,7 +4351,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
printf("ERROR: failed to read json, sql not found\n");
goto PARSE_OVER;
}
tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j],
sqlStr->valuestring, MAX_QUERY_SQL_LENGTH);
cJSON* resubAfterConsume =
cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume");
......@@ -4318,10 +4367,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
}
cJSON *result = cJSON_GetObjectItem(sql, "result");
if (NULL != result && result->type == cJSON_String && result->valuestring != NULL) {
tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN);
if ((NULL != result) && (result->type == cJSON_String)
&& (result->valuestring != NULL)) {
tstrncpy(g_queryInfo.specifiedQueryInfo.result[j],
result->valuestring, MAX_FILE_NAME_LEN);
} else if (NULL == result) {
memset(g_queryInfo.specifiedQueryInfo.result[j], 0, MAX_FILE_NAME_LEN);
memset(g_queryInfo.specifiedQueryInfo.result[j],
0, MAX_FILE_NAME_LEN);
} else {
printf("ERROR: failed to read json, super query result file not found\n");
goto PARSE_OVER;
......@@ -4428,43 +4480,55 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
if (subrestart && subrestart->type == cJSON_String
&& subrestart->valuestring != NULL) {
if (0 == strcmp("yes", subrestart->valuestring)) {
g_queryInfo.superQueryInfo.subscribeRestart = 1;
g_queryInfo.superQueryInfo.subscribeRestart = true;
} else if (0 == strcmp("no", subrestart->valuestring)) {
g_queryInfo.superQueryInfo.subscribeRestart = 0;
g_queryInfo.superQueryInfo.subscribeRestart = false;
} else {
printf("ERROR: failed to read json, subscribe restart error\n");
goto PARSE_OVER;
}
} else {
g_queryInfo.superQueryInfo.subscribeRestart = 1;
g_queryInfo.superQueryInfo.subscribeRestart = true;
}
cJSON* subkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress");
if (subkeepProgress &&
subkeepProgress->type == cJSON_String
&& subkeepProgress->valuestring != NULL) {
if (0 == strcmp("yes", subkeepProgress->valuestring)) {
cJSON* superkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress");
if (superkeepProgress &&
superkeepProgress->type == cJSON_String
&& superkeepProgress->valuestring != NULL) {
if (0 == strcmp("yes", superkeepProgress->valuestring)) {
g_queryInfo.superQueryInfo.subscribeKeepProgress = 1;
} else if (0 == strcmp("no", subkeepProgress->valuestring)) {
} else if (0 == strcmp("no", superkeepProgress->valuestring)) {
g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
} else {
printf("ERROR: failed to read json, subscribe keepProgress error\n");
printf("ERROR: failed to read json, subscribe super table keepProgress error\n");
goto PARSE_OVER;
}
} else {
g_queryInfo.superQueryInfo.subscribeKeepProgress = 0;
}
// sqls
cJSON* subsqls = cJSON_GetObjectItem(superQuery, "sqls");
if (!subsqls) {
cJSON* superResubAfterConsume =
cJSON_GetObjectItem(superQuery, "resubAfterConsume");
if (superResubAfterConsume
&& superResubAfterConsume->type == cJSON_Number) {
g_queryInfo.superQueryInfo.resubAfterConsume =
superResubAfterConsume->valueint;
} else if (!superResubAfterConsume) {
//printf("failed to read json, subscribe interval no found\n");
////goto PARSE_OVER;
g_queryInfo.superQueryInfo.resubAfterConsume = 1;
}
// super table sqls
cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls");
if (!superSqls) {
g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (subsqls->type != cJSON_Array) {
} else if (superSqls->type != cJSON_Array) {
errorPrint("%s() LN%d: failed to read json, super sqls not found\n",
__func__, __LINE__);
goto PARSE_OVER;
} else {
int superSqlSize = cJSON_GetArraySize(subsqls);
int superSqlSize = cJSON_GetArraySize(superSqls);
if (superSqlSize > MAX_QUERY_SQL_COUNT) {
errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n",
__func__, __LINE__, MAX_QUERY_SQL_COUNT);
......@@ -4473,7 +4537,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.superQueryInfo.sqlCount = superSqlSize;
for (int j = 0; j < superSqlSize; ++j) {
cJSON* sql = cJSON_GetArrayItem(subsqls, j);
cJSON* sql = cJSON_GetArrayItem(superSqls, j);
if (sql == NULL) continue;
cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql");
......@@ -4486,18 +4550,6 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring,
MAX_QUERY_SQL_LENGTH);
cJSON* superResubAfterConsume =
cJSON_GetObjectItem(sql, "resubAfterConsume");
if (superResubAfterConsume
&& superResubAfterConsume->type == cJSON_Number) {
g_queryInfo.superQueryInfo.resubAfterConsume[j] =
superResubAfterConsume->valueint;
} else if (!superResubAfterConsume) {
//printf("failed to read json, subscribe interval no found\n");
//goto PARSE_OVER;
g_queryInfo.superQueryInfo.resubAfterConsume[j] = 1;
}
cJSON *result = cJSON_GetObjectItem(sql, "result");
if (result != NULL && result->type == cJSON_String
&& result->valuestring != NULL){
......@@ -4816,11 +4868,11 @@ static int64_t execInsert(threadInfo *pThreadInfo, uint64_t k)
verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID,
__func__, __LINE__, pThreadInfo->buffer);
if (superTblInfo) {
if (superTblInfo->insertMode == TAOSC_IFACE) {
if (superTblInfo->iface == TAOSC_IFACE) {
affectedRows = queryDbExec(
pThreadInfo->taos,
pThreadInfo->buffer, INSERT_TYPE, false);
} else if (superTblInfo->insertMode == REST_IFACE) {
} else if (superTblInfo->iface == REST_IFACE) {
if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port,
pThreadInfo->buffer, NULL /* not set result file */)) {
affectedRows = -1;
......@@ -4829,7 +4881,7 @@ static int64_t execInsert(threadInfo *pThreadInfo, uint64_t k)
} else {
affectedRows = k;
}
} else if (superTblInfo->insertMode == STMT_IFACE) {
} else if (superTblInfo->iface == STMT_IFACE) {
debugPrint("%s() LN%d, stmt=%p", __func__, __LINE__, pThreadInfo->stmt);
if (0 != taos_stmt_execute(pThreadInfo->stmt)) {
errorPrint("%s() LN%d, failied to execute insert statement\n",
......@@ -4840,7 +4892,7 @@ static int64_t execInsert(threadInfo *pThreadInfo, uint64_t k)
affectedRows = k;
} else {
errorPrint("%s() LN%d: unknown insert mode: %d\n",
__func__, __LINE__, superTblInfo->insertMode);
__func__, __LINE__, superTblInfo->iface);
affectedRows = 0;
}
} else {
......@@ -4850,10 +4902,12 @@ static int64_t execInsert(threadInfo *pThreadInfo, uint64_t k)
return affectedRows;
}
static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t tableSeq)
static void getTableName(char *pTblName,
threadInfo* pThreadInfo, uint64_t tableSeq)
{
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
if (superTblInfo) {
if ((superTblInfo)
&& (AUTO_CREATE_SUBTBL != superTblInfo->autoCreateTable)) {
if (superTblInfo->childTblLimit > 0) {
snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
superTblInfo->childTblName +
......@@ -4874,7 +4928,7 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t table
}
static int64_t generateDataTailWithoutStb(
uint64_t batch, char* buffer,
uint32_t batch, char* buffer,
int64_t remainderBufLen, int64_t insertRows,
uint64_t startFrom, int64_t startTime,
/* int64_t *pSamplePos, */int64_t *dataLen) {
......@@ -4882,7 +4936,7 @@ static int64_t generateDataTailWithoutStb(
uint64_t len = 0;
char *pstr = buffer;
verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch);
verbosePrint("%s() LN%d batch=%d\n", __func__, __LINE__, batch);
int64_t k = 0;
for (k = 0; k < batch;) {
......@@ -4894,22 +4948,11 @@ static int64_t generateDataTailWithoutStb(
char **data_type = g_args.datatype;
int lenOfBinary = g_args.len_of_binary;
int64_t randTail = DEFAULT_TIMESTAMP_STEP * k;
if (g_args.disorderRatio != 0) {
int rand_num = taosRandom() % 100;
if (rand_num < g_args.disorderRatio) {
randTail = (randTail +
(taosRandom() % g_args.disorderRange + 1)) * (-1);
debugPrint("rand data generated, back %"PRId64"\n", randTail);
}
} else {
randTail = DEFAULT_TIMESTAMP_STEP * k;
}
retLen = generateData(data, data_type,
startTime + randTail,
startTime + getTSRandTail(
(int64_t)DEFAULT_TIMESTAMP_STEP, k,
g_args.disorderRatio,
g_args.disorderRange),
lenOfBinary);
if (len > remainderBufLen)
......@@ -4934,9 +4977,25 @@ static int64_t generateDataTailWithoutStb(
return k;
}
static int64_t generateStbDataTail(
static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq,
int disorderRatio, int disorderRange)
{
int64_t randTail = timeStampStep * seq;
if (disorderRatio > 0) {
int rand_num = taosRandom() % 100;
if(rand_num < disorderRatio) {
randTail = (randTail +
(taosRandom() % disorderRange + 1)) * (-1);
debugPrint("rand data generated, back %"PRId64"\n", randTail);
}
}
return randTail;
}
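getTSRandTail() centralizes the disorder logic that was previously duplicated in the data-tail generators: row k normally advances by timeStampStep * k, but with a disorderRatio percent chance the offset becomes negative by up to disorderRange, deliberately producing out-of-order timestamps. A self-contained sketch of that behaviour, with the function body reproduced locally and illustrative step/ratio/range values, follows.

```c
/*
 * Self-contained sketch of the disorder-timestamp logic factored into
 * getTSRandTail(): the k-th row's offset is timeStampStep * k, but with a
 * disorderRatio percent chance it is replaced by a negative offset of up to
 * disorderRange. The body is reproduced locally so this compiles standalone;
 * the step/ratio/range constants are illustrative.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int64_t tsRandTailSketch(int64_t timeStampStep, int32_t seq,
                                int disorderRatio, int disorderRange) {
    int64_t randTail = timeStampStep * seq;
    if (disorderRatio > 0 && (rand() % 100) < disorderRatio) {
        randTail = (randTail + (rand() % disorderRange + 1)) * (-1);
    }
    return randTail;
}

int main(void) {
    const int64_t step = 1;                   /* illustrative timestamp step */
    const int64_t startTime = 1500000000000LL;

    for (int32_t k = 0; k < 10; k++) {
        int64_t ts = startTime + tsRandTailSketch(step, k, 10 /* ratio % */, 1000 /* range */);
        printf("row %2d -> ts %" PRId64 "\n", k, ts);
    }
    return 0;
}
```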
static int32_t generateStbDataTail(
SSuperTable* superTblInfo,
uint64_t batch, char* buffer,
uint32_t batch, char* buffer,
int64_t remainderBufLen, int64_t insertRows,
uint64_t startFrom, int64_t startTime,
int64_t *pSamplePos, int64_t *dataLen) {
......@@ -4944,37 +5003,35 @@ static int64_t generateStbDataTail(
char *pstr = buffer;
verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch);
bool tsRand;
if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) {
tsRand = true;
} else {
tsRand = false;
}
verbosePrint("%s() LN%d batch=%ud\n", __func__, __LINE__, batch);
int64_t k = 0;
int32_t k = 0;
for (k = 0; k < batch;) {
char data[MAX_DATA_SIZE];
memset(data, 0, MAX_DATA_SIZE);
int64_t retLen = 0;
if (0 == strncasecmp(superTblInfo->dataSource,
"sample", strlen("sample"))) {
retLen = getRowDataFromSample(
if (tsRand) {
retLen = generateStbRowData(superTblInfo, data,
startTime + getTSRandTail(
superTblInfo->timeStampStep, k,
superTblInfo->disorderRatio,
superTblInfo->disorderRange)
);
} else {
retLen = getRowDataFromSample(
data,
remainderBufLen,
startTime + superTblInfo->timeStampStep * k,
superTblInfo,
pSamplePos);
} else if (0 == strncasecmp(superTblInfo->dataSource,
"rand", strlen("rand"))) {
int64_t randTail = superTblInfo->timeStampStep * k;
if (superTblInfo->disorderRatio > 0) {
int rand_num = taosRandom() % 100;
if(rand_num < superTblInfo->disorderRatio) {
randTail = (randTail +
(taosRandom() % superTblInfo->disorderRange + 1)) * (-1);
debugPrint("rand data generated, back %"PRId64"\n", randTail);
}
}
int64_t d = startTime + randTail;
retLen = generateStbRowData(superTblInfo, data, d);
}
if (retLen > remainderBufLen) {
......@@ -4986,7 +5043,7 @@ static int64_t generateStbDataTail(
len += retLen;
remainderBufLen -= retLen;
verbosePrint("%s() LN%d len=%"PRIu64" k=%"PRIu64" \nbuffer=%s\n",
verbosePrint("%s() LN%d len=%"PRIu64" k=%ud \nbuffer=%s\n",
__func__, __LINE__, len, k, buffer);
startFrom ++;
......@@ -5083,8 +5140,11 @@ static int generateStbSQLHead(
return len;
}
static int64_t generateInterlaceDataBuffer(
char *tableName, uint64_t batchPerTbl, uint64_t i, uint64_t batchPerTblTimes,
static int32_t generateStbInterlaceData(
SSuperTable *superTblInfo,
char *tableName, uint32_t batchPerTbl,
uint64_t i,
uint32_t batchPerTblTimes,
uint64_t tableSeq,
threadInfo *pThreadInfo, char *buffer,
int64_t insertRows,
......@@ -5093,12 +5153,11 @@ static int64_t generateInterlaceDataBuffer(
{
assert(buffer);
char *pstr = buffer;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
int headLen = generateStbSQLHead(
superTblInfo,
tableName, tableSeq, pThreadInfo->db_name,
pstr, *pRemainderBufLen);
pstr, *pRemainderBufLen);
if (headLen <= 0) {
return 0;
......@@ -5112,34 +5171,68 @@ static int64_t generateInterlaceDataBuffer(
int64_t dataLen = 0;
verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%"PRIu64" batchPerTbl = %"PRIu64"\n",
verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%ud batchPerTbl = %ud\n",
pThreadInfo->threadID, __func__, __LINE__,
i, batchPerTblTimes, batchPerTbl);
int64_t k;
if (superTblInfo) {
if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) {
if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) {
startTime = taosGetTimestamp(pThreadInfo->time_precision);
}
}
k = generateStbDataTail(
int32_t k = generateStbDataTail(
superTblInfo,
batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0,
startTime,
&(pThreadInfo->samplePos), &dataLen);
if (k == batchPerTbl) {
pstr += dataLen;
*pRemainderBufLen -= dataLen;
} else {
startTime = 1500000000000;
k = generateDataTailWithoutStb(
debugPrint("%s() LN%d, generated data tail: %ud, not equal batch per table: %ud\n",
__func__, __LINE__, k, batchPerTbl);
pstr -= headLen;
pstr[0] = '\0';
k = 0;
}
return k;
}
static int64_t generateInterlaceDataWithoutStb(
char *tableName, uint32_t batchPerTbl,
uint64_t tableSeq,
char *dbName, char *buffer,
int64_t insertRows,
uint64_t *pRemainderBufLen)
{
assert(buffer);
char *pstr = buffer;
int headLen = generateSQLHeadWithoutStb(
tableName, dbName,
pstr, *pRemainderBufLen);
if (headLen <= 0) {
return 0;
}
pstr += headLen;
*pRemainderBufLen -= headLen;
int64_t dataLen = 0;
int64_t startTime = 1500000000000;
int64_t k = generateDataTailWithoutStb(
batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0,
startTime,
/* &(pThreadInfo->samplePos), */&dataLen);
}
&dataLen);
if (k == batchPerTbl) {
pstr += dataLen;
*pRemainderBufLen -= dataLen;
} else {
debugPrint("%s() LN%d, generated data tail: %"PRIu64", not equal batch per table: %"PRIu64"\n",
debugPrint("%s() LN%d, generated data tail: %"PRIu64", not equal batch per table: %ud\n",
__func__, __LINE__, k, batchPerTbl);
pstr -= headLen;
pstr[0] = '\0';
......@@ -5149,33 +5242,88 @@ static int64_t generateInterlaceDataBuffer(
return k;
}
static int64_t generateProgressiveDataBuffer(
static int32_t prepareStbStmt(SSuperTable *stbInfo,
TAOS_STMT *stmt,
char *tableName, uint32_t batch, uint64_t insertRows,
int64_t startTime, char *buffer)
{
uint32_t k;
int ret;
char *pstr = buffer;
pstr += sprintf(pstr, "INSERT INTO %s values(?", tableName);
for (int i = 0; i < stbInfo->columnCount; i++) {
pstr += sprintf(pstr, ",?");
}
pstr += sprintf(pstr, ")");
ret = taos_stmt_prepare(stmt, buffer, 0);
if (ret != 0){
errorPrint("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n",
ret, taos_errstr(NULL));
return ret;
}
void *bindArray = malloc(sizeof(TAOS_BIND) * (stbInfo->columnCount + 1));
if (bindArray == NULL) {
errorPrint("Failed to allocate %d bind params\n", batch);
return -1;
}
bool tsRand;
if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) {
tsRand = true;
} else {
tsRand = false;
}
for (k = 0; k < batch; k++) {
/* columnCount + 1 (ts) */
for (int i = 0; i <= stbInfo->columnCount; i ++) {
TAOS_BIND *bind = (TAOS_BIND *)bindArray + (sizeof(TAOS_BIND) * i);
if (i == 0) {
bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
int64_t ts;
if (tsRand) {
ts = startTime + getTSRandTail(
stbInfo->timeStampStep, k,
stbInfo->disorderRatio,
stbInfo->disorderRange);
} else {
ts = startTime + stbInfo->timeStampStep * k;
}
bind->buffer = &ts;
} else {
}
}
// if msg > 3MB, break
}
taos_stmt_bind_param(stmt, bindArray);
taos_stmt_add_batch(stmt);
return k;
}
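prepareStbStmt() builds an INSERT statement with one ? placeholder per column and then walks the TDengine prepared-statement calls (taos_stmt_prepare, taos_stmt_bind_param, taos_stmt_add_batch; taos_stmt_execute is issued later in execInsert). Below is a hedged end-to-end sketch of that workflow for a single row of a hypothetical two-column table; taos_stmt_init() and taos_stmt_close() are assumed from the C client API, and the connection parameters are illustrative.

```c
/*
 * Hedged sketch of the stmt workflow: prepare an INSERT with ? placeholders,
 * bind one row, add it to the batch, and execute. The table
 * demo.weather(ts timestamp, temperature int) is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <taos.h>

int main(void) {
    TAOS *taos = taos_connect("127.0.0.1", "root", "taosdata", "demo", 6030);
    if (taos == NULL) return 1;

    TAOS_STMT *stmt = taos_stmt_init(taos);   /* assumed API, see lead-in */
    if (taos_stmt_prepare(stmt, "INSERT INTO weather VALUES(?, ?)", 0) != 0) {
        fprintf(stderr, "prepare failed: %s\n", taos_errstr(NULL));
        return 1;
    }

    int64_t ts = 1500000000000LL;
    int32_t temperature = 23;

    TAOS_BIND params[2];
    memset(params, 0, sizeof(params));

    params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
    params[0].buffer        = &ts;
    params[0].buffer_length = sizeof(ts);
    params[0].length        = &params[0].buffer_length;

    params[1].buffer_type   = TSDB_DATA_TYPE_INT;
    params[1].buffer        = &temperature;
    params[1].buffer_length = sizeof(temperature);
    params[1].length        = &params[1].buffer_length;

    taos_stmt_bind_param(stmt, params);
    taos_stmt_add_batch(stmt);
    if (taos_stmt_execute(stmt) != 0) {
        fprintf(stderr, "execute failed\n");
    }

    taos_stmt_close(stmt);                    /* assumed API, see lead-in */
    taos_close(taos);
    return 0;
}
```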
static int32_t generateStbProgressiveData(
SSuperTable *superTblInfo,
char *tableName,
int64_t tableSeq,
threadInfo *pThreadInfo, char *buffer,
char *dbName, char *buffer,
int64_t insertRows,
uint64_t startFrom, int64_t startTime, int64_t *pSamplePos,
int64_t *pRemainderBufLen)
{
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
assert(buffer != NULL);
char *pstr = buffer;
memset(buffer, 0, *pRemainderBufLen);
int64_t headLen;
if (superTblInfo) {
headLen = generateStbSQLHead(
int64_t headLen = generateStbSQLHead(
superTblInfo,
tableName, tableSeq, pThreadInfo->db_name,
tableName, tableSeq, dbName,
buffer, *pRemainderBufLen);
} else {
headLen = generateSQLHeadWithoutStb(
tableName, pThreadInfo->db_name,
buffer, *pRemainderBufLen);
}
if (headLen <= 0) {
return 0;
......@@ -5184,22 +5332,48 @@ static int64_t generateProgressiveDataBuffer(
*pRemainderBufLen -= headLen;
int64_t dataLen;
int64_t k;
if (superTblInfo) {
k = generateStbDataTail(superTblInfo,
return generateStbDataTail(superTblInfo,
g_args.num_of_RPR, pstr, *pRemainderBufLen,
insertRows, startFrom,
startTime,
pSamplePos, &dataLen);
} else {
k = generateDataTailWithoutStb(
}
static int64_t prepareStmtWithoutStb(char *tableName)
{
return -1;
}
static int64_t generateProgressiveDataWithoutStb(
char *tableName,
/* int64_t tableSeq, */
threadInfo *pThreadInfo, char *buffer,
int64_t insertRows,
uint64_t startFrom, int64_t startTime, /*int64_t *pSamplePos, */
int64_t *pRemainderBufLen)
{
assert(buffer != NULL);
char *pstr = buffer;
memset(buffer, 0, *pRemainderBufLen);
int64_t headLen = generateSQLHeadWithoutStb(
tableName, pThreadInfo->db_name,
buffer, *pRemainderBufLen);
if (headLen <= 0) {
return 0;
}
pstr += headLen;
*pRemainderBufLen -= headLen;
int64_t dataLen;
return generateDataTailWithoutStb(
g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom,
startTime,
/*pSamplePos, */&dataLen);
}
return k;
}
static void printStatPerThread(threadInfo *pThreadInfo)
......@@ -5217,7 +5391,10 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->threadID, __func__, __LINE__);
int64_t insertRows;
uint64_t interlaceRows;
uint32_t interlaceRows;
uint64_t maxSqlLen;
int64_t nTimeStampStep;
uint64_t insert_interval;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
......@@ -5230,26 +5407,38 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
} else {
interlaceRows = superTblInfo->interlaceRows;
}
maxSqlLen = superTblInfo->maxSqlLen;
nTimeStampStep = superTblInfo->timeStampStep;
insert_interval = superTblInfo->insertInterval;
} else {
insertRows = g_args.num_of_DPT;
interlaceRows = g_args.interlace_rows;
maxSqlLen = g_args.max_sql_len;
nTimeStampStep = DEFAULT_TIMESTAMP_STEP;
insert_interval = g_args.insert_interval;
}
debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__,
pThreadInfo->start_table_from,
pThreadInfo->ntables, insertRows);
if (interlaceRows > insertRows)
interlaceRows = insertRows;
if (interlaceRows > g_args.num_of_RPR)
interlaceRows = g_args.num_of_RPR;
int progOrInterlace;
uint32_t batchPerTbl = interlaceRows;
uint32_t batchPerTblTimes;
if (interlaceRows > 0) {
progOrInterlace= INTERLACE_INSERT_MODE;
if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
batchPerTblTimes =
g_args.num_of_RPR / interlaceRows;
} else {
progOrInterlace = PROGRESSIVE_INSERT_MODE;
batchPerTblTimes = 1;
}
uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
pThreadInfo->buffer = calloc(maxSqlLen, 1);
if (NULL == pThreadInfo->buffer) {
errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n",
......@@ -5257,16 +5446,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
return NULL;
}
char tableName[TSDB_TABLE_NAME_LEN];
pThreadInfo->totalInsertRows = 0;
pThreadInfo->totalAffectedRows = 0;
int64_t nTimeStampStep =
superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
uint64_t insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
uint64_t st = 0;
uint64_t et = UINT64_MAX;
......@@ -5275,31 +5457,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
uint64_t endTs;
uint64_t tableSeq = pThreadInfo->start_table_from;
debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__,
pThreadInfo->start_table_from,
pThreadInfo->ntables, insertRows);
int64_t startTime = pThreadInfo->start_time;
uint64_t batchPerTbl = interlaceRows;
uint64_t batchPerTblTimes;
if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
batchPerTblTimes =
g_args.num_of_RPR / interlaceRows;
} else {
batchPerTblTimes = 1;
}
uint64_t generatedRecPerTbl = 0;
bool flagSleep = true;
uint64_t sleepTimeTotal = 0;
char *strInsertInto = "insert into ";
int nInsertBufLen = strlen(strInsertInto);
while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) {
if ((flagSleep) && (insert_interval)) {
st = taosGetTimestampMs();
......@@ -5311,13 +5474,16 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
char *pstr = pThreadInfo->buffer;
int len = snprintf(pstr, nInsertBufLen + 1, "%s", strInsertInto);
int len = snprintf(pstr,
strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO);
pstr += len;
remainderBufLen -= len;
uint64_t recOfBatch = 0;
uint32_t recOfBatch = 0;
for (uint32_t i = 0; i < batchPerTblTimes; i ++) {
char tableName[TSDB_TABLE_NAME_LEN];
for (uint64_t i = 0; i < batchPerTblTimes; i ++) {
getTableName(tableName, pThreadInfo, tableSeq);
if (0 == strlen(tableName)) {
errorPrint("[%d] %s() LN%d, getTableName return null\n",
......@@ -5327,18 +5493,31 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
}
uint64_t oldRemainderLen = remainderBufLen;
int64_t generated = generateInterlaceDataBuffer(
tableName, batchPerTbl, i, batchPerTblTimes,
tableSeq,
pThreadInfo, pstr,
insertRows,
startTime,
&remainderBufLen);
debugPrint("[%d] %s() LN%d, generated records is %"PRId64"\n",
int32_t generated;
if (superTblInfo) {
generated = generateStbInterlaceData(
superTblInfo,
tableName, batchPerTbl, i,
batchPerTblTimes,
tableSeq,
pThreadInfo, pstr,
insertRows,
startTime,
&remainderBufLen);
} else {
generated = generateInterlaceDataWithoutStb(
tableName, batchPerTbl,
tableSeq,
pThreadInfo->db_name, pstr,
insertRows,
&remainderBufLen);
}
debugPrint("[%d] %s() LN%d, generated records is %d\n",
pThreadInfo->threadID, __func__, __LINE__, generated);
if (generated < 0) {
errorPrint("[%d] %s() LN%d, generated records is %"PRId64"\n",
errorPrint("[%d] %s() LN%d, generated records is %d\n",
pThreadInfo->threadID, __func__, __LINE__, generated);
goto free_of_interlace;
} else if (generated == 0) {
......@@ -5350,12 +5529,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pstr += (oldRemainderLen - remainderBufLen);
// startTime += batchPerTbl * superTblInfo->timeStampStep;
pThreadInfo->totalInsertRows += batchPerTbl;
verbosePrint("[%d] %s() LN%d batchPerTbl=%"PRId64" recOfBatch=%"PRId64"\n",
verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n",
pThreadInfo->threadID, __func__, __LINE__,
batchPerTbl, recOfBatch);
if (progOrInterlace == INTERLACE_INSERT_MODE) {
if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) {
if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) {
// turn to first table
tableSeq = pThreadInfo->start_table_from;
generatedRecPerTbl += batchPerTbl;
......@@ -5367,13 +5545,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
if (generatedRecPerTbl >= insertRows)
break;
int remainRows = insertRows - generatedRecPerTbl;
int64_t remainRows = insertRows - generatedRecPerTbl;
if ((remainRows > 0) && (batchPerTbl > remainRows))
batchPerTbl = remainRows;
if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR)
break;
}
}
verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n",
......@@ -5384,7 +5561,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
break;
}
verbosePrint("[%d] %s() LN%d recOfBatch=%"PRIu64" totalInsertRows=%"PRIu64"\n",
verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__, recOfBatch,
pThreadInfo->totalInsertRows);
verbosePrint("[%d] %s() LN%d, buffer=%s\n",
......@@ -5393,7 +5570,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
startTs = taosGetTimestampMs();
if (recOfBatch == 0) {
errorPrint("[%d] %s() LN%d try inserting records of batch is %"PRIu64"\n",
errorPrint("[%d] %s() LN%d try inserting records of batch is %d\n",
pThreadInfo->threadID, __func__, __LINE__,
recOfBatch);
errorPrint("%s\n", "\tPlease check if the batch or the buffer length is proper value!\n");
......@@ -5415,7 +5592,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pThreadInfo->totalDelay += delay;
if (recOfBatch != affectedRows) {
errorPrint("[%d] %s() LN%d execInsert insert %"PRIu64", affected rows: %"PRId64"\n%s\n",
errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n",
pThreadInfo->threadID, __func__, __LINE__,
recOfBatch, affectedRows, pThreadInfo->buffer);
goto free_of_interlace;
......@@ -5476,12 +5653,6 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
uint64_t startTs = taosGetTimestampMs();
uint64_t endTs;
/* int insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
uint64_t st = 0;
uint64_t et = 0xffffffff;
*/
pThreadInfo->totalInsertRows = 0;
pThreadInfo->totalAffectedRows = 0;
......@@ -5501,18 +5672,41 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
int64_t remainderBufLen = maxSqlLen;
char *pstr = pThreadInfo->buffer;
int nInsertBufLen = strlen("insert into ");
int len = snprintf(pstr, nInsertBufLen + 1, "%s", "insert into ");
int len = snprintf(pstr,
strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO);
pstr += len;
remainderBufLen -= len;
int64_t generated = generateProgressiveDataBuffer(
tableName, tableSeq, pThreadInfo, pstr, insertRows,
i, start_time,
&(pThreadInfo->samplePos),
&remainderBufLen);
int32_t generated;
if (superTblInfo) {
if (superTblInfo->iface == STMT_IFACE) {
generated = prepareStbStmt(superTblInfo,
pThreadInfo->stmt,
tableName, g_args.num_of_RPR,
insertRows, start_time, pstr);
} else {
generated = generateStbProgressiveData(
superTblInfo,
tableName, tableSeq, pThreadInfo->db_name, pstr,
insertRows, i, start_time,
&(pThreadInfo->samplePos),
&remainderBufLen);
}
} else {
if (g_args.iface == STMT_IFACE) {
generated = prepareStmtWithoutStb(tableName);
} else {
generated = generateProgressiveDataWithoutStb(
tableName,
/* tableSeq, */
pThreadInfo, pstr, insertRows,
i, start_time,
/* &(pThreadInfo->samplePos), */
&remainderBufLen);
}
}
if (generated > 0)
i += generated;
else
......@@ -5579,7 +5773,7 @@ static void* syncWrite(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
int interlaceRows;
uint32_t interlaceRows;
if (superTblInfo) {
if ((superTblInfo->interlaceRows == 0)
......@@ -5710,7 +5904,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
char* precision,SSuperTable* superTblInfo) {
//TAOS* taos;
//if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) {
//if (0 == strncasecmp(superTblInfo->iface, "taosc", 5)) {
// taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port);
// if (NULL == taos) {
// printf("connect to server fail, reason: %s\n", taos_errstr(NULL));
......@@ -5812,19 +6006,13 @@ static void startMultiThreadInsertData(int threads, char* db_name,
&& ((superTblInfo->childTblOffset + superTblInfo->childTblLimit )
> superTblInfo->childTblCount)) {
printf("WARNING: specified offset + limit > child table count!\n");
if (!g_args.answer_yes) {
printf(" Press enter key to continue or Ctrl-C to stop\n\n");
(void)getchar();
}
prompt();
}
if ((superTblInfo->childTblExists != TBL_NO_EXISTS)
&& (0 == superTblInfo->childTblLimit)) {
printf("WARNING: specified limit = 0, which cannot find table name to insert or query! \n");
if (!g_args.answer_yes) {
printf(" Press enter key to continue or Ctrl-C to stop\n\n");
(void)getchar();
}
prompt();
}
superTblInfo->childTblName = (char*)calloc(1,
......@@ -5861,8 +6049,9 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
if ((superTblInfo)
&& (superTblInfo->insertMode == REST_IFACE)) {
if (convertHostToServAddr(g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) {
&& (superTblInfo->iface == REST_IFACE)) {
if (convertHostToServAddr(
g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) {
exit(-1);
}
}
......@@ -5877,22 +6066,22 @@ static void startMultiThreadInsertData(int threads, char* db_name,
memset(infos, 0, threads * sizeof(threadInfo));
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infos + i;
t_info->threadID = i;
tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
t_info->time_precision = timePrec;
t_info->superTblInfo = superTblInfo;
threadInfo *pThreadInfo = infos + i;
pThreadInfo->threadID = i;
tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
pThreadInfo->time_precision = timePrec;
pThreadInfo->superTblInfo = superTblInfo;
t_info->start_time = start_time;
t_info->minDelay = UINT64_MAX;
pThreadInfo->start_time = start_time;
pThreadInfo->minDelay = UINT64_MAX;
if ((NULL == superTblInfo) ||
(superTblInfo->insertMode != REST_IFACE)) {
(superTblInfo->iface != REST_IFACE)) {
//t_info->taos = taos;
t_info->taos = taos_connect(
pThreadInfo->taos = taos_connect(
g_Dbs.host, g_Dbs.user,
g_Dbs.password, db_name, g_Dbs.port);
if (NULL == t_info->taos) {
if (NULL == pThreadInfo->taos) {
errorPrint(
"%s() LN%d, connect to server fail from insert sub thread, reason: %s\n",
__func__, __LINE__,
......@@ -5901,9 +6090,9 @@ static void startMultiThreadInsertData(int threads, char* db_name,
exit(-1);
}
if ((superTblInfo) && (superTblInfo->insertMode == STMT_IFACE)) {
t_info->stmt = taos_stmt_init(t_info->taos);
if (NULL == t_info->stmt) {
if ((superTblInfo) && (superTblInfo->iface == STMT_IFACE)) {
pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos);
if (NULL == pThreadInfo->stmt) {
errorPrint(
"%s() LN%d, failed init stmt, reason: %s\n",
__func__, __LINE__,
......@@ -5914,27 +6103,27 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
}
} else {
t_info->taos = NULL;
pThreadInfo->taos = NULL;
}
/* if ((NULL == superTblInfo)
|| (0 == superTblInfo->multiThreadWriteOneTbl)) {
*/
t_info->start_table_from = startFrom;
t_info->ntables = i<b?a+1:a;
t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
startFrom = t_info->end_table_to + 1;
pThreadInfo->start_table_from = startFrom;
pThreadInfo->ntables = i<b?a+1:a;
pThreadInfo->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
startFrom = pThreadInfo->end_table_to + 1;
/* } else {
t_info->start_table_from = 0;
t_info->ntables = superTblInfo->childTblCount;
t_info->start_time = t_info->start_time + rand_int() % 10000 - rand_tinyint();
pThreadInfo->start_table_from = 0;
pThreadInfo->ntables = superTblInfo->childTblCount;
pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint();
}
*/
tsem_init(&(t_info->lock_sem), 0, 0);
tsem_init(&(pThreadInfo->lock_sem), 0, 0);
if (ASYNC_MODE == g_Dbs.asyncMode) {
pthread_create(pids + i, NULL, asyncWrite, t_info);
pthread_create(pids + i, NULL, asyncWrite, pThreadInfo);
} else {
pthread_create(pids + i, NULL, syncWrite, t_info);
pthread_create(pids + i, NULL, syncWrite, pThreadInfo);
}
}
......@@ -5949,31 +6138,32 @@ static void startMultiThreadInsertData(int threads, char* db_name,
double avgDelay = 0;
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infos + i;
threadInfo *pThreadInfo = infos + i;
tsem_destroy(&(t_info->lock_sem));
tsem_destroy(&(pThreadInfo->lock_sem));
if (t_info->stmt) {
taos_stmt_close(t_info->stmt);
if (pThreadInfo->stmt) {
taos_stmt_close(pThreadInfo->stmt);
}
taos_close(t_info->taos);
tsem_destroy(&(pThreadInfo->lock_sem));
taos_close(pThreadInfo->taos);
debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n",
__func__, __LINE__,
t_info->threadID, t_info->totalInsertRows,
t_info->totalAffectedRows);
pThreadInfo->threadID, pThreadInfo->totalInsertRows,
pThreadInfo->totalAffectedRows);
if (superTblInfo) {
superTblInfo->totalAffectedRows += t_info->totalAffectedRows;
superTblInfo->totalInsertRows += t_info->totalInsertRows;
superTblInfo->totalAffectedRows += pThreadInfo->totalAffectedRows;
superTblInfo->totalInsertRows += pThreadInfo->totalInsertRows;
} else {
g_args.totalAffectedRows += t_info->totalAffectedRows;
g_args.totalInsertRows += t_info->totalInsertRows;
g_args.totalAffectedRows += pThreadInfo->totalAffectedRows;
g_args.totalInsertRows += pThreadInfo->totalInsertRows;
}
totalDelay += t_info->totalDelay;
cntDelay += t_info->cntDelay;
if (t_info->maxDelay > maxDelay) maxDelay = t_info->maxDelay;
if (t_info->minDelay < minDelay) minDelay = t_info->minDelay;
totalDelay += pThreadInfo->totalDelay;
cntDelay += pThreadInfo->cntDelay;
if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay;
if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay;
}
cntDelay -= 1;
......@@ -6029,26 +6219,26 @@ static void startMultiThreadInsertData(int threads, char* db_name,
static void *readTable(void *sarg) {
#if 1
threadInfo *rinfo = (threadInfo *)sarg;
TAOS *taos = rinfo->taos;
threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS *taos = pThreadInfo->taos;
char command[BUFFER_SIZE] = "\0";
uint64_t sTime = rinfo->start_time;
char *tb_prefix = rinfo->tb_prefix;
FILE *fp = fopen(rinfo->fp, "a");
uint64_t sTime = pThreadInfo->start_time;
char *tb_prefix = pThreadInfo->tb_prefix;
FILE *fp = fopen(pThreadInfo->filePath, "a");
if (NULL == fp) {
errorPrint( "fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
return NULL;
}
int64_t num_of_DPT;
/* if (rinfo->superTblInfo) {
num_of_DPT = rinfo->superTblInfo->insertRows; // nrecords_per_table;
/* if (pThreadInfo->superTblInfo) {
num_of_DPT = pThreadInfo->superTblInfo->insertRows; // nrecords_per_table;
} else {
*/
num_of_DPT = g_args.num_of_DPT;
// }
int64_t num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
int64_t num_of_tables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1;
int64_t totalData = num_of_DPT * num_of_tables;
bool do_aggreFunc = g_Dbs.do_aggreFunc;
......@@ -6101,17 +6291,17 @@ static void *readTable(void *sarg) {
static void *readMetric(void *sarg) {
#if 1
threadInfo *rinfo = (threadInfo *)sarg;
TAOS *taos = rinfo->taos;
threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS *taos = pThreadInfo->taos;
char command[BUFFER_SIZE] = "\0";
FILE *fp = fopen(rinfo->fp, "a");
FILE *fp = fopen(pThreadInfo->filePath, "a");
if (NULL == fp) {
printf("fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno));
return NULL;
}
int64_t num_of_DPT = rinfo->superTblInfo->insertRows;
int64_t num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
int64_t num_of_DPT = pThreadInfo->superTblInfo->insertRows;
int64_t num_of_tables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1;
int64_t totalData = num_of_DPT * num_of_tables;
bool do_aggreFunc = g_Dbs.do_aggreFunc;
......@@ -6172,6 +6362,13 @@ static void *readMetric(void *sarg) {
return NULL;
}
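// Ask for confirmation (press Enter) unless answer_yes (-y) was given.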
static void prompt()
{
if (!g_args.answer_yes) {
printf(" Press enter key to continue or Ctrl-C to stop\n\n");
(void)getchar();
}
}
static int insertTestProcess() {
......@@ -6192,10 +6389,7 @@ static int insertTestProcess() {
if (g_fpOfInsertResult)
printfInsertMetaToFile(g_fpOfInsertResult);
if (!g_args.answer_yes) {
printf("Press enter key to continue\n\n");
(void)getchar();
}
prompt();
init_rand_data();
......@@ -6306,8 +6500,8 @@ static void *specifiedTableQuery(void *sarg) {
uint64_t lastPrintTime = taosGetTimestampMs();
uint64_t startTs = taosGetTimestampMs();
if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->fp, "%s-%d",
if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq] != NULL) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
}
......@@ -6407,8 +6601,8 @@ static void *superTableQuery(void *sarg) {
for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) {
memset(sqlstr,0,sizeof(sqlstr));
replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i);
if (g_queryInfo.superQueryInfo.result[j][0] != 0) {
sprintf(pThreadInfo->fp, "%s-%d",
if (g_queryInfo.superQueryInfo.result[j] != NULL) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.superQueryInfo.result[j],
pThreadInfo->threadID);
}
......@@ -6465,10 +6659,7 @@ static int queryTestProcess() {
&g_queryInfo.superQueryInfo.childTblCount);
}
if (!g_args.answer_yes) {
printf("Press enter key to continue\n\n");
(void)getchar();
}
prompt();
if (g_args.debug_print || g_args.verbose_print) {
printfQuerySystemInfo(taos);
......@@ -6501,9 +6692,9 @@ static int queryTestProcess() {
for (uint64_t i = 0; i < nSqlCount; i++) {
for (int j = 0; j < nConcurrent; j++) {
uint64_t seq = i * nConcurrent + j;
threadInfo *t_info = infos + seq;
t_info->threadID = seq;
t_info->querySeq = i;
threadInfo *pThreadInfo = infos + seq;
pThreadInfo->threadID = seq;
pThreadInfo->querySeq = i;
if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
......@@ -6520,10 +6711,10 @@ static int queryTestProcess() {
}
}
t_info->taos = NULL;// TODO: workaround to use separate taos connection;
pThreadInfo->taos = NULL;// TODO: workaround to use separate taos connection;
pthread_create(pids + seq, NULL, specifiedTableQuery,
t_info);
pThreadInfo);
}
}
} else {
......@@ -6563,15 +6754,15 @@ static int queryTestProcess() {
uint64_t startFrom = 0;
for (int i = 0; i < threads; i++) {
threadInfo *t_info = infosOfSub + i;
t_info->threadID = i;
threadInfo *pThreadInfo = infosOfSub + i;
pThreadInfo->threadID = i;
t_info->start_table_from = startFrom;
t_info->ntables = i<b?a+1:a;
t_info->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
startFrom = t_info->end_table_to + 1;
t_info->taos = NULL; // TODO: workaround to use separate taos connection;
pthread_create(pidsOfSub + i, NULL, superTableQuery, t_info);
pThreadInfo->start_table_from = startFrom;
pThreadInfo->ntables = i<b?a+1:a;
pThreadInfo->end_table_to = i < b ? startFrom + a : startFrom + a - 1;
startFrom = pThreadInfo->end_table_to + 1;
pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo);
}
g_queryInfo.superQueryInfo.threadCnt = threads;
......@@ -6618,7 +6809,7 @@ static void stable_sub_callback(
}
if (param)
appendResultToFile(res, ((threadInfo *)param)->fp);
fetchResult(res, (threadInfo *)param);
// taos_unsubscribe() will free the result.
}
......@@ -6631,7 +6822,7 @@ static void specified_sub_callback(
}
if (param)
appendResultToFile(res, ((threadInfo *)param)->fp);
fetchResult(res, (threadInfo *)param);
// taos_unsubscribe() will free the result.
}
......@@ -6675,6 +6866,7 @@ static void *superSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
char subSqlstr[MAX_QUERY_SQL_LENGTH];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0};
uint64_t tsubSeq;
if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) {
errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n",
......@@ -6684,18 +6876,15 @@ static void *superSubscribe(void *sarg) {
}
if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
pThreadInfo->taos = taos_connect(g_queryInfo.host,
g_queryInfo.user,
g_queryInfo.password,
g_queryInfo.dbName,
g_queryInfo.port);
if (taos == NULL) {
if (pThreadInfo->taos == NULL) {
errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL));
return NULL;
} else {
pThreadInfo->taos = taos;
}
}
......@@ -6711,6 +6900,8 @@ static void *superSubscribe(void *sarg) {
char topic[32] = {0};
for (uint64_t i = pThreadInfo->start_table_from;
i <= pThreadInfo->end_table_to; i++) {
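      // Index subscriptions by the offset within this thread's table range,
      // not by the absolute table sequence, so tsub[] stays within MAX_QUERY_SQL_COUNT.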
tsubSeq = i - pThreadInfo->start_table_from;
verbosePrint("%s() LN%d, [%d], start=%"PRId64" end=%"PRId64" i=%"PRIu64"\n",
__func__, __LINE__,
pThreadInfo->threadID,
......@@ -6723,19 +6914,19 @@ static void *superSubscribe(void *sarg) {
g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq],
subSqlstr, i);
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->fp, "%s-%d",
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
}
debugPrint("%s() LN%d, [%d] subSqlstr: %s\n",
__func__, __LINE__, pThreadInfo->threadID, subSqlstr);
tsub[i] = subscribeImpl(
tsub[tsubSeq] = subscribeImpl(
STABLE_CLASS,
pThreadInfo, subSqlstr, topic,
g_queryInfo.superQueryInfo.subscribeRestart,
g_queryInfo.superQueryInfo.subscribeInterval);
if (NULL == tsub[i]) {
if (NULL == tsub[tsubSeq]) {
taos_close(pThreadInfo->taos);
return NULL;
}
......@@ -6748,57 +6939,66 @@ static void *superSubscribe(void *sarg) {
}
TAOS_RES* res = NULL;
uint64_t st = 0, et = 0;
while(1) {
for (uint64_t i = pThreadInfo->start_table_from;
i <= pThreadInfo->end_table_to; i++) {
if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) {
continue;
}
tsubSeq = i - pThreadInfo->start_table_from;
if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) {
continue;
}
res = taos_consume(tsub[i]);
if (res) {
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->fp, "%s-%d",
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
appendResultToFile(res, pThreadInfo->fp);
}
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->fp, "%s-%d",
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
appendResultToFile(res, pThreadInfo->fp);
}
consumed[i] ++;
st = taosGetTimestampMs();
performancePrint("st: %"PRIu64" et: %"PRIu64" st-et: %"PRIu64"\n", st, et, (st - et));
res = taos_consume(tsub[tsubSeq]);
et = taosGetTimestampMs();
performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st));
if ((g_queryInfo.superQueryInfo.subscribeKeepProgress)
&& (consumed[i] >=
g_queryInfo.superQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) {
printf("keepProgress:%d, resub super table query: %"PRIu64"\n",
g_queryInfo.superQueryInfo.subscribeKeepProgress,
pThreadInfo->querySeq);
taos_unsubscribe(tsub,
if (res) {
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
fetchResult(res, pThreadInfo);
}
if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
fetchResult(res, pThreadInfo);
}
consumed[tsubSeq] ++;
if ((g_queryInfo.superQueryInfo.subscribeKeepProgress)
&& (consumed[tsubSeq] >=
g_queryInfo.superQueryInfo.resubAfterConsume)) {
printf("keepProgress:%d, resub super table query: %"PRIu64"\n",
g_queryInfo.superQueryInfo.subscribeKeepProgress,
pThreadInfo->querySeq);
taos_unsubscribe(tsub[tsubSeq],
g_queryInfo.superQueryInfo.subscribeKeepProgress);
consumed[i]= 0;
tsub[i] = subscribeImpl(
STABLE_CLASS,
pThreadInfo, subSqlstr, topic,
g_queryInfo.superQueryInfo.subscribeRestart,
g_queryInfo.superQueryInfo.subscribeInterval
);
if (NULL == tsub[i]) {
taos_close(pThreadInfo->taos);
return NULL;
}
}
}
consumed[tsubSeq]= 0;
tsub[tsubSeq] = subscribeImpl(
STABLE_CLASS,
pThreadInfo, subSqlstr, topic,
g_queryInfo.superQueryInfo.subscribeRestart,
g_queryInfo.superQueryInfo.subscribeInterval
);
if (NULL == tsub[tsubSeq]) {
taos_close(pThreadInfo->taos);
return NULL;
}
}
}
}
}
taos_free_result(res);
for (uint64_t i = pThreadInfo->start_table_from;
i <= pThreadInfo->end_table_to; i++) {
taos_unsubscribe(tsub[i], 0);
tsubSeq = i - pThreadInfo->start_table_from;
taos_unsubscribe(tsub[tsubSeq], 0);
}
taos_close(pThreadInfo->taos);
......@@ -6807,21 +7007,18 @@ static void *superSubscribe(void *sarg) {
static void *specifiedSubscribe(void *sarg) {
threadInfo *pThreadInfo = (threadInfo *)sarg;
TAOS_SUB* tsub = NULL;
// TAOS_SUB* tsub = NULL;
if (pThreadInfo->taos == NULL) {
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
pThreadInfo->taos = taos_connect(g_queryInfo.host,
g_queryInfo.user,
g_queryInfo.password,
g_queryInfo.dbName,
g_queryInfo.port);
if (taos == NULL) {
if (pThreadInfo->taos == NULL) {
errorPrint("[%d] Failed to connect to TDengine, reason:%s\n",
pThreadInfo->threadID, taos_errstr(NULL));
return NULL;
} else {
pThreadInfo->taos = taos;
}
}
......@@ -6833,69 +7030,70 @@ static void *specifiedSubscribe(void *sarg) {
return NULL;
}
char topic[32] = {0};
sprintf(topic, "taosdemo-subscribe-%"PRIu64"", pThreadInfo->querySeq);
if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->fp, "%s-%d",
sprintf(g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
"taosdemo-subscribe-%"PRIu64"-%d",
pThreadInfo->querySeq,
pThreadInfo->threadID);
if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq] != NULL) {
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
}
tsub = subscribeImpl(
SPECIFIED_CLASS, pThreadInfo,
g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl(
SPECIFIED_CLASS, pThreadInfo,
g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
topic,
g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
g_queryInfo.specifiedQueryInfo.subscribeRestart,
g_queryInfo.specifiedQueryInfo.subscribeInterval);
if (NULL == tsub) {
if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
taos_close(pThreadInfo->taos);
return NULL;
}
// start loop to consume result
TAOS_RES* res = NULL;
int consumed;
g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
while(1) {
if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) {
continue;
}
res = taos_consume(tsub);
if (res) {
g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = taos_consume(
g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]);
if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) {
if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
sprintf(pThreadInfo->fp, "%s-%d",
sprintf(pThreadInfo->filePath, "%s-%d",
g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
pThreadInfo->threadID);
appendResultToFile(res, pThreadInfo->fp);
fetchResult(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], pThreadInfo);
}
consumed ++;
g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++;
if ((g_queryInfo.specifiedQueryInfo.subscribeKeepProgress)
&& (consumed >=
&& (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >=
g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) {
printf("keepProgress:%d, resub specified query: %"PRIu64"\n",
g_queryInfo.specifiedQueryInfo.subscribeKeepProgress,
pThreadInfo->querySeq);
consumed = 0;
taos_unsubscribe(tsub,
g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0;
taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID],
g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
tsub = subscribeImpl(
g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl(
SPECIFIED_CLASS,
pThreadInfo,
g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq],
topic,
g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID],
g_queryInfo.specifiedQueryInfo.subscribeRestart,
g_queryInfo.specifiedQueryInfo.subscribeInterval);
if (NULL == tsub) {
if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) {
taos_close(pThreadInfo->taos);
return NULL;
}
}
}
}
taos_free_result(res);
taos_unsubscribe(tsub, 0);
taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]);
taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->querySeq], 0);
taos_close(pThreadInfo->taos);
return NULL;
......@@ -6906,10 +7104,7 @@ static int subscribeTestProcess() {
printfQueryMeta();
resetAfterAnsiEscape();
if (!g_args.answer_yes) {
printf("Press enter key to continue\n\n");
(void) getchar();
}
prompt();
TAOS * taos = NULL;
taos = taos_connect(g_queryInfo.host,
......@@ -6968,18 +7163,18 @@ static int subscribeTestProcess() {
for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) {
uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j;
threadInfo *t_info = infos + seq;
t_info->threadID = seq;
t_info->querySeq = i;
t_info->taos = NULL; // TODO: workaround to use separate taos connection;
pthread_create(pids + seq, NULL, specifiedSubscribe, t_info);
threadInfo *pThreadInfo = infos + seq;
pThreadInfo->threadID = seq;
pThreadInfo->querySeq = i;
pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo);
}
}
}
//==== create threads for super table query
if (g_queryInfo.superQueryInfo.sqlCount <= 0) {
printf("%s() LN%d, super table query sqlCount %"PRIu64".\n",
debugPrint("%s() LN%d, super table query sqlCount %"PRIu64".\n",
__func__, __LINE__,
g_queryInfo.superQueryInfo.sqlCount);
} else {
......@@ -7018,17 +7213,17 @@ static int subscribeTestProcess() {
uint64_t startFrom = 0;
for (int j = 0; j < threads; j++) {
uint64_t seq = i * threads + j;
threadInfo *t_info = infosOfStable + seq;
t_info->threadID = seq;
t_info->querySeq = i;
t_info->start_table_from = startFrom;
t_info->ntables = j<b?a+1:a;
t_info->end_table_to = j<b?startFrom+a:startFrom+a-1;
startFrom = t_info->end_table_to + 1;
t_info->taos = NULL; // TODO: workaround to use separate taos connection;
threadInfo *pThreadInfo = infosOfStable + seq;
pThreadInfo->threadID = seq;
pThreadInfo->querySeq = i;
pThreadInfo->start_table_from = startFrom;
pThreadInfo->ntables = j<b?a+1:a;
pThreadInfo->end_table_to = j<b?startFrom+a:startFrom+a-1;
startFrom = pThreadInfo->end_table_to + 1;
pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
pthread_create(pidsOfStable + seq,
NULL, superSubscribe, t_info);
NULL, superSubscribe, pThreadInfo);
}
}
......@@ -7144,7 +7339,7 @@ static void setParaFromArg(){
tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix,
g_args.tb_prefix, MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].insertMode = g_args.iface;
g_Dbs.db[0].superTbls[0].iface = g_args.iface;
tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp,
"2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].timeStampStep = DEFAULT_TIMESTAMP_STEP;
......@@ -7167,17 +7362,21 @@ static void setParaFromArg(){
if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) {
g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR;
} else {
for (int i = g_Dbs.db[0].superTbls[0].columnCount; i < g_args.num_of_CPR; i++) {
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", MAX_TB_NAME_SIZE);
for (int i = g_Dbs.db[0].superTbls[0].columnCount;
i < g_args.num_of_CPR; i++) {
tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
"INT", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
g_Dbs.db[0].superTbls[0].columnCount++;
}
}
tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, "INT", MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType,
"INT", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0;
tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", MAX_TB_NAME_SIZE);
tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType,
"BINARY", MAX_TB_NAME_SIZE);
g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
g_Dbs.db[0].superTbls[0].tagCount = 2;
} else {
......@@ -7303,47 +7502,47 @@ static void queryResult() {
// query data
pthread_t read_id;
threadInfo *rInfo = malloc(sizeof(threadInfo));
assert(rInfo);
rInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
rInfo->start_table_from = 0;
threadInfo *pThreadInfo = malloc(sizeof(threadInfo));
assert(pThreadInfo);
pThreadInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000
pThreadInfo->start_table_from = 0;
//rInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
//pThreadInfo->do_aggreFunc = g_Dbs.do_aggreFunc;
if (g_args.use_metric) {
rInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
rInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
tstrncpy(rInfo->tb_prefix,
pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount;
pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
tstrncpy(pThreadInfo->tb_prefix,
g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE);
} else {
rInfo->ntables = g_args.num_of_tables;
rInfo->end_table_to = g_args.num_of_tables -1;
tstrncpy(rInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
pThreadInfo->ntables = g_args.num_of_tables;
pThreadInfo->end_table_to = g_args.num_of_tables -1;
tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
}
rInfo->taos = taos_connect(
pThreadInfo->taos = taos_connect(
g_Dbs.host,
g_Dbs.user,
g_Dbs.password,
g_Dbs.db[0].dbName,
g_Dbs.port);
if (rInfo->taos == NULL) {
if (pThreadInfo->taos == NULL) {
errorPrint( "Failed to connect to TDengine, reason:%s\n",
taos_errstr(NULL));
free(rInfo);
free(pThreadInfo);
exit(-1);
}
tstrncpy(rInfo->fp, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN);
if (!g_Dbs.use_metric) {
pthread_create(&read_id, NULL, readTable, rInfo);
pthread_create(&read_id, NULL, readTable, pThreadInfo);
} else {
pthread_create(&read_id, NULL, readMetric, rInfo);
pthread_create(&read_id, NULL, readMetric, pThreadInfo);
}
pthread_join(read_id, NULL);
taos_close(rInfo->taos);
free(rInfo);
taos_close(pThreadInfo->taos);
free(pThreadInfo);
}
static void testCmdLine() {
......@@ -7397,6 +7596,9 @@ int main(int argc, char *argv[]) {
} else {
testCmdLine();
}
if (g_dupstr)
free(g_dupstr);
}
return 0;
......
......@@ -35,6 +35,8 @@ void mnodeDropDbFromAcct(SAcctObj *pAcct, SDbObj *pDb);
void mnodeAddUserToAcct(SAcctObj *pAcct, SUserObj *pUser);
void mnodeDropUserFromAcct(SAcctObj *pAcct, SUserObj *pUser);
int32_t mnodeCompactAccts();
#ifdef __cplusplus
}
#endif
......
......@@ -25,6 +25,8 @@ void mnodeCleanupCluster();
void mnodeUpdateClusterId();
const char* mnodeGetClusterId();
int32_t mnodeCompactCluster();
#ifdef __cplusplus
}
#endif
......
......@@ -41,6 +41,8 @@ void mnodeDecDbRef(SDbObj *pDb);
bool mnodeCheckIsMonitorDB(char *db, char *monitordb);
void mnodeDropAllDbs(SAcctObj *pAcct);
int32_t mnodeCompactDbs();
// util func
void mnodeAddSuperTableIntoDb(SDbObj *pDb);
void mnodeRemoveSuperTableFromDb(SDbObj *pDb);
......
......@@ -77,6 +77,7 @@ void * mnodeGetDnodeByEp(char *ep);
void mnodeUpdateDnode(SDnodeObj *pDnode);
int32_t mnodeDropDnode(SDnodeObj *pDnode, void *pMsg);
int32_t mnodeCompactDnodes();
extern int32_t tsAccessSquence;
#ifdef __cplusplus
......
......@@ -50,6 +50,7 @@ char* mnodeGetMnodeMasterEp();
void mnodeGetMnodeInfos(void *mnodes);
void mnodeUpdateMnodeEpSet(SMInfos *pMnodes);
int32_t mnodeCompactMnodes();
#ifdef __cplusplus
}
#endif
......
......@@ -92,6 +92,7 @@ void sdbUpdateMnodeRoles();
int32_t sdbGetReplicaNum();
int32_t sdbInsertRow(SSdbRow *pRow);
int32_t sdbInsertCompactRow(SSdbRow *pRow);
int32_t sdbDeleteRow(SSdbRow *pRow);
int32_t sdbUpdateRow(SSdbRow *pRow);
int32_t sdbInsertRowToQueue(SSdbRow *pRow);
......@@ -106,6 +107,7 @@ int32_t sdbGetId(void *pTable);
uint64_t sdbGetVersion();
bool sdbCheckRowDeleted(void *pTable, void *pRow);
int32_t mnodeCompactWal();
#ifdef __cplusplus
}
#endif
......
......@@ -36,6 +36,7 @@ void mnodeCancelGetNextSuperTable(void *pIter);
void mnodeDropAllChildTables(SDbObj *pDropDb);
void mnodeDropAllSuperTables(SDbObj *pDropDb);
void mnodeDropAllChildTablesInVgroups(SVgObj *pVgroup);
int32_t mnodeCompactTables();
#ifdef __cplusplus
}
......
......@@ -33,6 +33,8 @@ char * mnodeGetUserFromMsg(void *pMnodeMsg);
int32_t mnodeCreateUser(SAcctObj *pAcct, char *name, char *pass, void *pMsg);
void mnodeDropAllUsers(SAcctObj *pAcct);
int32_t mnodeCompactUsers();
#ifdef __cplusplus
}
#endif
......
......@@ -32,6 +32,7 @@ void mnodeDropAllDbVgroups(SDbObj *pDropDb);
void mnodeSendDropAllDbVgroupsMsg(SDbObj *pDropDb);
void mnodeDropAllDnodeVgroups(SDnodeObj *pDropDnode);
//void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb);
int32_t mnodeCompactVgroups();
void * mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup);
void mnodeCancelGetNextVgroup(void *pIter);
......
......@@ -238,6 +238,32 @@ static int32_t mnodeCreateRootAcct() {
return sdbInsertRow(&row);
}
int32_t mnodeCompactAccts() {
void *pIter = NULL;
SAcctObj *pAcct = NULL;
mInfo("start to compact accts table...");
while (1) {
pIter = mnodeGetNextAcct(pIter, &pAcct);
if (pAcct == NULL) break;
SSdbRow row = {
.type = SDB_OPER_GLOBAL,
.pTable = tsAcctSdb,
.pObj = pAcct,
};
mInfo("compact accts %s", pAcct->user);
sdbInsertCompactRow(&row);
}
mInfo("end to compact accts table...");
return 0;
}
#ifndef _ACCT
int32_t acctInit() { return TSDB_CODE_SUCCESS; }
......
......@@ -237,3 +237,27 @@ static int32_t mnodeRetrieveClusters(SShowObj *pShow, char *data, int32_t rows,
pShow->numOfReads += numOfRows;
return numOfRows;
}
int32_t mnodeCompactCluster() {
SClusterObj *pCluster = NULL;
void *pIter;
mInfo("start to compact cluster table...");
pIter = mnodeGetNextCluster(NULL, &pCluster);
while (pCluster) {
SSdbRow row = {
.type = SDB_OPER_GLOBAL,
.pTable = tsClusterSdb,
.pObj = pCluster,
};
sdbInsertCompactRow(&row);
pIter = mnodeGetNextCluster(pIter, &pCluster);
}
mInfo("end to compact cluster table...");
return 0;
}
\ No newline at end of file
......@@ -389,7 +389,7 @@ static void mnodeSetDefaultDbCfg(SDbCfg *pCfg) {
if (pCfg->compression < 0) pCfg->compression = tsCompression;
if (pCfg->walLevel < 0) pCfg->walLevel = tsWAL;
if (pCfg->replications < 0) pCfg->replications = tsReplications;
if (pCfg->quorum < 0) pCfg->quorum = tsQuorum;
if (pCfg->quorum < 0) pCfg->quorum = MIN(tsQuorum, pCfg->replications);
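  // Quorum can never exceed the replica count; e.g. a single-replica database
  // gets quorum 1 even if tsQuorum is configured larger.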
if (pCfg->update < 0) pCfg->update = tsUpdate;
if (pCfg->cacheLastRow < 0) pCfg->cacheLastRow = tsCacheLastRow;
if (pCfg->dbType < 0) pCfg->dbType = 0;
......@@ -1271,3 +1271,30 @@ void mnodeDropAllDbs(SAcctObj *pAcct) {
mInfo("acct:%s, all dbs:%d is dropped from sdb", pAcct->user, numOfDbs);
}
int32_t mnodeCompactDbs() {
void *pIter = NULL;
SDbObj *pDb = NULL;
mInfo("start to compact dbs table...");
while (1) {
pIter = mnodeGetNextDb(pIter, &pDb);
if (pDb == NULL) break;
SSdbRow row = {
.type = SDB_OPER_GLOBAL,
.pTable = tsDbSdb,
.pObj = pDb,
.rowSize = sizeof(SDbObj),
};
mInfo("compact dbs %s", pDb->name);
sdbInsertCompactRow(&row);
}
mInfo("end to compact dbs table...");
return 0;
}
\ No newline at end of file
......@@ -1270,3 +1270,30 @@ char* dnodeRoles[] = {
"vnode",
"any"
};
int32_t mnodeCompactDnodes() {
SDnodeObj *pDnode = NULL;
void * pIter = NULL;
mInfo("start to compact dnodes table...");
while (1) {
pIter = mnodeGetNextDnode(pIter, &pDnode);
if (pDnode == NULL) break;
SSdbRow row = {
.type = SDB_OPER_GLOBAL,
.pTable = tsDnodeSdb,
.pObj = pDnode,
.rowSize = sizeof(SDnodeObj),
};
mInfo("compact dnode %d", pDnode->dnodeId);
sdbInsertCompactRow(&row);
}
mInfo("end to compact dnodes table...");
return 0;
}
\ No newline at end of file
......@@ -57,6 +57,18 @@ static SStep tsMnodeSteps[] = {
{"show", mnodeInitShow, mnodeCleanUpShow}
};
static SStep tsMnodeCompactSteps[] = {
{"cluster", mnodeCompactCluster, NULL},
{"dnodes", mnodeCompactDnodes, NULL},
{"mnodes", mnodeCompactMnodes, NULL},
{"accts", mnodeCompactAccts, NULL},
{"users", mnodeCompactUsers, NULL},
{"dbs", mnodeCompactDbs, NULL},
{"vgroups", mnodeCompactVgroups, NULL},
{"tables", mnodeCompactTables, NULL},
};
static void mnodeInitTimer();
static void mnodeCleanupTimer();
static bool mnodeNeedStart() ;
......@@ -71,6 +83,11 @@ static int32_t mnodeInitComponents() {
return dnodeStepInit(tsMnodeSteps, stepSize);
}
int32_t mnodeCompactComponents() {
int32_t stepSize = sizeof(tsMnodeCompactSteps) / sizeof(SStep);
return dnodeStepInit(tsMnodeCompactSteps, stepSize);
}
int32_t mnodeStartSystem() {
if (tsMgmtIsRunning) {
mInfo("mnode module already started...");
......
......@@ -566,3 +566,30 @@ static int32_t mnodeRetrieveMnodes(SShowObj *pShow, char *data, int32_t rows, vo
return numOfRows;
}
int32_t mnodeCompactMnodes() {
void *pIter = NULL;
SMnodeObj *pMnode = NULL;
mInfo("start to compact mnodes table...");
while (1) {
pIter = mnodeGetNextMnode(pIter, &pMnode);
if (pMnode == NULL) break;
SSdbRow row = {
.type = SDB_OPER_GLOBAL,
.pTable = tsMnodeSdb,
.pObj = pMnode,
.rowSize = sizeof(SMnodeObj),
};
mInfo("compact mnode %d", pMnode->mnodeId);
sdbInsertCompactRow(&row);
}
mInfo("end to compact mnodes table...");
return 0;
}
\ No newline at end of file
......@@ -20,6 +20,7 @@
#include "tutil.h"
#include "tref.h"
#include "tbn.h"
#include "tfs.h"
#include "tqueue.h"
#include "twal.h"
#include "tsync.h"
......@@ -450,6 +451,12 @@ int32_t sdbInit() {
}
tsSdbMgmt.status = SDB_STATUS_SERVING;
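  // If wal compaction was requested (tsCompactMnodeWal), rewrite the mnode wal
  // from the in-memory tables and exit instead of serving requests.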
if (tsCompactMnodeWal) {
mnodeCompactWal();
exit(EXIT_SUCCESS);
}
return TSDB_CODE_SUCCESS;
}
......@@ -726,6 +733,12 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void *
}
}
int32_t sdbInsertCompactRow(SSdbRow *pRow) {
SSdbTable *pTable = pRow->pTable;
if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE;
return sdbWriteRowToQueue(pRow, SDB_ACTION_INSERT);
}
int32_t sdbInsertRow(SSdbRow *pRow) {
SSdbTable *pTable = pRow->pTable;
if (pTable == NULL) return TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE;
......@@ -1138,3 +1151,46 @@ static void *sdbWorkerFp(void *pWorker) {
int32_t sdbGetReplicaNum() {
return tsSdbMgmt.cfg.replica;
}
int32_t mnodeCompactWal() {
sdbInfo("vgId:1, start compact mnode wal...");
// close old wal
walFsync(tsSdbMgmt.wal, true);
walClose(tsSdbMgmt.wal);
// reset the version so that the compacted wal log can start from version 1
tsSdbMgmt.version = 0;
// change wal to wal_tmp dir
SWalCfg walCfg = {.vgId = 1, .walLevel = TAOS_WAL_FSYNC, .keep = TAOS_WAL_KEEP, .fsyncPeriod = 0};
char temp[TSDB_FILENAME_LEN] = {0};
sprintf(temp, "%s/wal", tsMnodeTmpDir);
tsSdbMgmt.wal = walOpen(temp, &walCfg);
walRenew(tsSdbMgmt.wal);
// write the in-memory tables into the wal under the tmp dir
if (mnodeCompactComponents() != 0) {
tfsRmdir(tsMnodeTmpDir);
return -1;
}
// close wal
walFsync(tsSdbMgmt.wal, true);
walClose(tsSdbMgmt.wal);
// rename old wal to wal_bak
if (taosRename(tsMnodeDir, tsMnodeBakDir) != 0) {
return -1;
}
// rename wal_tmp to wal
if (taosRename(tsMnodeTmpDir, tsMnodeDir) != 0) {
return -1;
}
// del wal_tmp dir
sdbInfo("vgId:1, compact mnode wal success");
return 0;
}
\ No newline at end of file
......@@ -3342,3 +3342,65 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro
return numOfRows;
}
static int32_t mnodeCompactSuperTables() {
void *pIter = NULL;
SSTableObj *pTable = NULL;
mInfo("start to compact super table...");
while (1) {
pIter = mnodeGetNextSuperTable(pIter, &pTable);
if (pTable == NULL) break;
int32_t schemaSize = (pTable->numOfColumns + pTable->numOfTags) * sizeof(SSchema);
SSdbRow row = {
.type = SDB_OPER_GLOBAL,
.pTable = tsSuperTableSdb,
.pObj = pTable,
.rowSize = sizeof(SSTableObj) + schemaSize,
};
mInfo("compact super %" PRIu64, pTable->uid);
sdbInsertCompactRow(&row);
}
mInfo("end to compact super table...");
return 0;
}
static int32_t mnodeCompactChildTables() {
void *pIter = NULL;
SCTableObj *pTable = NULL;
mInfo("start to compact child table...");
while (1) {
pIter = mnodeGetNextChildTable(pIter, &pTable);
if (pTable == NULL) break;
SSdbRow row = {
.type = SDB_OPER_GLOBAL,
.pObj = pTable,
.pTable = tsChildTableSdb,
};
mInfo("compact child %" PRIu64 ":%d", pTable->uid, pTable->tid);
sdbInsertCompactRow(&row);
}
mInfo("end to compact child table...");
return 0;
}
int32_t mnodeCompactTables() {
mnodeCompactSuperTables();
mnodeCompactChildTables();
return 0;
}
\ No newline at end of file
......@@ -617,3 +617,30 @@ static int32_t mnodeProcessAuthMsg(SMnodeMsg *pMsg) {
return mnodeRetriveAuth(pAuthMsg->user, &pAuthRsp->spi, &pAuthRsp->encrypt, pAuthRsp->secret, pAuthRsp->ckey);
}
int32_t mnodeCompactUsers() {
void *pIter = NULL;
SUserObj *pUser = NULL;
mInfo("start to compact users table...");
while (1) {
pIter = mnodeGetNextUser(pIter, &pUser);
if (pUser == NULL) break;
SSdbRow row = {
.type = SDB_OPER_GLOBAL,
.pTable = tsUserSdb,
.pObj = pUser,
.rowSize = sizeof(SUserObj),
};
mInfo("compact users %s", pUser->user);
sdbInsertCompactRow(&row);
}
mInfo("end to compact users table...");
return 0;
}
\ No newline at end of file
......@@ -1302,3 +1302,30 @@ void mnodeSetVgidVer(int8_t *cver, uint64_t iver) {
cver[1] = (int8_t)((int32_t)(iver % 100000) / 100);
cver[2] = (int8_t)(iver % 100);
}
int32_t mnodeCompactVgroups() {
void *pIter = NULL;
SVgObj *pVgroup = NULL;
mInfo("start to compact vgroups table...");
while (1) {
pIter = mnodeGetNextVgroup(pIter, &pVgroup);
if (pVgroup == NULL) break;
SSdbRow row = {
.type = SDB_OPER_GLOBAL,
.pTable = tsVgroupSdb,
.pObj = pVgroup,
.rowSize = sizeof(SVgObj),
};
mInfo("compact vgroups %d", pVgroup->vgId);
sdbInsertCompactRow(&row);
}
mInfo("end to compact vgroups table...");
return 0;
}
\ No newline at end of file
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_OS_MIPS64_H
#define TDENGINE_OS_MIPS64_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <stdlib.h>
#include <argp.h>
#include <arpa/inet.h>
#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <endian.h>
#include <errno.h>
#include <float.h>
#include <ifaddrs.h>
#include <libgen.h>
#include <limits.h>
#include <locale.h>
#include <math.h>
#include <netdb.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <pthread.h>
#include <pwd.h>
#include <regex.h>
#include <semaphore.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/statvfs.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <syslog.h>
#include <termios.h>
#include <unistd.h>
#include <wchar.h>
#include <wordexp.h>
#include <wctype.h>
#include <inttypes.h>
#include <fcntl.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <error.h>
#include <linux/sysctl.h>
#include <math.h>
#include <poll.h>
#ifdef __cplusplus
}
#endif
#endif
......@@ -62,7 +62,7 @@ static void* taosRandomRealloc(void* ptr, size_t size, const char* file, uint32_
static char* taosRandomStrdup(const char* str, const char* file, uint32_t line) {
size_t len = strlen(str);
return taosRandomAllocFail(len + 1, file, line) ? NULL : taosStrdupImp(str);
return taosRandomAllocFail(len + 1, file, line) ? NULL : tstrdup(str);
}
static char* taosRandomStrndup(const char* str, size_t size, const char* file, uint32_t line) {
......@@ -70,11 +70,11 @@ static char* taosRandomStrndup(const char* str, size_t size, const char* file, u
if (len > size) {
len = size;
}
return taosRandomAllocFail(len + 1, file, line) ? NULL : taosStrndupImp(str, len);
return taosRandomAllocFail(len + 1, file, line) ? NULL : tstrndup(str, len);
}
static ssize_t taosRandomGetline(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) {
return taosRandomAllocFail(*n, file, line) ? -1 : taosGetlineImp(lineptr, n, stream);
return taosRandomAllocFail(*n, file, line) ? -1 : tgetline(lineptr, n, stream);
}
////////////////////////////////////////////////////////////////////////////////
......@@ -242,7 +242,7 @@ static char* taosStrndupDetectLeak(const char* str, size_t size, const char* fil
static ssize_t taosGetlineDetectLeak(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) {
char* buf = NULL;
size_t bufSize = 0;
ssize_t size = taosGetlineImp(&buf, &bufSize, stream);
ssize_t size = tgetline(&buf, &bufSize, stream);
if (size != -1) {
if (*n < size + 1) {
void* p = taosReallocDetectLeak(*lineptr, size + 1, file, line);
......@@ -372,7 +372,7 @@ void taosFreeMem(void* ptr, const char* file, uint32_t line) {
char* taosStrdupMem(const char* str, const char* file, uint32_t line) {
switch (allocMode) {
case TAOS_ALLOC_MODE_DEFAULT:
return taosStrdupImp(str);
return tstrdup(str);
case TAOS_ALLOC_MODE_RANDOM_FAIL:
return taosRandomStrdup(str, file, line);
......@@ -380,13 +380,13 @@ char* taosStrdupMem(const char* str, const char* file, uint32_t line) {
case TAOS_ALLOC_MODE_DETECT_LEAK:
return taosStrdupDetectLeak(str, file, line);
}
return taosStrdupImp(str);
return tstrdup(str);
}
char* taosStrndupMem(const char* str, size_t size, const char* file, uint32_t line) {
switch (allocMode) {
case TAOS_ALLOC_MODE_DEFAULT:
return taosStrndupImp(str, size);
return tstrndup(str, size);
case TAOS_ALLOC_MODE_RANDOM_FAIL:
return taosRandomStrndup(str, size, file, line);
......@@ -394,13 +394,13 @@ char* taosStrndupMem(const char* str, size_t size, const char* file, uint32_t li
case TAOS_ALLOC_MODE_DETECT_LEAK:
return taosStrndupDetectLeak(str, size, file, line);
}
return taosStrndupImp(str, size);
return tstrndup(str, size);
}
ssize_t taosGetlineMem(char **lineptr, size_t *n, FILE *stream, const char* file, uint32_t line) {
switch (allocMode) {
case TAOS_ALLOC_MODE_DEFAULT:
return taosGetlineImp(lineptr, n, stream);
return tgetline(lineptr, n, stream);
case TAOS_ALLOC_MODE_RANDOM_FAIL:
return taosRandomGetline(lineptr, n, stream, file, line);
......@@ -408,7 +408,7 @@ ssize_t taosGetlineMem(char **lineptr, size_t *n, FILE *stream, const char* file
case TAOS_ALLOC_MODE_DETECT_LEAK:
return taosGetlineDetectLeak(lineptr, n, stream, file, line);
}
return taosGetlineImp(lineptr, n, stream);
return tgetline(lineptr, n, stream);
}
static void taosCloseAllocLog() {
......@@ -517,4 +517,4 @@ void* taosTZfree(void* ptr) {
free((void*)((char*)ptr - sizeof(size_t)));
}
return NULL;
}
\ No newline at end of file
}
......@@ -25,14 +25,14 @@
typedef void (*FLinuxSignalHandler)(int32_t signum, siginfo_t *sigInfo, void *context);
void taosSetSignal(int32_t signum, FSignalHandler sigfp) {
struct sigaction act = {{0}};
struct sigaction act; memset(&act, 0, sizeof(act));
#if 1
act.sa_flags = SA_SIGINFO;
act.sa_sigaction = (FLinuxSignalHandler)sigfp;
#else
act.sa_handler = sigfp;
#endif
sigaction(signum, &act, NULL);
#else
act.sa_handler = sigfp;
#endif
sigaction(signum, &act, NULL);
}
void taosIgnSignal(int32_t signum) {
......
......@@ -14,6 +14,7 @@
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "taosdef.h"
#include "tglobal.h"
......@@ -24,7 +25,7 @@
bool taosCheckPthreadValid(pthread_t thread) { return thread.p != NULL; }
void taosResetPthread(pthread_t *thread) { thread->p = 0; }
void taosResetPthread(pthread_t* thread) { thread->p = 0; }
int64_t taosGetPthreadId(pthread_t thread) {
#ifdef PTW32_VERSION
......@@ -34,27 +35,24 @@ int64_t taosGetPthreadId(pthread_t thread) {
#endif
}
int64_t taosGetSelfPthreadId() {
return GetCurrentThreadId();
}
int64_t taosGetSelfPthreadId() { return GetCurrentThreadId(); }
bool taosComparePthread(pthread_t first, pthread_t second) {
return first.p == second.p;
}
bool taosComparePthread(pthread_t first, pthread_t second) { return first.p == second.p; }
int32_t taosGetPId() {
return GetCurrentProcessId();
}
int32_t taosGetPId() { return GetCurrentProcessId(); }
int32_t taosGetCurrentAPPName(char *name, int32_t* len) {
int32_t taosGetCurrentAPPName(char* name, int32_t* len) {
char filepath[1024] = {0};
GetModuleFileName(NULL, filepath, MAX_PATH);
*strrchr(filepath,'.') = '\0';
char* sub = strrchr(filepath, '.');
if (sub != NULL) {
*sub = '\0';
}
strcpy(name, filepath);
if (len != NULL) {
*len = (int32_t) strlen(filepath);
*len = (int32_t)strlen(filepath);
}
return 0;
......
......@@ -417,3 +417,13 @@ void monExecuteSQL(char *sql) {
monDebug("execute sql:%s", sql);
taos_query_a(tsMonitor.conn, sql, monExecSqlCb, "sql");
}
void monExecuteSQLWithResultCallback(char *sql, MonExecuteSQLCbFP callback, void* param) {
if (tsMonitor.conn == NULL) {
callback(param, NULL, TSDB_CODE_MON_CONNECTION_INVALID);
return;
}
monDebug("execute sql:%s", sql);
taos_query_a(tsMonitor.conn, sql, callback, param);
}
......@@ -40,7 +40,7 @@ typedef struct SHeapEntry {
} SHeapEntry;
typedef struct SHistogramInfo {
int32_t numOfElems;
int64_t numOfElems;
int32_t numOfEntries;
int32_t maxEntries;
double min;
......
......@@ -446,7 +446,7 @@ void tHistogramDestroy(SHistogramInfo** pHisto) {
}
void tHistogramPrint(SHistogramInfo* pHisto) {
printf("total entries: %d, elements: %d\n", pHisto->numOfEntries, pHisto->numOfElems);
printf("total entries: %d, elements: %"PRId64 "\n", pHisto->numOfEntries, pHisto->numOfElems);
#if defined(USE_ARRAYLIST)
for (int32_t i = 0; i < pHisto->numOfEntries; ++i) {
printf("%d: (%f, %" PRId64 ")\n", i + 1, pHisto->elems[i].val, pHisto->elems[i].num);
......
......@@ -18,7 +18,7 @@
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef _TD_ARM_
#if !defined(_TD_ARM_) && !defined(_TD_MIPS_)
#include <nmmintrin.h>
#endif
......
......@@ -120,12 +120,14 @@ int32_t vnodeDrop(int32_t vgId) {
vDebug("vgId:%d, failed to drop, vnode not find", vgId);
return TSDB_CODE_VND_INVALID_VGROUP_ID;
}
if (pVnode->dropped) {
vnodeRelease(pVnode);
return TSDB_CODE_SUCCESS;
}
vInfo("vgId:%d, vnode will be dropped, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
pVnode->dropped = 1;
// remove from hash, so new messages won't be consumed
vnodeRemoveFromHash(pVnode);
vnodeRelease(pVnode);
vnodeCleanupInMWorker(pVnode);
......@@ -425,6 +427,10 @@ int32_t vnodeOpen(int32_t vgId) {
int32_t vnodeClose(int32_t vgId) {
SVnodeObj *pVnode = vnodeAcquire(vgId);
if (pVnode == NULL) return 0;
if (pVnode->dropped) {
vnodeRelease(pVnode);
return 0;
}
vDebug("vgId:%d, vnode will be closed, pVnode:%p", pVnode->vgId, pVnode);
vnodeRemoveFromHash(pVnode);
......@@ -510,6 +516,8 @@ void vnodeCleanUp(SVnodeObj *pVnode) {
vnodeSetClosingStatus(pVnode);
vnodeRemoveFromHash(pVnode);
// stop replication module
if (pVnode->sync > 0) {
int64_t sync = pVnode->sync;
......
......@@ -117,14 +117,17 @@ static SVReadMsg *vnodeBuildVReadMsg(SVnodeObj *pVnode, void *pCont, int32_t con
}
int32_t vnodeWriteToRQueue(void *vparam, void *pCont, int32_t contLen, int8_t qtype, void *rparam) {
SVnodeObj *pVnode = vparam;
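  // Reject new read requests for a vnode that is being dropped.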
if (pVnode->dropped) {
return TSDB_CODE_APP_NOT_READY;
}
SVReadMsg *pRead = vnodeBuildVReadMsg(vparam, pCont, contLen, qtype, rparam);
if (pRead == NULL) {
assert(terrno != 0);
return terrno;
}
SVnodeObj *pVnode = vparam;
int32_t code = vnodeCheckRead(pVnode);
if (code != TSDB_CODE_SUCCESS) {
taosFreeQitem(pRead);
......
......@@ -66,6 +66,9 @@ static bool vnodeSetClosingStatusImp(SVnodeObj* pVnode) {
}
bool vnodeSetClosingStatus(SVnodeObj* pVnode) {
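  // Nothing to do if the vnode is already in the closing state.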
if (pVnode->status == TAOS_VN_STATUS_CLOSING)
return true;
while (!vnodeSetClosingStatusImp(pVnode)) {
taosMsleep(1);
}
......
......@@ -55,6 +55,11 @@ void vnodeNotifyRole(int32_t vgId, int8_t role) {
vTrace("vgId:%d, vnode not found while notify role", vgId);
return;
}
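  // Ignore role changes for a vnode that is being dropped.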
if (pVnode->dropped) {
vTrace("vgId:%d, vnode dropped while notify role", vgId);
vnodeRelease(pVnode);
return;
}
vInfo("vgId:%d, sync role changed from %s to %s", pVnode->vgId, syncRole[pVnode->role], syncRole[role]);
pVnode->role = role;
......@@ -75,6 +80,11 @@ void vnodeCtrlFlow(int32_t vgId, int32_t level) {
vTrace("vgId:%d, vnode not found while flow ctrl", vgId);
return;
}
if (pVnode->dropped) {
vTrace("vgId:%d, vnode dropped while flow ctrl", vgId);
vnodeRelease(pVnode);
return;
}
if (pVnode->flowctrlLevel != level) {
vDebug("vgId:%d, set flowctrl level from %d to %d", pVnode->vgId, pVnode->flowctrlLevel, level);
......@@ -129,6 +139,7 @@ int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rpara
SVnodeObj *pVnode = vnodeAcquire(vgId);
if (pVnode == NULL) {
vError("vgId:%d, vnode not found while write to cache", vgId);
vnodeRelease(pVnode);
return TSDB_CODE_VND_INVALID_VGROUP_ID;
}
......
......@@ -340,8 +340,11 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) {
if (pWrite->processedCount >= 100) {
vError("vgId:%d, msg:%p, failed to process since %s, retry:%d", pVnode->vgId, pWrite, tstrerror(code),
pWrite->processedCount);
pWrite->processedCount = 1;
dnodeSendRpcVWriteRsp(pWrite->pVnode, pWrite, code);
void *handle = pWrite->rpcMsg.handle;
taosFreeQitem(pWrite);
vnodeRelease(pVnode);
SRpcMsg rpcRsp = {.handle = handle, .code = code};
rpcSendResponse(&rpcRsp);
} else {
code = vnodePerformFlowCtrl(pWrite);
if (code == 0) {
......@@ -386,4 +389,6 @@ void vnodeWaitWriteCompleted(SVnodeObj *pVnode) {
vTrace("vgId:%d, queued wmsg num:%d", pVnode->vgId, pVnode->queuedWMsg);
taosMsleep(10);
}
taosMsleep(900);
}
......@@ -37,7 +37,7 @@ pipeline {
stage('Parallel test stage') {
parallel {
stage('pytest') {
agent{label '184'}
agent{label 'slad1'}
steps {
pre_test()
sh '''
......@@ -62,7 +62,7 @@ pipeline {
}
stage('test_crash_gen') {
agent{label "185"}
agent{label "slad2"}
steps {
pre_test()
sh '''
......@@ -149,7 +149,7 @@ pipeline {
}
stage('test_valgrind') {
agent{label "186"}
agent{label "slad3"}
steps {
pre_test()
......
......@@ -74,7 +74,7 @@ function runQueryPerfTest {
python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT
python3 perfbenchmark/joinPerformance.py | tee -a $PERFORMANCE_TEST_REPORT
#python3 perfbenchmark/joinPerformance.py | tee -a $PERFORMANCE_TEST_REPORT
}
......
......@@ -104,7 +104,7 @@ class taosdemoPerformace:
return output
def insertData(self):
os.system("taosdemo -f %s > taosdemoperf.txt" % self.generateJson())
os.system("taosdemo -f %s > taosdemoperf.txt 2>&1" % self.generateJson())
self.createTableTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'")
self.insertRecordsTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'")
self.recordsPerSecond = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'")
......
......@@ -23,32 +23,32 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
if "community" in selfPath:
projPath = selfPath[: selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
projPath = selfPath[: selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
if "taosd" in files:
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
if "packaging" not in rootRealPath:
buildPath = root[: len(root) - len("/build/bin")]
break
return buildPath
def run(self):
tdSql.prepare()
buildPath = self.getBuildPath()
if (buildPath == ""):
if buildPath == "":
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
os.system("yes | %staosdemo -f tools/insert.json" % binPath)
binPath = buildPath + "/build/bin/"
os.system("%staosdemo -f tools/insert.json -y" % binPath)
tdSql.execute("use db01")
tdSql.query("select count(*) from stb01")
......
......@@ -37,7 +37,7 @@ typedef struct {
//void taosMsleep(int mseconds);
int g_rows = 0;
int g_runTimes = 5;
unsigned long long getCurrentTime(){
......@@ -1336,7 +1336,7 @@ static void checkResult(TAOS *taos, char *tname, int printr, int expected) {
result = taos_query(taos, sql);
int code = taos_errno(result);
if (code != 0) {
printf("failed to query table, reason:%s\n", taos_errstr(result));
printf("failed to query table: %s, reason:%s\n", tname, taos_errstr(result));
taos_free_result(result);
return;
}
......@@ -1358,9 +1358,9 @@ static void checkResult(TAOS *taos, char *tname, int printr, int expected) {
}
if (rows == expected) {
printf("%d rows are fetched as expectation\n", rows);
printf("%d rows are fetched as expectation from %s\n", rows, tname);
} else {
printf("!!!expect %d rows, but %d rows are fetched\n", expected, rows);
printf("!!!expect %d rows, but %d rows are fetched from %s\n", expected, rows, tname);
return;
}
......@@ -1451,15 +1451,52 @@ static void prepareV_long(TAOS *taos, int schemaCase, int tableNum, int lenO
//void runcase(TAOS *taos, int idx) {
static void* runCase(void *para) {
ThreadInfo* tInfo = (ThreadInfo *)para;
TAOS *taos = tInfo->taos;
int idx = tInfo->idx;
static void prepareVcolumn(TAOS *taos, int schemaCase, int tableNum, int lenOfBinaryDef, char* dbName) {
TAOS_RES *result;
int code;
char sqlstr[1024] = {0};
sprintf(sqlstr, "drop database if exists %s;", dbName);
result = taos_query(taos, sqlstr);
taos_free_result(result);
sprintf(sqlstr, "create database %s;", dbName);
result = taos_query(taos, sqlstr);
code = taos_errno(result);
if (code != 0) {
printf("failed to create database, reason:%s\n", taos_errstr(result));
taos_free_result(result);
return;
}
taos_free_result(result);
sprintf(sqlstr, "use %s;", dbName);
result = taos_query(taos, sqlstr);
taos_free_result(result);
TAOS_STMT *stmt = NULL;
// create table
for (int i = 0 ; i < tableNum; i++) {
char buf[1024];
if (schemaCase) {
sprintf(buf, "create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, br binary(%d), nr nchar(%d), ts2 timestamp)", i, lenOfBinaryDef, lenOfBinaryDef) ;
} else {
sprintf(buf, "create table m%d (ts timestamp, b int)", i) ;
}
result = taos_query(taos, buf);
code = taos_errno(result);
if (code != 0) {
printf("failed to create table, reason:%s\n", taos_errstr(result));
taos_free_result(result);
return;
}
taos_free_result(result);
}
}
(void)idx;
//void runcase(TAOS *taos, int idx) {
static void runCase(TAOS *taos) {
TAOS_STMT *stmt = NULL;
int tableNum;
int lenOfBinaryDef;
......@@ -1975,7 +2012,7 @@ static void* runCase(void *para) {
}
#endif
return NULL;
return ;
}
......@@ -2166,14 +2203,8 @@ static int stmt_bind_case_001_long(TAOS_STMT *stmt, int tableNum, int rowsOfPerC
return 0;
}
static void* runCase_long(void *para) {
ThreadInfo* tInfo = (ThreadInfo *)para;
TAOS *taos = tInfo->taos;
int idx = tInfo->idx;
static void runCase_long(TAOS *taos) {
TAOS_STMT *stmt = NULL;
(void)idx;
int tableNum;
int lenOfBinaryDef;
......@@ -2228,7 +2259,7 @@ static void* runCase_long(void *para) {
prepareV_long(taos, 1, tableNum, lenOfBinaryDef);
totalRowsPerTbl = 0;
for (int i = 0; i < 30000; i++) {
for (int i = 0; i < g_runTimes; i++) {
stmt = taos_stmt_init(taos);
stmt_bind_case_001_long(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum, &startTs);
......@@ -2245,61 +2276,955 @@ static void* runCase_long(void *para) {
}
#endif
return NULL;
return;
}
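/* A minimal sketch of the specify-column bind flow exercised by the cases below,
   assuming a live connection `taos` and a table m0 whose schema contains
   `ts timestamp` and `v4 int` (prepareVcolumn above creates such tables).
   The committed cases bind five columns across many tables and batches; this
   sketch binds two columns of one batch into a fixed table. */
static int stmt_specifyCol_sketch(TAOS *taos) {
  TAOS_STMT *stmt = taos_stmt_init(taos);
  int code = taos_stmt_prepare(stmt, "insert into m0 (ts,v4) values(?,?)", 0);
  if (code != 0) { printf("prepare failed: %s\n", tstrerror(code)); return -1; }

  int64_t ts[2]      = {1591060628000, 1591060628001};
  int32_t v4[2]      = {10, 11};
  char    no_null[2] = {0, 0};

  TAOS_MULTI_BIND cols[2];
  memset(cols, 0, sizeof(cols));
  cols[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  cols[0].buffer_length = sizeof(int64_t);
  cols[0].buffer        = ts;
  cols[0].is_null       = no_null;
  cols[0].num           = 2;
  cols[1].buffer_type   = TSDB_DATA_TYPE_INT;
  cols[1].buffer_length = sizeof(int32_t);
  cols[1].buffer        = v4;
  cols[1].is_null       = no_null;
  cols[1].num           = 2;

  /* one bind call per specified column, then add the batch and execute it */
  for (int col = 0; col < 2; ++col) {
    code = taos_stmt_bind_single_param_batch(stmt, &cols[col], col);
    if (code != 0) { printf("bind failed: %s\n", tstrerror(code)); return -1; }
  }
  if (taos_stmt_add_batch(stmt) != 0 || taos_stmt_execute(stmt) != 0) return -1;

  taos_stmt_close(stmt);
  return 0;
}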
/*=======================*/
/*
test scene: insert into tb1 (ts,f1) values (?,?)
*/
static int stmt_specifyCol_bind_case_001(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) {
sampleValue* v = (sampleValue *)calloc(1, sizeof(sampleValue));
int totalRowsPerTbl = rowsOfPerColum * bingNum;
int main(int argc, char *argv[])
{
TAOS *taos;
char host[32] = "127.0.0.1";
char* serverIp = NULL;
int threadNum = 2;
v->ts = (int64_t *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * tableNum));
v->br = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef));
v->nr = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef));
// connect to server
if (argc == 1) {
serverIp = host;
} else if (argc == 2) {
serverIp = argv[1];
} else if (argc == 3) {
serverIp = argv[1];
threadNum = atoi(argv[2]);
} else if (argc == 4) {
serverIp = argv[1];
threadNum = atoi(argv[2]);
g_rows = atoi(argv[3]);
int *lb = (int *)malloc(MAX_ROWS_OF_PER_COLUMN * sizeof(int));
TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * (size_t)(bingNum * columnNum * (tableNum+1) * rowsOfPerColum));
char* is_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN);
char* no_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN);
int64_t tts = 1591060628000;
for (int i = 0; i < rowsOfPerColum; ++i) {
lb[i] = lenOfBinaryAct;
no_null[i] = 0;
is_null[i] = (i % 10 == 2) ? 1 : 0;
v->b[i] = (int8_t)(i % 2);
v->v1[i] = (int8_t)((i+1) % 2);
v->v2[i] = (int16_t)i;
v->v4[i] = (int32_t)(i+1);
v->v8[i] = (int64_t)(i+2);
v->f4[i] = (float)(i+3);
v->f8[i] = (double)(i+4);
char tbuf[MAX_BINARY_DEF_LEN];
memset(tbuf, 0, MAX_BINARY_DEF_LEN);
sprintf(tbuf, "binary-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10);
memcpy(v->br + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct);
memset(tbuf, 0, MAX_BINARY_DEF_LEN);
sprintf(tbuf, "nchar-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10);
memcpy(v->nr + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct);
v->ts2[i] = tts + i;
}
printf("server:%s, threadNum:%d, rows:%d\n\n", serverIp, threadNum, g_rows);
int i = 0;
for (int j = 0; j < bingNum * tableNum; j++) {
params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
params[i+0].buffer_length = sizeof(int64_t);
params[i+0].buffer = &v->ts[j*rowsOfPerColum];
params[i+0].length = NULL;
params[i+0].is_null = no_null;
params[i+0].num = rowsOfPerColum;
params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL;
params[i+1].buffer_length = sizeof(int8_t);
params[i+1].buffer = v->b;
params[i+1].length = NULL;
params[i+1].is_null = is_null;
params[i+1].num = rowsOfPerColum;
pthread_t *pThreadList = (pthread_t *) calloc(sizeof(pthread_t), (size_t)threadNum);
ThreadInfo* threadInfo = (ThreadInfo *) calloc(sizeof(ThreadInfo), (size_t)threadNum);
params[i+2].buffer_type = TSDB_DATA_TYPE_INT;
params[i+2].buffer_length = sizeof(int32_t);
params[i+2].buffer = v->v4;
params[i+2].length = NULL;
params[i+2].is_null = is_null;
params[i+2].num = rowsOfPerColum;
ThreadInfo* tInfo = threadInfo;
for (int i = 0; i < threadNum; i++) {
taos = taos_connect(serverIp, "root", "taosdata", NULL, 0);
if (taos == NULL) {
printf("failed to connect to TDengine, reason:%s\n", taos_errstr(taos));
return -1;
}
params[i+3].buffer_type = TSDB_DATA_TYPE_FLOAT;
params[i+3].buffer_length = sizeof(float);
params[i+3].buffer = v->f4;
params[i+3].length = NULL;
params[i+3].is_null = is_null;
params[i+3].num = rowsOfPerColum;
tInfo->taos = taos;
tInfo->idx = i;
if (0 == i) {
pthread_create(&(pThreadList[0]), NULL, runCase, (void *)tInfo);
} else if (1 == i){
pthread_create(&(pThreadList[0]), NULL, runCase_long, (void *)tInfo);
params[i+4].buffer_type = TSDB_DATA_TYPE_BINARY;
params[i+4].buffer_length = (uintptr_t)lenOfBinaryDef;
params[i+4].buffer = v->br;
params[i+4].length = lb;
params[i+4].is_null = is_null;
params[i+4].num = rowsOfPerColum;
i+=columnNum;
}
//int64_t tts = 1591060628000;
for (int i = 0; i < totalRowsPerTbl * tableNum; ++i) {
v->ts[i] = tts + i;
}
unsigned long long starttime = getCurrentTime();
// create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, br binary(%d), nr nchar(%d), ts2 timestamp)
char *sql = "insert into m0 (ts,b,v4,f4,br) values(?,?,?,?,?)";
int code = taos_stmt_prepare(stmt, sql, 0);
if (code != 0){
printf("failed to execute taos_stmt_prepare. code:0x%x[%s]\n", code, tstrerror(code));
return -1;
}
int id = 0;
for (int l = 0; l < bingNum; l++) {
for (int zz = 0; zz < tableNum; zz++) {
//char buf[32];
//sprintf(buf, "m%d", zz);
//code = taos_stmt_set_tbname(stmt, buf);
//if (code != 0){
// printf("failed to execute taos_stmt_set_tbname. code:0x%x[%s]\n", code, tstrerror(code));
// return -1;
//}
for (int col=0; col < columnNum; ++col) {
code = taos_stmt_bind_single_param_batch(stmt, params + id, col);
if (code != 0){
printf("failed to execute taos_stmt_bind_single_param_batch. code:0x%x[%s]\n", code, tstrerror(code));
return -1;
}
id++;
}
code = taos_stmt_add_batch(stmt);
if (code != 0) {
printf("failed to execute taos_stmt_add_batch. code:0x%x[%s]\n", code, tstrerror(code));
return -1;
}
}
code = taos_stmt_execute(stmt);
if (code != 0) {
printf("failed to execute taos_stmt_execute. code:0x%x[%s]\n", code, tstrerror(code));
return -1;
}
tInfo++;
}
for (int i = 0; i < threadNum; i++) {
pthread_join(pThreadList[i], NULL);
unsigned long long endtime = getCurrentTime();
unsigned long long totalRows = (uint32_t)(totalRowsPerTbl * tableNum);
printf("insert total %d records, used %u seconds, avg:%u useconds per record\n", totalRows, (endtime-starttime)/1000000UL, (endtime-starttime)/totalRows);
free(v->ts);
free(v->br);
free(v->nr);
free(v);
free(lb);
free(params);
free(is_null);
free(no_null);
return 0;
}
/*=======================*/
/*
test scene: insert into ? (ts,f1) values (?,?)
*/
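/* Note: unlike the fixed-table cases above, the statement prepared here uses a "?"
   table placeholder, so each target table must first be named via
   taos_stmt_set_tbname() before its column batches are bound and added; the rest of
   the bind/add_batch/execute flow is unchanged. */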
static int stmt_specifyCol_bind_case_002(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) {
sampleValue* v = (sampleValue *)calloc(1, sizeof(sampleValue));
int totalRowsPerTbl = rowsOfPerColum * bingNum;
v->ts = (int64_t *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * tableNum));
v->br = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef));
v->nr = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef));
int *lb = (int *)malloc(MAX_ROWS_OF_PER_COLUMN * sizeof(int));
TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * (size_t)(bingNum * columnNum * (tableNum+1) * rowsOfPerColum));
char* is_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN);
char* no_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN);
int64_t tts = 1591060628000;
for (int i = 0; i < rowsOfPerColum; ++i) {
lb[i] = lenOfBinaryAct;
no_null[i] = 0;
is_null[i] = (i % 10 == 2) ? 1 : 0;
v->b[i] = (int8_t)(i % 2);
v->v1[i] = (int8_t)((i+1) % 2);
v->v2[i] = (int16_t)i;
v->v4[i] = (int32_t)(i+1);
v->v8[i] = (int64_t)(i+2);
v->f4[i] = (float)(i+3);
v->f8[i] = (double)(i+4);
char tbuf[MAX_BINARY_DEF_LEN];
memset(tbuf, 0, MAX_BINARY_DEF_LEN);
sprintf(tbuf, "binary-%d", i%10);
memcpy(v->br + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct);
memset(tbuf, 0, MAX_BINARY_DEF_LEN);
sprintf(tbuf, "nchar-%d", i%10);
memcpy(v->nr + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct);
v->ts2[i] = tts + i;
}
free(pThreadList);
free(threadInfo);
int i = 0;
for (int j = 0; j < bingNum * tableNum; j++) {
params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
params[i+0].buffer_length = sizeof(int64_t);
params[i+0].buffer = &v->ts[j*rowsOfPerColum];
params[i+0].length = NULL;
params[i+0].is_null = no_null;
params[i+0].num = rowsOfPerColum;
params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL;
params[i+1].buffer_length = sizeof(int8_t);
params[i+1].buffer = v->b;
params[i+1].length = NULL;
params[i+1].is_null = is_null;
params[i+1].num = rowsOfPerColum;
params[i+2].buffer_type = TSDB_DATA_TYPE_INT;
params[i+2].buffer_length = sizeof(int32_t);
params[i+2].buffer = v->v4;
params[i+2].length = NULL;
params[i+2].is_null = is_null;
params[i+2].num = rowsOfPerColum;
params[i+3].buffer_type = TSDB_DATA_TYPE_FLOAT;
params[i+3].buffer_length = sizeof(float);
params[i+3].buffer = v->f4;
params[i+3].length = NULL;
params[i+3].is_null = is_null;
params[i+3].num = rowsOfPerColum;
params[i+4].buffer_type = TSDB_DATA_TYPE_BINARY;
params[i+4].buffer_length = (uintptr_t)lenOfBinaryDef;
params[i+4].buffer = v->br;
params[i+4].length = lb;
params[i+4].is_null = is_null;
params[i+4].num = rowsOfPerColum;
i+=columnNum;
}
//int64_t tts = 1591060628000;
for (int i = 0; i < totalRowsPerTbl * tableNum; ++i) {
v->ts[i] = tts + i;
}
unsigned long long starttime = getCurrentTime();
// create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, br binary(%d), nr nchar(%d), ts2 timestamp)
char *sql = "insert into ? (ts,b,v4,f4,br) values(?,?,?,?,?)";
int code = taos_stmt_prepare(stmt, sql, 0);
if (code != 0){
printf("failed to execute taos_stmt_prepare. code:0x%x[%s]\n", code, tstrerror(code));
return -1;
}
int id = 0;
for (int l = 0; l < bingNum; l++) {
for (int zz = 0; zz < tableNum; zz++) {
char buf[32];
sprintf(buf, "m%d", zz);
code = taos_stmt_set_tbname(stmt, buf);
if (code != 0){
printf("failed to execute taos_stmt_set_tbname. code:0x%x[%s]\n", code, tstrerror(code));
return -1;
}
for (int col=0; col < columnNum; ++col) {
code = taos_stmt_bind_single_param_batch(stmt, params + id, col);
if (code != 0){
printf("failed to execute taos_stmt_bind_single_param_batch. code:0x%x[%s]\n", code, tstrerror(code));
return -1;
}
id++;
}
code = taos_stmt_add_batch(stmt);
if (code != 0) {
printf("failed to execute taos_stmt_add_batch. code:0x%x[%s]\n", code, tstrerror(code));
return -1;
}
}
code = taos_stmt_execute(stmt);
if (code != 0) {
printf("failed to execute taos_stmt_execute. code:0x%x[%s]\n", code, tstrerror(code));
return -1;
}
}
unsigned long long endtime = getCurrentTime();
unsigned long long totalRows = (uint32_t)(totalRowsPerTbl * tableNum);
printf("insert total %d records, used %u seconds, avg:%u useconds per record\n", totalRows, (endtime-starttime)/1000000UL, (endtime-starttime)/totalRows);
free(v->ts);
free(v->br);
free(v->nr);
free(v);
free(lb);
free(params);
free(is_null);
free(no_null);
return 0;
}
/*=======================*/
/*
test scene: insert into tb1 (ts,f1) values (?,?)
*/
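/* Same test scene as stmt_specifyCol_bind_case_001 above; this variant is driven by
   cases 9 and 10 below to probe how many rows a single bind batch can carry
   (case 10 is disabled because 23741 rows exceed the WAL size limit). */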
static int stmt_specifyCol_bind_case_001_maxRows(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) {
sampleValue* v = (sampleValue *)calloc(1, sizeof(sampleValue));
int totalRowsPerTbl = rowsOfPerColum * bingNum;
v->ts = (int64_t *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * tableNum));
v->br = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef));
v->nr = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef));
int *lb = (int *)malloc(MAX_ROWS_OF_PER_COLUMN * sizeof(int));
TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * (size_t)(bingNum * columnNum * (tableNum+1) * rowsOfPerColum));
char* is_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN);
char* no_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN);
int64_t tts = 1591060628000;
for (int i = 0; i < rowsOfPerColum; ++i) {
lb[i] = lenOfBinaryAct;
no_null[i] = 0;
is_null[i] = (i % 10 == 2) ? 1 : 0;
v->b[i] = (int8_t)(i % 2);
v->v1[i] = (int8_t)((i+1) % 2);
v->v2[i] = (int16_t)i;
v->v4[i] = (int32_t)(i+1);
v->v8[i] = (int64_t)(i+2);
v->f4[i] = (float)(i+3);
v->f8[i] = (double)(i+4);
char tbuf[MAX_BINARY_DEF_LEN];
memset(tbuf, 0, MAX_BINARY_DEF_LEN);
sprintf(tbuf, "binary-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10);
memcpy(v->br + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct);
memset(tbuf, 0, MAX_BINARY_DEF_LEN);
sprintf(tbuf, "nchar-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10);
memcpy(v->nr + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct);
v->ts2[i] = tts + i;
}
int i = 0;
for (int j = 0; j < bingNum * tableNum; j++) {
params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
params[i+0].buffer_length = sizeof(int64_t);
params[i+0].buffer = &v->ts[j*rowsOfPerColum];
params[i+0].length = NULL;
params[i+0].is_null = no_null;
params[i+0].num = rowsOfPerColum;
params[i+1].buffer_type = TSDB_DATA_TYPE_INT;
params[i+1].buffer_length = sizeof(int32_t);
params[i+1].buffer = v->v4;
params[i+1].length = NULL;
params[i+1].is_null = is_null;
params[i+1].num = rowsOfPerColum;
i+=columnNum;
}
//int64_t tts = 1591060628000;
for (int i = 0; i < totalRowsPerTbl * tableNum; ++i) {
v->ts[i] = tts + i;
}
unsigned long long starttime = getCurrentTime();
// create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, br binary(%d), nr nchar(%d), ts2 timestamp)
char *sql = "insert into m0 (ts,b,v4,f4,br) values(?,?,?,?,?)";
int code = taos_stmt_prepare(stmt, sql, 0);
if (code != 0){
printf("failed to execute taos_stmt_prepare. code:0x%x[%s]\n", code, tstrerror(code));
return -1;
}
int id = 0;
for (int l = 0; l < bingNum; l++) {
for (int zz = 0; zz < tableNum; zz++) {
//char buf[32];
//sprintf(buf, "m%d", zz);
//code = taos_stmt_set_tbname(stmt, buf);
//if (code != 0){
// printf("failed to execute taos_stmt_set_tbname. code:0x%x[%s]\n", code, tstrerror(code));
// return -1;
//}
for (int col=0; col < columnNum; ++col) {
code = taos_stmt_bind_single_param_batch(stmt, params + id, col);
if (code != 0){
printf("failed to execute taos_stmt_bind_single_param_batch. code:0x%x[%s]\n", code, tstrerror(code));
return -1;
}
id++;
}
code = taos_stmt_add_batch(stmt);
if (code != 0) {
printf("failed to execute taos_stmt_add_batch. code:0x%x[%s]\n", code, tstrerror(code));
return -1;
}
}
code = taos_stmt_execute(stmt);
if (code != 0) {
printf("failed to execute taos_stmt_execute. code:0x%x[%s]\n", code, tstrerror(code));
return -1;
}
}
unsigned long long endtime = getCurrentTime();
unsigned long long totalRows = (uint32_t)(totalRowsPerTbl * tableNum);
printf("insert total %d records, used %u seconds, avg:%u useconds per record\n", totalRows, (endtime-starttime)/1000000UL, (endtime-starttime)/totalRows);
free(v->ts);
free(v->br);
free(v->nr);
free(v);
free(lb);
free(params);
free(is_null);
free(no_null);
return 0;
}
static void SpecifyColumnBatchCase(TAOS *taos) {
TAOS_STMT *stmt = NULL;
int tableNum;
int lenOfBinaryDef;
int rowsOfPerColum;
int bingNum;
int lenOfBinaryAct;
int columnNum;
int totalRowsPerTbl;
//=======================================================================//
//=============================== single table ==========================//
//========== case 1: ======================//
#if 1
{
stmt = taos_stmt_init(taos);
tableNum = 1;
rowsOfPerColum = 1;
bingNum = 1;
lenOfBinaryDef = 40;
lenOfBinaryAct = 8;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db1");
stmt_specifyCol_bind_case_001(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("case 1 check result end\n\n");
}
#endif
//========== case 2: ======================//
#if 1
{
stmt = taos_stmt_init(taos);
tableNum = 1;
rowsOfPerColum = 5;
bingNum = 1;
lenOfBinaryDef = 1000;
lenOfBinaryAct = 15;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db2");
stmt_specifyCol_bind_case_001(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
//checkResult(taos, "m1", 0, totalRowsPerTbl);
//checkResult(taos, "m2", 0, totalRowsPerTbl);
//checkResult(taos, "m3", 0, totalRowsPerTbl);
//checkResult(taos, "m4", 0, totalRowsPerTbl);
//checkResult(taos, "m5", 0, totalRowsPerTbl);
//checkResult(taos, "m6", 0, totalRowsPerTbl);
//checkResult(taos, "m7", 0, totalRowsPerTbl);
//checkResult(taos, "m8", 0, totalRowsPerTbl);
//checkResult(taos, "m9", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("case 2 check result end\n\n");
}
#endif
//========== case 2-1: ======================//
#if 1
{
stmt = taos_stmt_init(taos);
tableNum = 1;
rowsOfPerColum = 32767;
bingNum = 1;
lenOfBinaryDef = 1000;
lenOfBinaryAct = 15;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db2_1");
stmt_specifyCol_bind_case_001(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
//checkResult(taos, "m1", 0, totalRowsPerTbl);
//checkResult(taos, "m2", 0, totalRowsPerTbl);
//checkResult(taos, "m3", 0, totalRowsPerTbl);
//checkResult(taos, "m4", 0, totalRowsPerTbl);
//checkResult(taos, "m5", 0, totalRowsPerTbl);
//checkResult(taos, "m6", 0, totalRowsPerTbl);
//checkResult(taos, "m7", 0, totalRowsPerTbl);
//checkResult(taos, "m8", 0, totalRowsPerTbl);
//checkResult(taos, "m9", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("case 2-1 check result end\n\n");
}
#endif
//========== case 2-2: ======================//
#if 1
{
printf("====case 2-2 error test start\n");
stmt = taos_stmt_init(taos);
tableNum = 1;
rowsOfPerColum = 32768;
bingNum = 1;
lenOfBinaryDef = 1000;
lenOfBinaryAct = 15;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db2_2");
stmt_specifyCol_bind_case_001(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
//checkResult(taos, "m1", 0, totalRowsPerTbl);
//checkResult(taos, "m2", 0, totalRowsPerTbl);
//checkResult(taos, "m3", 0, totalRowsPerTbl);
//checkResult(taos, "m4", 0, totalRowsPerTbl);
//checkResult(taos, "m5", 0, totalRowsPerTbl);
//checkResult(taos, "m6", 0, totalRowsPerTbl);
//checkResult(taos, "m7", 0, totalRowsPerTbl);
//checkResult(taos, "m8", 0, totalRowsPerTbl);
//checkResult(taos, "m9", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("====case 2-2 check result end\n\n");
}
#endif
//========== case 3: ======================//
#if 1
{
stmt = taos_stmt_init(taos);
tableNum = 1;
rowsOfPerColum = 1;
bingNum = 5;
lenOfBinaryDef = 1000;
lenOfBinaryAct = 20;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db3");
stmt_specifyCol_bind_case_001(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
//checkResult(taos, "m1", 0, totalRowsPerTbl);
//checkResult(taos, "m2", 0, totalRowsPerTbl);
//checkResult(taos, "m3", 0, totalRowsPerTbl);
//checkResult(taos, "m4", 0, totalRowsPerTbl);
//checkResult(taos, "m5", 0, totalRowsPerTbl);
//checkResult(taos, "m6", 0, totalRowsPerTbl);
//checkResult(taos, "m7", 0, totalRowsPerTbl);
//checkResult(taos, "m8", 0, totalRowsPerTbl);
//checkResult(taos, "m9", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("case 3 check result end\n\n");
}
#endif
//========== case 4: ======================//
#if 1
{
stmt = taos_stmt_init(taos);
tableNum = 1;
rowsOfPerColum = 5;
bingNum = 5;
lenOfBinaryDef = 1000;
lenOfBinaryAct = 33;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db4");
stmt_specifyCol_bind_case_001(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
//checkResult(taos, "m1", 0, totalRowsPerTbl);
//checkResult(taos, "m2", 0, totalRowsPerTbl);
//checkResult(taos, "m3", 0, totalRowsPerTbl);
//checkResult(taos, "m4", 0, totalRowsPerTbl);
//checkResult(taos, "m5", 0, totalRowsPerTbl);
//checkResult(taos, "m6", 0, totalRowsPerTbl);
//checkResult(taos, "m7", 0, totalRowsPerTbl);
//checkResult(taos, "m8", 0, totalRowsPerTbl);
//checkResult(taos, "m9", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("case 4 check result end\n\n");
}
#endif
//=======================================================================//
//=============================== multi tables ==========================//
//========== case 5: ======================//
#if 1
{
stmt = taos_stmt_init(taos);
tableNum = 5;
rowsOfPerColum = 1;
bingNum = 1;
lenOfBinaryDef = 40;
lenOfBinaryAct = 16;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db5");
stmt_specifyCol_bind_case_002(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
checkResult(taos, "m1", 0, totalRowsPerTbl);
checkResult(taos, "m2", 0, totalRowsPerTbl);
checkResult(taos, "m3", 0, totalRowsPerTbl);
checkResult(taos, "m4", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("case 5 check result end\n\n");
}
#endif
//========== case 6: ======================//
#if 1
{
stmt = taos_stmt_init(taos);
tableNum = 5;
rowsOfPerColum = 5;
bingNum = 1;
lenOfBinaryDef = 1000;
lenOfBinaryAct = 20;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db6");
stmt_specifyCol_bind_case_002(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
checkResult(taos, "m1", 0, totalRowsPerTbl);
checkResult(taos, "m2", 0, totalRowsPerTbl);
checkResult(taos, "m3", 0, totalRowsPerTbl);
checkResult(taos, "m4", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("case 6 check result end\n\n");
}
#endif
//========== case 7: ======================//
#if 1
{
stmt = taos_stmt_init(taos);
tableNum = 5;
rowsOfPerColum = 1;
bingNum = 5;
lenOfBinaryDef = 1000;
lenOfBinaryAct = 33;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db7");
stmt_specifyCol_bind_case_002(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
checkResult(taos, "m1", 0, totalRowsPerTbl);
checkResult(taos, "m2", 0, totalRowsPerTbl);
checkResult(taos, "m3", 0, totalRowsPerTbl);
checkResult(taos, "m4", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("case 7 check result end\n\n");
}
#endif
//========== case 8: ======================//
#if 1
{
stmt = taos_stmt_init(taos);
tableNum = 5;
rowsOfPerColum = 5;
bingNum = 5;
lenOfBinaryDef = 1000;
lenOfBinaryAct = 40;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db8");
stmt_specifyCol_bind_case_002(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
checkResult(taos, "m1", 0, totalRowsPerTbl);
checkResult(taos, "m2", 0, totalRowsPerTbl);
checkResult(taos, "m3", 0, totalRowsPerTbl);
checkResult(taos, "m4", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("case 8 check result end\n\n");
}
#endif
//=======================================================================//
//=============================== multi-rows to single table ==========================//
//========== case 9: ======================//
#if 1
{
stmt = taos_stmt_init(taos);
tableNum = 1;
rowsOfPerColum = 23740;
bingNum = 1;
lenOfBinaryDef = 40;
lenOfBinaryAct = 8;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db9");
stmt_specifyCol_bind_case_001(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("case 9 check result end\n\n");
}
#endif
//========== case 10: ======================//
#if 0
{
printf("====case 10 error test start\n");
stmt = taos_stmt_init(taos);
tableNum = 1;
rowsOfPerColum = 23741; // WAL size exceeds limit
bingNum = 1;
lenOfBinaryDef = 40;
lenOfBinaryAct = 8;
columnNum = 5;
prepareV(taos, 1, tableNum, lenOfBinaryDef);
stmt_specifyCol_bind_case_001(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("====case 10 check result end\n\n");
}
#endif
//=======================================================================//
//=============================== multi tables, multi bind one same table ==========================//
//========== case 13: ======================//
#if 1
{
stmt = taos_stmt_init(taos);
tableNum = 5;
rowsOfPerColum = 1;
bingNum = 5;
lenOfBinaryDef = 40;
lenOfBinaryAct = 28;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db13");
stmt_specifyCol_bind_case_002(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
checkResult(taos, "m1", 0, totalRowsPerTbl);
checkResult(taos, "m2", 0, totalRowsPerTbl);
checkResult(taos, "m3", 0, totalRowsPerTbl);
checkResult(taos, "m4", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("case 13 check result end\n\n");
}
#endif
//========== case 14: ======================//
#if 1
{
stmt = taos_stmt_init(taos);
tableNum = 5;
rowsOfPerColum = 5;
bingNum = 5;
lenOfBinaryDef = 1000;
lenOfBinaryAct = 33;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db14");
stmt_specifyCol_bind_case_002(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
checkResult(taos, "m1", 0, totalRowsPerTbl);
checkResult(taos, "m2", 0, totalRowsPerTbl);
checkResult(taos, "m3", 0, totalRowsPerTbl);
checkResult(taos, "m4", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("case 14 check result end\n\n");
}
#endif
//========== case 15: ======================//
#if 1
{
stmt = taos_stmt_init(taos);
tableNum = 1000;
rowsOfPerColum = 10;
bingNum = 5;
lenOfBinaryDef = 1000;
lenOfBinaryAct = 8;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db15");
stmt_specifyCol_bind_case_002(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
checkResult(taos, "m111", 0, totalRowsPerTbl);
checkResult(taos, "m222", 0, totalRowsPerTbl);
checkResult(taos, "m333", 0, totalRowsPerTbl);
checkResult(taos, "m500", 0, totalRowsPerTbl);
checkResult(taos, "m999", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("case 15 check result end\n\n");
}
#endif
//========== case 17: ======================//
#if 1
{
//printf("case 17 test start\n");
stmt = taos_stmt_init(taos);
tableNum = 10;
rowsOfPerColum = 100;
bingNum = 1;
lenOfBinaryDef = 100;
lenOfBinaryAct = 8;
columnNum = 5;
prepareVcolumn(taos, 1, tableNum, lenOfBinaryDef, "db17");
stmt_specifyCol_bind_case_002(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum);
totalRowsPerTbl = rowsOfPerColum * bingNum;
checkResult(taos, "m0", 0, totalRowsPerTbl);
checkResult(taos, "m3", 0, totalRowsPerTbl);
checkResult(taos, "m5", 0, totalRowsPerTbl);
checkResult(taos, "m8", 0, totalRowsPerTbl);
checkResult(taos, "m9", 0, totalRowsPerTbl);
taos_stmt_close(stmt);
printf("case 17 check result end\n\n");
}
#endif
return ;
}
int main(int argc, char *argv[])
{
TAOS *taos;
char host[32] = "127.0.0.1";
char* serverIp = NULL;
int threadNum = 1;
// connect to server
if (argc == 1) {
serverIp = host;
} else if (argc == 2) {
serverIp = argv[1];
} else if (argc == 3) {
serverIp = argv[1];
threadNum = atoi(argv[2]);
} else if (argc == 4) {
serverIp = argv[1];
threadNum = atoi(argv[2]);
g_runTimes = atoi(argv[3]);
}
printf("server:%s, runTimes:%d\n\n", serverIp, g_runTimes);
#if 0
printf("server:%s, threadNum:%d, rows:%d\n\n", serverIp, threadNum, g_rows);
pthread_t *pThreadList = (pthread_t *) calloc(sizeof(pthread_t), (size_t)threadNum);
ThreadInfo* threadInfo = (ThreadInfo *) calloc(sizeof(ThreadInfo), (size_t)threadNum);
ThreadInfo* tInfo = threadInfo;
for (int i = 0; i < threadNum; i++) {
taos = taos_connect(serverIp, "root", "taosdata", NULL, 0);
if (taos == NULL) {
printf("failed to connect to TDengine, reason:%s\n", taos_errstr(taos));
return -1;
}
tInfo->taos = taos;
tInfo->idx = i;
if (0 == i) {
//pthread_create(&(pThreadList[0]), NULL, runCase, (void *)tInfo);
pthread_create(&(pThreadList[0]), NULL, SpecifyColumnBatchCase, (void *)tInfo);
} else if (1 == i){
pthread_create(&(pThreadList[0]), NULL, runCase_long, (void *)tInfo);
}
tInfo++;
}
for (int i = 0; i < threadNum; i++) {
pthread_join(pThreadList[i], NULL);
}
free(pThreadList);
free(threadInfo);
#endif
taos = taos_connect(serverIp, "root", "taosdata", NULL, 0);
if (taos == NULL) {
printf("failed to connect to TDengine, reason:%s\n", taos_errstr(taos));
return -1;
}
runCase(taos);
runCase_long(taos);
SpecifyColumnBatchCase(taos);
return 0;
}
......@@ -68,7 +68,7 @@ while $loop <= $loops
while $i < 10
sql select count(*) from $stb where t1 = $i
if $data00 != $rowNum then
print expect $rowNum, actual: $data00
print expect $rowNum , actual: $data00
return -1
endi
$i = $i + 1
......
......@@ -41,7 +41,7 @@ sql create dnode $hostname2
sleep 10000
sql show log.tables;
if $rows != 5 then
if $rows > 6 then
return -1
endi
......
......@@ -56,7 +56,7 @@ print $data30
print $data40
print $data50
if $rows != 5 then
if $rows > 6 then
return -1
endi
......
......@@ -19,7 +19,7 @@ sleep 3000
sql show dnodes
print dnode1 openVnodes $data2_1
if $data2_1 != 1 then
if $data2_1 > 2 then
return -1
endi
......@@ -41,7 +41,7 @@ print dnode2 openVnodes $data2_2
if $data2_1 != 0 then
goto show2
endi
if $data2_2 != 1 then
if $data2_2 > 2 then
goto show2
endi
......@@ -55,7 +55,7 @@ print $data30
print $data40
print $data50
if $rows != 4 then
if $rows > 5 then
return -1
endi
......