diff --git a/.appveyor.yml b/.appveyor.yml
index fe4816688b43276a4a9ca7b911b39a43b8fc2141..ee1dc91767da710bbc508801d88474a20eba60df 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -1,30 +1,49 @@
version: 1.0.{build}
-os: Visual Studio 2015
+image:
+ - Visual Studio 2015
+ - macos
environment:
matrix:
- ARCH: amd64
- ARCH: x86
+matrix:
+ exclude:
+ - image: macos
+ ARCH: x86
+for:
+ -
+ matrix:
+ only:
+ - image: Visual Studio 2015
+ clone_folder: c:\dev\TDengine
+ clone_depth: 1
-clone_folder: c:\dev\TDengine
-clone_depth: 1
+ init:
+ - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%
-init:
- - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH%
+ before_build:
+ - cd c:\dev\TDengine
+ - md build
-before_build:
- - cd c:\dev\TDengine
- - md build
-
-build_script:
- - cd build
- - cmake -G "NMake Makefiles" ..
- - nmake install
+ build_script:
+ - cd build
+ - cmake -G "NMake Makefiles" ..
+ - nmake install
+ -
+ matrix:
+ only:
+ - image: macos
+ clone_depth: 1
+ build_script:
+ - mkdir debug
+ - cd debug
+ - cmake .. > /dev/null
+ - make > /dev/null
notifications:
- provider: Email
to:
- sangshuduo@gmail.com
-
on_build_success: true
on_build_failure: true
on_build_status_changed: true
diff --git a/.drone.yml b/.drone.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e7ae6ebbdaed1b2446e99e3f0365907ff44754d0
--- /dev/null
+++ b/.drone.yml
@@ -0,0 +1,180 @@
+---
+kind: pipeline
+name: test_amd64
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: smoke_test
+ image: python:3.8
+ commands:
+ - apt-get update
+ - apt-get install -y cmake build-essential gcc
+ - pip3 install psutil
+ - pip3 install guppy3
+ - pip3 install src/connector/python/linux/python3/
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ - cd ../tests
+ - ./test-all.sh smoke
+ when:
+ branch:
+ - develop
+ - master
+
+
+- name: crash_gen
+ image: python:3.8
+ commands:
+ - pip3 install requests
+ - pip3 install src/connector/python/linux/python3/
+ - pip3 install psutil
+ - pip3 install guppy3
+ - cd tests/pytest
+ - ./crash_gen.sh -a -p -t 4 -s 2000
+ when:
+ branch:
+ - develop
+ - master
+
+
+---
+kind: pipeline
+name: test_arm64
+
+platform:
+ os: linux
+ arch: arm64
+
+steps:
+- name: build
+ image: gcc
+ commands:
+ - apt-get update
+ - apt-get install -y cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake .. -DCPUTYPE=aarch64 > /dev/null
+ - make
+ when:
+ branch:
+ - develop
+ - master
+---
+kind: pipeline
+name: test_arm
+
+platform:
+ os: linux
+ arch: arm
+
+steps:
+- name: build
+ image: arm32v7/ubuntu:bionic
+ commands:
+ - apt-get update
+ - apt-get install -y cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake .. -DCPUTYPE=aarch32 > /dev/null
+ - make
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: build_trusty
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: build
+ image: ubuntu:trusty
+ commands:
+ - apt-get update
+ - apt-get install -y gcc cmake3 build-essential git binutils-2.26
+
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: build_xenial
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: build
+ image: ubuntu:xenial
+ commands:
+ - apt-get update
+ - apt-get install -y gcc cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: build_bionic
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: build
+ image: ubuntu:bionic
+ commands:
+ - apt-get update
+ - apt-get install -y gcc cmake build-essential
+ - mkdir debug
+ - cd debug
+ - cmake ..
+ - make
+ when:
+ branch:
+ - develop
+ - master
+
+---
+kind: pipeline
+name: goodbye
+
+platform:
+ os: linux
+ arch: amd64
+
+steps:
+- name: 64-bit
+ image: alpine
+ commands:
+ - echo 64-bit is good.
+ when:
+ branch:
+ - develop
+ - master
+
+
+depends_on:
+- test_arm64
+- test_amd64
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index efe79171058c1c22d84a3792628ddcefd32f1bad..0000000000000000000000000000000000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,298 +0,0 @@
-#
-# Configuration
-#
-#
-# Build Matrix
-#
-branches:
- only:
- - master
- - develop
- - coverity_scan
- - /^.*ci-.*$/
-
-matrix:
- - os: linux
- dist: bionic
- language: c
-
- git:
- - depth: 1
-
- compiler: gcc
- env: DESC="linux/gcc build and test"
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - net-tools
- - python3.8
- - libc6-dbg
- - valgrind
- - psmisc
- - unixodbc
- - unixodbc-dev
- - mono-complete
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && python3.8 get-pip.py
- - python3.8 -m pip install --upgrade pip setuptools
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - cmake .. > /dev/null
- - make > /dev/null
-
- after_success:
- - travis_wait 20
- - |-
- case $TRAVIS_OS_NAME in
- linux)
- cd ${TRAVIS_BUILD_DIR}/debug
- make install > /dev/null || travis_terminate $?
-
- py3ver=`python3 --version|awk '{print $2}'|cut -d "." -f 1,2` && apt install python$py3ver-dev
- pip3 install psutil
- pip3 install guppy3
- pip3 install --user ${TRAVIS_BUILD_DIR}/src/connector/python/linux/python3/
-
- cd ${TRAVIS_BUILD_DIR}/tests/examples/C#/taosdemo
- mcs -out:taosdemo *.cs || travis_terminate $?
- pkill -TERM -x taosd
- fuser -k -n tcp 6030
- sleep 1
- ${TRAVIS_BUILD_DIR}/debug/build/bin/taosd -c ${TRAVIS_BUILD_DIR}/debug/test/cfg > /dev/null &
- sleep 5
- mono taosdemo -Q DEFAULT -y || travis_terminate $?
- pkill -KILL -x taosd
- fuser -k -n tcp 6030
- sleep 1
-
- cd ${TRAVIS_BUILD_DIR}/tests
- ./test-all.sh smoke || travis_terminate $?
- sleep 1
-
- cd ${TRAVIS_BUILD_DIR}/tests/pytest
- pkill -TERM -x taosd
- fuser -k -n tcp 6030
- sleep 1
- ./crash_gen.sh -a -p -t 4 -s 2000|| travis_terminate $?
- sleep 1
-
- cd ${TRAVIS_BUILD_DIR}/tests/pytest
- ./valgrind-test.sh 2>&1 > mem-error-out.log
- sleep 1
-
-
- # Color setting
- RED='\033[0;31m'
- GREEN='\033[1;32m'
- GREEN_DARK='\033[0;32m'
- GREEN_UNDERLINE='\033[4;32m'
- NC='\033[0m'
-
- grep 'start to execute\|ERROR SUMMARY' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-mem-error-out.log
-
- for memError in `grep 'ERROR SUMMARY' uniq-mem-error-out.log | awk '{print $4}'`
- do
- if [ -n "$memError" ]; then
- if [ "$memError" -gt 12 ]; then
- echo -e "${RED} ## Memory errors number valgrind reports is $memError.\
- More than our threshold! ## ${NC}"
- travis_terminate $memError
- fi
- fi
- done
-
- grep 'start to execute\|definitely lost:' mem-error-out.log|grep -v 'grep'|uniq|tee uniq-definitely-lost-out.log
- for defiMemError in `grep 'definitely lost:' uniq-definitely-lost-out.log | awk '{print $7}'`
- do
- if [ -n "$defiMemError" ]; then
- if [ "$defiMemError" -gt 13 ]; then
- echo -e "${RED} ## Memory errors number valgrind reports \
- Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
- travis_terminate $defiMemError
- fi
- fi
- done
-
- ;;
- esac
-
- - os: linux
- dist: bionic
- language: c
- compiler: gcc
- env: COVERITY_SCAN=true
- git:
- - depth: 1
-
- script:
- - echo "this job is for coverity scan"
-
- addons:
- coverity_scan:
- # GitHub project metadata
- # ** specific to your project **
- project:
- name: TDengine
- version: 2.x
- description: TDengine
-
- # Where email notification of build analysis results will be sent
- notification_email: sdsang@taosdata.com, slguan@taosdata.com
-
- # Commands to prepare for build_command
- # ** likely specific to your build **
- build_command_prepend: cmake . > /dev/null
-
- # The command that will be added as an argument to "cov-build" to compile your project for analysis,
- # ** likely specific to your build **
- build_command: make
-
- # Pattern to match selecting branches that will run analysis. We recommend leaving this set to 'coverity_scan'.
- # Take care in resource usage, and consider the build frequency allowances per
- # https://scan.coverity.com/faq#frequency
- branch_pattern: coverity_scan
-
- - os: linux
- dist: trusty
- language: c
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - binutils-2.26
- - unixodbc
- - unixodbc-dev
- env:
- - DESC="trusty/gcc-4.8/bintuils-2.26 build"
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - cmake .. > /dev/null
- - export PATH=/usr/lib/binutils-2.26/bin:$PATH && make
-
- - os: linux
- dist: bionic
- language: c
- compiler: clang
- env: DESC="linux/clang build"
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - unixodbc
- - unixodbc-dev
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - cmake .. > /dev/null
- - make > /dev/null
-
- - os: linux
- arch: arm64
- dist: bionic
- language: c
- compiler: clang
- env: DESC="arm64 linux/clang build"
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
- cmake .. -DCPUTYPE=aarch64 > /dev/null;
- else
- cmake .. > /dev/null;
- fi
- - make > /dev/null
-
- - os: linux
- arch: arm64
- dist: xenial
- language: c
- git:
- - depth: 1
-
- addons:
- apt:
- packages:
- - build-essential
- - cmake
- - unixodbc
- - unixodbc-dev
- env:
- - DESC="arm64 xenial build"
-
- before_script:
- - export TZ=Asia/Harbin
- - date
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
-
- script:
- - if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then
- cmake .. -DCPUTYPE=aarch64 > /dev/null;
- else
- cmake .. > /dev/null;
- fi
- - make > /dev/null
-
- - os: osx
- osx_image: xcode11.4
- language: c
- compiler: clang
- env: DESC="mac/clang build"
- git:
- - depth: 1
- addons:
- homebrew:
- - cmake
- - unixodbc
-
- script:
- - cd ${TRAVIS_BUILD_DIR}
- - mkdir debug
- - cd debug
- - cmake .. > /dev/null
- - make > /dev/null
diff --git a/cmake/version.inc b/cmake/version.inc
index 8035b31cc7a9a391becaacbff22d708177536ce2..0ee23f319a9c5761cfb6b47c6118f72e5c768f84 100755
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "2.0.20.0")
+ SET(TD_VER_NUMBER "2.1.0.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md
index 3442a2248cd3743cc93034fb5aa9d13b96079543..5eec33e2f1740eeff9042a5dc3ea01ecab50632b 100644
--- a/documentation20/cn/08.connector/01.java/docs.md
+++ b/documentation20/cn/08.connector/01.java/docs.md
@@ -16,7 +16,6 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
* TDengine 目前不支持针对单条数据记录的删除操作。
* 目前不支持事务操作。
-* 目前不支持表间的 union 操作。
* 目前不支持嵌套查询(nested query)。
* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。
@@ -447,7 +446,7 @@ Query OK, 1 row(s) in set (0.000141s)
-## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
+## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本
| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 |
| -------------------- | ----------------- | -------- |
diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md
index 72fcd05d5289682ac70887c9ddfbd13f01d52bd5..bfa0456c7d4e80d1fd9336d7c4b7b9ca829b278e 100644
--- a/documentation20/cn/11.administrator/docs.md
+++ b/documentation20/cn/11.administrator/docs.md
@@ -144,7 +144,7 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数
- numOfMnodes:系统中管理节点个数。默认值:3。
- balance:是否启动负载均衡。0:否,1:是。默认值:1。
- mnodeEqualVnodeNum: 一个mnode等同于vnode消耗的个数。默认值:4。
-- offlineThreshold: dnode离线阈值,超过该时间将导致该dnode从集群中删除。单位为秒,默认值:86400*100(即100天)。
+- offlineThreshold: dnode离线阈值,超过该时间将导致该dnode从集群中删除。单位为秒,默认值:86400*10(即10天)。
- statusInterval: dnode向mnode报告状态时长。单位为秒,默认值:1。
- maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。
- maxVgroupsPerDb: 每个数据库中能够使用的最大vgroup个数。
@@ -462,31 +462,31 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
| 关键字列表 | | | | |
| ---------- | ----------- | ------------ | ---------- | --------- |
-| ABLOCKS | CONNECTIONS | HAVING | MODULES | SLIMIT |
-| ABORT | COPY | ID | NCHAR | SMALLINT |
-| ACCOUNT | COUNT | IF | NE | SPREAD |
-| ACCOUNTS | CREATE | IGNORE | NONE | STABLE |
-| ADD | CTIME | IMMEDIATE | NOT | STABLES |
-| AFTER | DATABASE | IMPORT | NOTNULL | STAR |
-| ALL | DATABASES | IN | NOW | STATEMENT |
-| ALTER | DAYS | INITIALLY | OF | STDDEV |
-| AND | DEFERRED | INSERT | OFFSET | STREAM |
-| AS | DELIMITERS | INSTEAD | OR | STREAMS |
-| ASC | DESC | INTEGER | ORDER | STRING |
-| ATTACH | DESCRIBE | INTERVAL | PASS | SUM |
-| AVG | DETACH | INTO | PERCENTILE | TABLE |
-| BEFORE | DIFF | IP | PLUS | TABLES |
-| BEGIN | DISTINCT | IS | PRAGMA | TAG |
-| BETWEEN | DIVIDE | ISNULL | PREV | TAGS |
-| BIGINT | DNODE | JOIN | PRIVILEGE | TBLOCKS |
-| BINARY | DNODES | KEEP | QUERIES | TBNAME |
-| BITAND | DOT | KEY | QUERY | TIMES |
-| BITNOT | DOUBLE | KILL | RAISE | TIMESTAMP |
-| BITOR | DROP | LAST | REM | TINYINT |
-| BOOL | EACH | LE | REPLACE | TOP |
-| BOTTOM | END | LEASTSQUARES | REPLICA | TOPIC |
-| BY | EQ | LIKE | RESET | TRIGGER |
-| CACHE | EXISTS | LIMIT | RESTRICT | UMINUS |
+| ABLOCKS | CONNECTIONS | HAVING | MODULES | SMALLINT |
+| ABORT | COPY | ID | NCHAR | SPREAD |
+| ACCOUNT | COUNT | IF | NE | STABLE |
+| ACCOUNTS | CREATE | IGNORE | NONE | STABLES |
+| ADD | CTIME | IMMEDIATE | NOT | STAR |
+| AFTER | DATABASE | IMPORT | NOTNULL | STATEMENT |
+| ALL | DATABASES | IN | NOW | STDDEV |
+| ALTER | DAYS | INITIALLY | OF | STREAM |
+| AND | DEFERRED | INSERT | OFFSET | STREAMS |
+| AS | DELIMITERS | INSTEAD | OR | STRING |
+| ASC | DESC | INTEGER | ORDER | SUM |
+| ATTACH | DESCRIBE | INTERVAL | PASS | TABLE |
+| AVG | DETACH | INTO | PERCENTILE | TABLES |
+| BEFORE | DIFF | IP | PLUS | TAG |
+| BEGIN | DISTINCT | IS | PRAGMA | TAGS |
+| BETWEEN | DIVIDE | ISNULL | PREV | TBLOCKS |
+| BIGINT | DNODE | JOIN | PRIVILEGE | TBNAME |
+| BINARY | DNODES | KEEP | QUERIES | TIMES |
+| BITAND | DOT | KEY | QUERY | TIMESTAMP |
+| BITNOT | DOUBLE | KILL | RAISE | TINYINT |
+| BITOR | DROP | LAST | REM | TOP |
+| BOOL | EACH | LE | REPLACE | TOPIC |
+| BOTTOM | END | LEASTSQUARES | REPLICA | TRIGGER |
+| BY | EQ | LIKE | RESET | UMINUS |
+| CACHE | EXISTS | LIMIT | RESTRICT | UNION |
| CASCADE | EXPLAIN | LINEAR | ROW | UPLUS |
| CHANGE | FAIL | LOCAL | ROWS | USE |
| CLOG | FILL | LP | RP | USER |
@@ -498,5 +498,5 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
| CONCAT | GLOB | METRICS | SHOW | VIEW |
| CONFIGS | GRANTS | MIN | SLASH | WAVG |
| CONFLICT | GROUP | MINUS | SLIDING | WHERE |
-| CONNECTION | GT | MNODES | | |
+| CONNECTION | GT | MNODES | SLIMIT | |
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index 04c90748f2ecffa5c58b337f7eda564581985d3a..112ad99391521b31bdb4876519be6b68d8b62fe6 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -407,7 +407,7 @@ SELECT select_expr [, select_expr ...]
[INTERVAL (interval_val [, interval_offset])]
[SLIDING sliding_val]
[FILL fill_val]
- [GROUP BY col_list ]
+ [GROUP BY col_list]
[ORDER BY col_list { DESC | ASC }]
[SLIMIT limit_val [SOFFSET offset_val]]
[LIMIT limit_val [OFFSET offset_val]]
@@ -647,7 +647,7 @@ Query OK, 1 row(s) in set (0.001091s)
3. 从 2.0.17 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。
+### UNION ALL 操作符
+
+```mysql
+SELECT ...
+UNION ALL SELECT ...
+[UNION ALL SELECT ...]
+```
+
+TDengine 支持 UNION ALL 操作符。也就是说,如果多个 SELECT 子句返回结果集的结构完全相同(列名、列类型、列数、顺序),那么可以通过 UNION ALL 把这些结果集合并到一起。目前只支持 UNION ALL 模式,也即在结果集的合并过程中是不去重的。
+
### SQL 示例
- 对于下面的例子,表tb1用以下语句创建
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index 31343ed293c2617a56da57e3893a36b5d3289faf..66f22fda1a6ff41406b5c63c10a78dd28173269e 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,6 +1,6 @@
name: tdengine
base: core18
-version: '2.0.20.0'
+version: '2.1.0.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- - usr/lib/libtaos.so.2.0.20.0
+ - usr/lib/libtaos.so.2.1.0.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
diff --git a/src/connector/python/linux/python2/setup.py b/src/connector/python/linux/python2/setup.py
index ff2d90fcb3dff5965572452672a7491dd4aa44d1..3f065e03485bb0436d7b0356c2508d5913273eb5 100644
--- a/src/connector/python/linux/python2/setup.py
+++ b/src/connector/python/linux/python2/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
- version="2.0.8",
+ version="2.0.9",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py
index 4367947341edad7b0a9bdbcaec69a7de9801e267..3d0ecd290156b71e2d738fe7d3693763e0047338 100644
--- a/src/connector/python/linux/python2/taos/cinterface.py
+++ b/src/connector/python/linux/python2/taos/cinterface.py
@@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
_timestamp_converter = _convert_microsecond_to_datetime
if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_int64))[
+ :abs(num_of_rows)]]
else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_int64))[
+ :abs(num_of_rows)]]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
diff --git a/src/connector/python/linux/python3/setup.py b/src/connector/python/linux/python3/setup.py
index 296e79b973fde7c86493565940d63f64e7c6fca3..0bd7d51b6a3699acc6c923de2533cd85393f6ae3 100644
--- a/src/connector/python/linux/python3/setup.py
+++ b/src/connector/python/linux/python3/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
- version="2.0.7",
+ version="2.0.9",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/linux/python3/taos/cinterface.py
index 4367947341edad7b0a9bdbcaec69a7de9801e267..3d0ecd290156b71e2d738fe7d3693763e0047338 100644
--- a/src/connector/python/linux/python3/taos/cinterface.py
+++ b/src/connector/python/linux/python3/taos/cinterface.py
@@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
_timestamp_converter = _convert_microsecond_to_datetime
if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_int64))[
+ :abs(num_of_rows)]]
else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_int64))[
+ :abs(num_of_rows)]]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
diff --git a/src/connector/python/osx/python3/setup.py b/src/connector/python/osx/python3/setup.py
index 9bce1a976febcec457c2b3f8ade0bc6d546307c4..4c865676c9145846e61ee789cb503b84ad424bf1 100644
--- a/src/connector/python/osx/python3/setup.py
+++ b/src/connector/python/osx/python3/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
- version="2.0.7",
+ version="2.0.9",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
diff --git a/src/connector/python/osx/python3/taos/cinterface.py b/src/connector/python/osx/python3/taos/cinterface.py
index dca9bd42e8733616f34654542ba8c2c3ea3ece9d..720fbef6f5111c2ec85394498ec862c85aafbc99 100644
--- a/src/connector/python/osx/python3/taos/cinterface.py
+++ b/src/connector/python/osx/python3/taos/cinterface.py
@@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
_timestamp_converter = _convert_microsecond_to_datetime
if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_int64))[
+ :abs(num_of_rows)]]
else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_int64))[
+ :abs(num_of_rows)]]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
diff --git a/src/connector/python/windows/python2/setup.py b/src/connector/python/windows/python2/setup.py
index 47d374fe67673172596824f68ed495f68941bb51..24d75f937cdb2a61cd2330138c0575e4ccbe2875 100644
--- a/src/connector/python/windows/python2/setup.py
+++ b/src/connector/python/windows/python2/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
- version="2.0.7",
+ version="2.0.9",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py
index ec72474df93a4d13aa13256611044c5bc140f2d3..65cb183f26978dc83553b3e88a5c6599ccc1dd48 100644
--- a/src/connector/python/windows/python2/taos/cinterface.py
+++ b/src/connector/python/windows/python2/taos/cinterface.py
@@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
_timestamp_converter = _convert_microsecond_to_datetime
if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_int64))[
+ :abs(num_of_rows)]]
else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_int64))[
+ :abs(num_of_rows)]]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
diff --git a/src/connector/python/windows/python3/setup.py b/src/connector/python/windows/python3/setup.py
index cdcec62a218e72adbda949c0fd666b5ec4abc340..2659c493aad8c3059095f0d4c98c61b69ed834f2 100644
--- a/src/connector/python/windows/python3/setup.py
+++ b/src/connector/python/windows/python3/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
- version="2.0.7",
+ version="2.0.9",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py
index ec72474df93a4d13aa13256611044c5bc140f2d3..65cb183f26978dc83553b3e88a5c6599ccc1dd48 100644
--- a/src/connector/python/windows/python3/taos/cinterface.py
+++ b/src/connector/python/windows/python3/taos/cinterface.py
@@ -21,11 +21,17 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
_timestamp_converter = _convert_microsecond_to_datetime
if num_of_rows > 0:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_int64))[
+ :abs(num_of_rows)]]
else:
- return list(map(_timestamp_converter, ctypes.cast(
- data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
+ return [
+ None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast(
+ data, ctypes.POINTER(
+ ctypes.c_int64))[
+ :abs(num_of_rows)]]
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index f35c0facfa56fc24efe83d6195bf19c808e5d7ac..20d7f671385378e83c1601b6909d041fe2e82e66 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -188,7 +188,7 @@ typedef struct {
/* Used by main to communicate with parse_opt. */
typedef struct SArguments_S {
char * metaFile;
- int test_mode;
+ uint32_t test_mode;
char * host;
uint16_t port;
char * user;
@@ -205,31 +205,31 @@ typedef struct SArguments_S {
bool verbose_print;
bool performance_print;
char * output_file;
- int query_mode;
+ uint32_t query_mode;
char * datatype[MAX_NUM_DATATYPE + 1];
- int len_of_binary;
- int num_of_CPR;
- int num_of_threads;
- int64_t insert_interval;
+ uint32_t len_of_binary;
+ uint32_t num_of_CPR;
+ uint32_t num_of_threads;
+ uint64_t insert_interval;
int64_t query_times;
- int64_t interlace_rows;
- int64_t num_of_RPR; // num_of_records_per_req
- int64_t max_sql_len;
- int64_t num_of_tables;
- int64_t num_of_DPT;
+ uint64_t interlace_rows;
+ uint64_t num_of_RPR; // num_of_records_per_req
+ uint64_t max_sql_len;
+ uint64_t num_of_tables;
+ uint64_t num_of_DPT;
int abort;
int disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms or us by database precision
- int method_of_delete;
+ uint32_t method_of_delete;
char ** arg_list;
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
} SArguments;
typedef struct SColumn_S {
- char field[TSDB_COL_NAME_LEN + 1];
- char dataType[MAX_TB_NAME_SIZE];
- int dataLen;
+ char field[TSDB_COL_NAME_LEN + 1];
+ char dataType[MAX_TB_NAME_SIZE];
+ uint32_t dataLen;
char note[128];
} StrColumn;
@@ -237,50 +237,50 @@ typedef struct SSuperTable_S {
char sTblName[MAX_TB_NAME_SIZE+1];
int64_t childTblCount;
bool childTblExists; // 0: no, 1: yes
- int64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
- int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
+ uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql
+ uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table
char childTblPrefix[MAX_TB_NAME_SIZE];
char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample
char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest
int64_t childTblLimit;
- int64_t childTblOffset;
+ uint64_t childTblOffset;
// int multiThreadWriteOneTbl; // 0: no, 1: yes
- int64_t interlaceRows; //
+ uint64_t interlaceRows; //
int disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms or us by database precision
- int64_t maxSqlLen; //
+ uint64_t maxSqlLen; //
- int64_t insertInterval; // insert interval, will override global insert interval
- int64_t insertRows;
+ uint64_t insertInterval; // insert interval, will override global insert interval
+ uint64_t insertRows;
int64_t timeStampStep;
char startTimestamp[MAX_TB_NAME_SIZE];
char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json
char sampleFile[MAX_FILE_NAME_LEN+1];
char tagsFile[MAX_FILE_NAME_LEN+1];
- int columnCount;
+ uint32_t columnCount;
StrColumn columns[MAX_COLUMN_COUNT];
- int tagCount;
+ uint32_t tagCount;
StrColumn tags[MAX_TAG_COUNT];
char* childTblName;
char* colsOfCreateChildTable;
- int64_t lenOfOneRow;
- int64_t lenOfTagOfOneRow;
+ uint64_t lenOfOneRow;
+ uint64_t lenOfTagOfOneRow;
char* sampleDataBuf;
//int sampleRowCount;
//int sampleUsePos;
- int tagSource; // 0: rand, 1: tag sample
+ uint32_t tagSource; // 0: rand, 1: tag sample
char* tagDataBuf;
- int tagSampleCount;
- int tagUsePos;
+ uint32_t tagSampleCount;
+ uint32_t tagUsePos;
// statistics
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
} SSuperTable;
typedef struct {
@@ -307,8 +307,8 @@ typedef struct {
typedef struct SDbCfg_S {
// int maxtablesPerVnode;
- int minRows;
- int maxRows;
+ uint32_t minRows; // 0 means default
+ uint32_t maxRows; // 0 means default
int comp;
int walLevel;
int cacheLast;
@@ -327,7 +327,7 @@ typedef struct SDataBase_S {
char dbName[MAX_DB_NAME_SIZE];
bool drop; // 0: use exists, 1: if exists, drop then new create
SDbCfg dbCfg;
- int64_t superTblCount;
+ uint64_t superTblCount;
SSuperTable superTbls[MAX_SUPER_TABLE_COUNT];
} SDataBase;
@@ -345,57 +345,57 @@ typedef struct SDbs_S {
bool do_aggreFunc;
bool queryMode;
- int threadCount;
- int threadCountByCreateTbl;
- int dbCount;
+ uint32_t threadCount;
+ uint32_t threadCountByCreateTbl;
+ uint32_t dbCount;
SDataBase db[MAX_DB_COUNT];
// statistics
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
} SDbs;
typedef struct SpecifiedQueryInfo_S {
- int64_t queryInterval; // 0: unlimit > 0 loop/s
- int64_t concurrent;
- int64_t sqlCount;
- int mode; // 0: sync, 1: async
- int64_t subscribeInterval; // ms
- int64_t queryTimes;
+ uint64_t queryInterval; // 0: unlimit > 0 loop/s
+ uint64_t concurrent;
+ uint64_t sqlCount;
+ uint32_t mode; // 0: sync, 1: async
+ uint64_t subscribeInterval; // ms
+ uint64_t queryTimes;
int subscribeRestart;
int subscribeKeepProgress;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
- int64_t totalQueried;
+ uint64_t totalQueried;
} SpecifiedQueryInfo;
typedef struct SuperQueryInfo_S {
char sTblName[MAX_TB_NAME_SIZE+1];
- int64_t queryInterval; // 0: unlimit > 0 loop/s
- int threadCnt;
- int mode; // 0: sync, 1: async
- int64_t subscribeInterval; // ms
+ uint64_t queryInterval; // 0: unlimit > 0 loop/s
+ uint32_t threadCnt;
+ uint32_t mode; // 0: sync, 1: async
+ uint64_t subscribeInterval; // ms
int subscribeRestart;
int subscribeKeepProgress;
- int64_t queryTimes;
- int64_t childTblCount;
+ uint64_t queryTimes;
+ uint64_t childTblCount;
char childTblPrefix[MAX_TB_NAME_SIZE];
- int64_t sqlCount;
+ uint64_t sqlCount;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1];
TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];
char* childTblName;
- int64_t totalQueried;
+ uint64_t totalQueried;
} SuperQueryInfo;
typedef struct SQueryMetaInfo_S {
char cfgDir[MAX_FILE_NAME_LEN+1];
char host[MAX_HOSTNAME_SIZE];
uint16_t port;
- struct sockaddr_in serv_addr;
+ struct sockaddr_in serv_addr;
char user[MAX_USERNAME_SIZE];
char password[MAX_PASSWORD_SIZE];
char dbName[MAX_DB_NAME_SIZE+1];
@@ -403,47 +403,47 @@ typedef struct SQueryMetaInfo_S {
SpecifiedQueryInfo specifiedQueryInfo;
SuperQueryInfo superQueryInfo;
- int64_t totalQueried;
+ uint64_t totalQueried;
} SQueryMetaInfo;
typedef struct SThreadInfo_S {
- TAOS *taos;
- int threadID;
- char db_name[MAX_DB_NAME_SIZE+1];
- uint32_t time_precision;
- char fp[4096];
- char tb_prefix[MAX_TB_NAME_SIZE];
- int64_t start_table_from;
- int64_t end_table_to;
- int64_t ntables;
- int64_t data_of_rate;
- int64_t start_time;
- char* cols;
- bool use_metric;
+ TAOS * taos;
+ int threadID;
+ char db_name[MAX_DB_NAME_SIZE+1];
+ uint32_t time_precision;
+ char fp[4096];
+ char tb_prefix[MAX_TB_NAME_SIZE];
+ uint64_t start_table_from;
+ uint64_t end_table_to;
+ uint64_t ntables;
+ uint64_t data_of_rate;
+ int64_t start_time;
+ char* cols;
+ bool use_metric;
SSuperTable* superTblInfo;
// for async insert
- tsem_t lock_sem;
- int64_t counter;
+ tsem_t lock_sem;
+ int64_t counter;
uint64_t st;
uint64_t et;
- int64_t lastTs;
+ uint64_t lastTs;
// sample data
- int64_t samplePos;
+ int64_t samplePos;
// statistics
- int64_t totalInsertRows;
- int64_t totalAffectedRows;
+ uint64_t totalInsertRows;
+ uint64_t totalAffectedRows;
// insert delay statistics
- int64_t cntDelay;
- int64_t totalDelay;
- int64_t avgDelay;
- int64_t maxDelay;
- int64_t minDelay;
+ uint64_t cntDelay;
+ uint64_t totalDelay;
+ uint64_t avgDelay;
+ uint64_t maxDelay;
+ uint64_t minDelay;
// query
- int64_t querySeq; // sequence number of sql command
+ uint64_t querySeq; // sequence number of sql command
} threadInfo;
#ifdef WINDOWS
@@ -725,7 +725,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
errorPrint("%s", "\n\t-c need a valid path following!\n");
exit(EXIT_FAILURE);
}
- tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN);
+ tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN);
} else if (strcmp(argv[i], "-h") == 0) {
if (argc == i+1) {
@@ -967,9 +967,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
}
} else if (strcmp(argv[i], "-D") == 0) {
arguments->method_of_delete = atoi(argv[++i]);
- if (arguments->method_of_delete < 0
- || arguments->method_of_delete > 3) {
- arguments->method_of_delete = 0;
+ if (arguments->method_of_delete > 3) {
+ errorPrint("%s", "\n\t-D need a valid (0~3) number following!\n");
+ exit(EXIT_FAILURE);
}
} else if ((strcmp(argv[i], "--version") == 0) ||
(strcmp(argv[i], "-V") == 0)){
@@ -1004,17 +1004,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
break;
printf("\n");
}
- printf("# Insertion interval: %"PRId64"\n",
+ printf("# Insertion interval: %"PRIu64"\n",
arguments->insert_interval);
- printf("# Number of records per req: %"PRId64"\n",
+ printf("# Number of records per req: %"PRIu64"\n",
arguments->num_of_RPR);
- printf("# Max SQL length: %"PRId64"\n",
+ printf("# Max SQL length: %"PRIu64"\n",
arguments->max_sql_len);
printf("# Length of Binary: %d\n", arguments->len_of_binary);
printf("# Number of Threads: %d\n", arguments->num_of_threads);
- printf("# Number of Tables: %"PRId64"\n",
+ printf("# Number of Tables: %"PRIu64"\n",
arguments->num_of_tables);
- printf("# Number of Data per Table: %"PRId64"\n",
+ printf("# Number of Data per Table: %"PRIu64"\n",
arguments->num_of_DPT);
printf("# Database name: %s\n", arguments->database);
printf("# Table prefix: %s\n", arguments->tb_prefix);
@@ -1270,11 +1270,11 @@ static int printfInsertMeta() {
printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile);
printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount);
printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl);
- printf("top insert interval: \033[33m%"PRId64"\033[0m\n",
+ printf("top insert interval: \033[33m%"PRIu64"\033[0m\n",
g_args.insert_interval);
- printf("number of records per req: \033[33m%"PRId64"\033[0m\n",
+ printf("number of records per req: \033[33m%"PRIu64"\033[0m\n",
g_args.num_of_RPR);
- printf("max sql length: \033[33m%"PRId64"\033[0m\n",
+ printf("max sql length: \033[33m%"PRIu64"\033[0m\n",
g_args.max_sql_len);
printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount);
@@ -1336,10 +1336,10 @@ static int printfInsertMeta() {
}
}
- printf(" super table count: \033[33m%"PRId64"\033[0m\n",
+ printf(" super table count: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTblCount);
- for (int64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
- printf(" super table[\033[33m%"PRId64"\033[0m]:\n", j);
+ for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) {
+ printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j);
printf(" stbName: \033[33m%s\033[0m\n",
g_Dbs.db[i].superTbls[j].sTblName);
@@ -1360,7 +1360,7 @@ static int printfInsertMeta() {
printf(" childTblExists: \033[33m%s\033[0m\n", "error");
}
- printf(" childTblCount: \033[33m%"PRId64"\033[0m\n",
+ printf(" childTblCount: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblCount);
printf(" childTblPrefix: \033[33m%s\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblPrefix);
@@ -1372,11 +1372,11 @@ static int printfInsertMeta() {
printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblLimit);
}
- if (g_Dbs.db[i].superTbls[j].childTblOffset >= 0) {
- printf(" childTblOffset: \033[33m%"PRId64"\033[0m\n",
+ if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) {
+ printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblOffset);
}
- printf(" insertRows: \033[33m%"PRId64"\033[0m\n",
+ printf(" insertRows: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].insertRows);
/*
if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
@@ -1385,11 +1385,11 @@ static int printfInsertMeta() {
printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n");
}
*/
- printf(" interlaceRows: \033[33m%"PRId64"\033[0m\n",
+ printf(" interlaceRows: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
- printf(" stable insert interval: \033[33m%"PRId64"\033[0m\n",
+ printf(" stable insert interval: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].insertInterval);
}
@@ -1397,7 +1397,7 @@ static int printfInsertMeta() {
g_Dbs.db[i].superTbls[j].disorderRange);
printf(" disorderRatio: \033[33m%d\033[0m\n",
g_Dbs.db[i].superTbls[j].disorderRatio);
- printf(" maxSqlLen: \033[33m%"PRId64"\033[0m\n",
+ printf(" maxSqlLen: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].maxSqlLen);
printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n",
g_Dbs.db[i].superTbls[j].timeStampStep);
@@ -1463,8 +1463,8 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile);
fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount);
fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl);
- fprintf(fp, "number of records per req: %"PRId64"\n", g_args.num_of_RPR);
- fprintf(fp, "max sql length: %"PRId64"\n", g_args.max_sql_len);
+ fprintf(fp, "number of records per req: %"PRIu64"\n", g_args.num_of_RPR);
+ fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len);
fprintf(fp, "database count: %d\n", g_Dbs.dbCount);
for (int i = 0; i < g_Dbs.dbCount; i++) {
@@ -1521,7 +1521,7 @@ static void printfInsertMetaToFile(FILE* fp) {
}
}
- fprintf(fp, " super table count: %"PRId64"\n", g_Dbs.db[i].superTblCount);
+ fprintf(fp, " super table count: %"PRIu64"\n", g_Dbs.db[i].superTblCount);
for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) {
fprintf(fp, " super table[%d]:\n", j);
@@ -1543,7 +1543,7 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " childTblExists: %s\n", "error");
}
- fprintf(fp, " childTblCount: %"PRId64"\n",
+ fprintf(fp, " childTblCount: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].childTblCount);
fprintf(fp, " childTblPrefix: %s\n",
g_Dbs.db[i].superTbls[j].childTblPrefix);
@@ -1551,12 +1551,12 @@ static void printfInsertMetaToFile(FILE* fp) {
g_Dbs.db[i].superTbls[j].dataSource);
fprintf(fp, " insertMode: %s\n",
g_Dbs.db[i].superTbls[j].insertMode);
- fprintf(fp, " insertRows: %"PRId64"\n",
+ fprintf(fp, " insertRows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].insertRows);
- fprintf(fp, " interlace rows: %"PRId64"\n",
+ fprintf(fp, " interlace rows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) {
- fprintf(fp, " stable insert interval: %"PRId64"\n",
+ fprintf(fp, " stable insert interval: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].insertInterval);
}
/*
@@ -1566,11 +1566,11 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " multiThreadWriteOneTbl: yes\n");
}
*/
- fprintf(fp, " interlaceRows: %"PRId64"\n",
+ fprintf(fp, " interlaceRows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange);
fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio);
- fprintf(fp, " maxSqlLen: %"PRId64"\n",
+ fprintf(fp, " maxSqlLen: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].maxSqlLen);
fprintf(fp, " timeStampStep: %"PRId64"\n",
@@ -1631,21 +1631,21 @@ static void printfQueryMeta() {
printf("\n");
printf("specified table query info: \n");
- printf("query interval: \033[33m%"PRId64" ms\033[0m\n",
+ printf("query interval: \033[33m%"PRIu64" ms\033[0m\n",
g_queryInfo.specifiedQueryInfo.queryInterval);
- printf("top query times:\033[33m%"PRId64"\033[0m\n", g_args.query_times);
- printf("concurrent: \033[33m%"PRId64"\033[0m\n",
+ printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times);
+ printf("concurrent: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.concurrent);
- printf("sqlCount: \033[33m%"PRId64"\033[0m\n",
+ printf("sqlCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.sqlCount);
printf("specified tbl query times:\n");
- printf(" \033[33m%"PRId64"\033[0m\n",
+ printf(" \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.queryTimes);
if (SUBSCRIBE_TEST == g_args.test_mode) {
printf("mod: \033[33m%d\033[0m\n",
g_queryInfo.specifiedQueryInfo.mode);
- printf("interval: \033[33m%"PRId64"\033[0m\n",
+ printf("interval: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.specifiedQueryInfo.subscribeInterval);
printf("restart: \033[33m%d\033[0m\n",
g_queryInfo.specifiedQueryInfo.subscribeRestart);
@@ -1653,27 +1653,27 @@ static void printfQueryMeta() {
g_queryInfo.specifiedQueryInfo.subscribeKeepProgress);
}
- for (int64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
- printf(" sql[%"PRId64"]: \033[33m%s\033[0m\n",
+ for (uint64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) {
+ printf(" sql[%"PRIu64"]: \033[33m%s\033[0m\n",
i, g_queryInfo.specifiedQueryInfo.sql[i]);
}
printf("\n");
printf("super table query info:\n");
- printf("query interval: \033[33m%"PRId64"\033[0m\n",
+ printf("query interval: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.queryInterval);
printf("threadCnt: \033[33m%d\033[0m\n",
g_queryInfo.superQueryInfo.threadCnt);
- printf("childTblCount: \033[33m%"PRId64"\033[0m\n",
+ printf("childTblCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.childTblCount);
printf("stable name: \033[33m%s\033[0m\n",
g_queryInfo.superQueryInfo.sTblName);
- printf("stb query times:\033[33m%"PRId64"\033[0m\n",
+ printf("stb query times:\033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.queryTimes);
if (SUBSCRIBE_TEST == g_args.test_mode) {
printf("mod: \033[33m%d\033[0m\n",
g_queryInfo.superQueryInfo.mode);
- printf("interval: \033[33m%"PRId64"\033[0m\n",
+ printf("interval: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.subscribeInterval);
printf("restart: \033[33m%d\033[0m\n",
g_queryInfo.superQueryInfo.subscribeRestart);
@@ -1681,7 +1681,7 @@ static void printfQueryMeta() {
g_queryInfo.superQueryInfo.subscribeKeepProgress);
}
- printf("sqlCount: \033[33m%"PRId64"\033[0m\n",
+ printf("sqlCount: \033[33m%"PRIu64"\033[0m\n",
g_queryInfo.superQueryInfo.sqlCount);
for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
printf(" sql[%d]: \033[33m%s\033[0m\n",
@@ -2290,7 +2290,7 @@ static int calcRowLen(SSuperTable* superTbls) {
static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* dbName, char* sTblName, char** childTblNameOfSuperTbl,
- int64_t* childTblCountOfSuperTbl, int64_t limit, int64_t offset) {
+ uint64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) {
char command[BUFFER_SIZE] = "\0";
char limitBuf[100] = "\0";
@@ -2301,7 +2301,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* childTblName = *childTblNameOfSuperTbl;
if (offset >= 0) {
- snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRId64"",
+ snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"",
limit, offset);
}
@@ -2367,11 +2367,11 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
char* sTblName, char** childTblNameOfSuperTbl,
- int64_t* childTblCountOfSuperTbl) {
+ uint64_t* childTblCountOfSuperTbl) {
return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName,
childTblNameOfSuperTbl, childTblCountOfSuperTbl,
- -1, -1);
+ -1, 0);
}
static int getSuperTableFromServer(TAOS * taos, char* dbName,
@@ -2707,7 +2707,7 @@ static int createDatabasesAndStables() {
printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName);
}
- debugPrint("%s() LN%d supertbl count:%"PRId64"\n",
+ debugPrint("%s() LN%d supertbl count:%"PRIu64"\n",
__func__, __LINE__, g_Dbs.db[i].superTblCount);
int validStbCount = 0;
@@ -2766,15 +2766,15 @@ static void* createTable(void *sarg)
int len = 0;
int batchNum = 0;
- verbosePrint("%s() LN%d: Creating table from %"PRId64" to %"PRId64"\n",
+ verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n",
__func__, __LINE__,
pThreadInfo->start_table_from, pThreadInfo->end_table_to);
- for (int64_t i = pThreadInfo->start_table_from;
+ for (uint64_t i = pThreadInfo->start_table_from;
i <= pThreadInfo->end_table_to; i++) {
if (0 == g_Dbs.use_metric) {
snprintf(buffer, buff_len,
- "create table if not exists %s.%s%"PRId64" %s;",
+ "create table if not exists %s.%s%"PRIu64" %s;",
pThreadInfo->db_name,
g_args.tb_prefix, i,
pThreadInfo->cols);
@@ -2805,7 +2805,7 @@ static void* createTable(void *sarg)
}
len += snprintf(buffer + len,
buff_len - len,
- "if not exists %s.%s%"PRId64" using %s.%s tags %s ",
+ "if not exists %s.%s%"PRIu64" using %s.%s tags %s ",
pThreadInfo->db_name, superTblInfo->childTblPrefix,
i, pThreadInfo->db_name,
superTblInfo->sTblName, tagsValBuf);
@@ -2829,7 +2829,7 @@ static void* createTable(void *sarg)
int64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] already create %"PRId64" - %"PRId64" tables\n",
+ printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n",
pThreadInfo->threadID, pThreadInfo->start_table_from, i);
lastPrintTime = currentPrintTime;
}
@@ -2897,7 +2897,7 @@ static int startMultiThreadCreateChildTable(
startFrom = t_info->end_table_to + 1;
t_info->use_metric = true;
t_info->cols = cols;
- t_info->minDelay = INT64_MAX;
+ t_info->minDelay = UINT64_MAX;
pthread_create(pids + i, NULL, createTable, t_info);
}
@@ -2963,7 +2963,7 @@ static void createChildTables() {
snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")");
- verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n",
+ verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRIu64" schema: %s\n",
__func__, __LINE__,
g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf);
startMultiThreadCreateChildTable(
@@ -3091,7 +3091,7 @@ static int readSampleFromCsvFileToMem(
}
if (readLen > superTblInfo->lenOfOneRow) {
- printf("sample row len[%d] overflow define schema len[%"PRId64"], so discard this row\n",
+ printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n",
(int32_t)readLen, superTblInfo->lenOfOneRow);
continue;
}
@@ -3343,6 +3343,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval");
if (gInsertInterval && gInsertInterval->type == cJSON_Number) {
+ if (gInsertInterval->valueint <0) {
+ errorPrint("%s() LN%d, failed to read json, insert interval input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_args.insert_interval = gInsertInterval->valueint;
} else if (!gInsertInterval) {
g_args.insert_interval = 0;
@@ -3354,13 +3359,19 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows");
if (interlaceRows && interlaceRows->type == cJSON_Number) {
+ if (interlaceRows->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+
+ }
g_args.interlace_rows = interlaceRows->valueint;
// rows per table need be less than insert batch
if (g_args.interlace_rows > g_args.num_of_RPR) {
- printf("NOTICE: interlace rows value %"PRId64" > num_of_records_per_req %"PRId64"\n\n",
+ printf("NOTICE: interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
g_args.interlace_rows, g_args.num_of_RPR);
- printf(" interlace rows value will be set to num_of_records_per_req %"PRId64"\n\n",
+ printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
g_args.num_of_RPR);
printf(" press Enter key to continue or Ctrl-C to stop.");
(void)getchar();
@@ -3376,6 +3387,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len");
if (maxSqlLen && maxSqlLen->type == cJSON_Number) {
+ if (maxSqlLen->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_args.max_sql_len = maxSqlLen->valueint;
} else if (!maxSqlLen) {
g_args.max_sql_len = (1024*1024);
@@ -3387,9 +3403,14 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req");
if (numRecPerReq && numRecPerReq->type == cJSON_Number) {
+ if (numRecPerReq->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, num_of_records_per_req input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_args.num_of_RPR = numRecPerReq->valueint;
} else if (!numRecPerReq) {
- g_args.num_of_RPR = INT64_MAX;
+ g_args.num_of_RPR = UINT64_MAX;
} else {
errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n",
__func__, __LINE__);
@@ -3549,7 +3570,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (minRows && minRows->type == cJSON_Number) {
g_Dbs.db[i].dbCfg.minRows = minRows->valueint;
} else if (!minRows) {
- g_Dbs.db[i].dbCfg.minRows = -1;
+ g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default
} else {
printf("ERROR: failed to read json, minRows not found\n");
goto PARSE_OVER;
@@ -3559,7 +3580,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
if (maxRows && maxRows->type == cJSON_Number) {
g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint;
} else if (!maxRows) {
- g_Dbs.db[i].dbCfg.maxRows = -1;
+ g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default
} else {
printf("ERROR: failed to read json, maxRows not found\n");
goto PARSE_OVER;
@@ -3704,7 +3725,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count");
if (!count || count->type != cJSON_Number || 0 >= count->valueint) {
- errorPrint("%s() LN%d, failed to read json, childtable_count not found\n",
+ errorPrint("%s() LN%d, failed to read json, childtable_count input mistake\n",
__func__, __LINE__);
goto PARSE_OVER;
}
@@ -3858,12 +3879,17 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
*/
cJSON* interlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
if (interlaceRows && interlaceRows->type == cJSON_Number) {
+ if (interlaceRows->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint;
// rows per table need be less than insert batch
if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) {
- printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRId64" > num_of_records_per_req %"PRId64"\n\n",
+ printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n",
i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR);
- printf(" interlace rows value will be set to num_of_records_per_req %"PRId64"\n\n",
+ printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n",
g_args.num_of_RPR);
printf(" press Enter key to continue or Ctrl-C to stop.");
(void)getchar();
@@ -3906,6 +3932,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows");
if (insertRows && insertRows->type == cJSON_Number) {
+ if (insertRows->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint;
} else if (!insertRows) {
g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
@@ -3918,8 +3949,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
cJSON* insertInterval = cJSON_GetObjectItem(stbInfo, "insert_interval");
if (insertInterval && insertInterval->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint;
+ if (insertInterval->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
} else if (!insertInterval) {
- verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRId64".\n",
+ verbosePrint("%s() LN%d: stable insert interval is overridden by global %"PRIu64".\n",
__func__, __LINE__, g_args.insert_interval);
g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval;
} else {
@@ -4000,6 +4036,11 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times");
if (gQueryTimes && gQueryTimes->type == cJSON_Number) {
+ if (gQueryTimes->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_args.query_times = gQueryTimes->valueint;
} else if (!gQueryTimes) {
g_args.query_times = 1;
@@ -4027,10 +4068,10 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
goto PARSE_OVER;
}
- // super_table_query
+ // specified_table_query
cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query");
if (!specifiedQuery) {
- g_queryInfo.specifiedQueryInfo.concurrent = 0;
+ g_queryInfo.specifiedQueryInfo.concurrent = 1;
g_queryInfo.specifiedQueryInfo.sqlCount = 0;
} else if (specifiedQuery->type != cJSON_Object) {
printf("ERROR: failed to read json, super_table_query not found\n");
@@ -4046,6 +4087,12 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery,
"query_times");
if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) {
+ if (specifiedQueryTimes->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+
+ }
g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint;
} else if (!specifiedQueryTimes) {
g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times;
@@ -4057,13 +4104,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent");
if (concurrent && concurrent->type == cJSON_Number) {
- g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint;
- if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) {
- errorPrint("%s() LN%d, query sqlCount %"PRId64" or concurrent %"PRId64" is not correct.\n",
- __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount,
+ if (concurrent->valueint <= 0) {
+ errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.sqlCount,
g_queryInfo.specifiedQueryInfo.concurrent);
goto PARSE_OVER;
}
+ g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint;
} else if (!concurrent) {
g_queryInfo.specifiedQueryInfo.concurrent = 1;
}
@@ -4167,7 +4215,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
// sub_table_query
cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query");
if (!superQuery) {
- g_queryInfo.superQueryInfo.threadCnt = 0;
+ g_queryInfo.superQueryInfo.threadCnt = 1;
g_queryInfo.superQueryInfo.sqlCount = 0;
} else if (superQuery->type != cJSON_Object) {
printf("ERROR: failed to read json, sub_table_query not found\n");
@@ -4183,6 +4231,11 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times");
if (superQueryTimes && superQueryTimes->type == cJSON_Number) {
+ if (superQueryTimes->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, query_times input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint;
} else if (!superQueryTimes) {
g_queryInfo.superQueryInfo.queryTimes = g_args.query_times;
@@ -4194,6 +4247,12 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
cJSON* threads = cJSON_GetObjectItem(superQuery, "threads");
if (threads && threads->type == cJSON_Number) {
+ if (threads->valueint <= 0) {
+ errorPrint("%s() LN%d, failed to read json, threads input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+
+ }
g_queryInfo.superQueryInfo.threadCnt = threads->valueint;
} else if (!threads) {
g_queryInfo.superQueryInfo.threadCnt = 1;
@@ -4233,10 +4292,15 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
g_queryInfo.superQueryInfo.mode = SYNC_QUERY_MODE;
}
- cJSON* subinterval = cJSON_GetObjectItem(superQuery, "interval");
- if (subinterval && subinterval->type == cJSON_Number) {
- g_queryInfo.superQueryInfo.subscribeInterval = subinterval->valueint;
- } else if (!subinterval) {
+ cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval");
+ if (superInterval && superInterval->type == cJSON_Number) {
+ if (superInterval->valueint < 0) {
+ errorPrint("%s() LN%d, failed to read json, interval input mistake\n",
+ __func__, __LINE__);
+ goto PARSE_OVER;
+ }
+ g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint;
+ } else if (!superInterval) {
//printf("failed to read json, subscribe interval no found\n");
//goto PARSE_OVER;
g_queryInfo.superQueryInfo.subscribeInterval = 10000;
@@ -4587,7 +4651,7 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) {
sampleDataBuf = calloc(
superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1);
if (sampleDataBuf == NULL) {
- errorPrint("%s() LN%d, Failed to calloc %"PRId64" Bytes, reason:%s\n",
+ errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n",
__func__, __LINE__,
superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE,
strerror(errno));
@@ -4608,7 +4672,7 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) {
return 0;
}
-static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, int k)
+static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, uint64_t k)
{
int affectedRows;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
@@ -4638,18 +4702,17 @@ static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, int k)
return affectedRows;
}
-static void getTableName(char *pTblName, threadInfo* pThreadInfo, int64_t tableSeq)
+static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t tableSeq)
{
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
if (superTblInfo) {
- if ((superTblInfo->childTblOffset >= 0)
- && (superTblInfo->childTblLimit > 0)) {
+ if (superTblInfo->childTblLimit > 0) {
snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s",
superTblInfo->childTblName +
(tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN);
} else {
- verbosePrint("[%d] %s() LN%d: from=%"PRId64" count=%"PRId64" seq=%"PRId64"\n",
+ verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRIu64" seq=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__,
pThreadInfo->start_table_from,
pThreadInfo->ntables, tableSeq);
@@ -4657,31 +4720,31 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int64_t tableS
superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN);
}
} else {
- snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRId64"",
+ snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"",
g_args.tb_prefix, tableSeq);
}
}
static int64_t generateDataTail(
SSuperTable* superTblInfo,
- int64_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows,
+ uint64_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows,
int64_t startFrom, int64_t startTime, int64_t *pSamplePos, int64_t *dataLen) {
- int64_t len = 0;
- int ncols_per_record = 1; // count first col ts
+ uint64_t len = 0;
+ uint32_t ncols_per_record = 1; // count first col ts
char *pstr = buffer;
if (superTblInfo == NULL) {
- int datatypeSeq = 0;
+ uint32_t datatypeSeq = 0;
while(g_args.datatype[datatypeSeq]) {
datatypeSeq ++;
ncols_per_record ++;
}
}
- verbosePrint("%s() LN%d batch=%"PRId64"\n", __func__, __LINE__, batch);
+ verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch);
- int64_t k = 0;
+ uint64_t k = 0;
for (k = 0; k < batch;) {
char data[MAX_DATA_SIZE];
memset(data, 0, MAX_DATA_SIZE);
@@ -4756,7 +4819,7 @@ static int64_t generateDataTail(
remainderBufLen -= retLen;
}
- verbosePrint("%s() LN%d len=%"PRId64" k=%"PRId64" \nbuffer=%s\n",
+ verbosePrint("%s() LN%d len=%"PRIu64" k=%"PRIu64" \nbuffer=%s\n",
__func__, __LINE__, len, k, buffer);
startFrom ++;
@@ -4838,12 +4901,12 @@ static int generateSQLHead(char *tableName, int32_t tableSeq,
}
static int64_t generateInterlaceDataBuffer(
- char *tableName, int64_t batchPerTbl, int64_t i, int64_t batchPerTblTimes,
- int64_t tableSeq,
+ char *tableName, uint64_t batchPerTbl, uint64_t i, uint64_t batchPerTblTimes,
+ uint64_t tableSeq,
threadInfo *pThreadInfo, char *buffer,
- int64_t insertRows,
+ uint64_t insertRows,
int64_t startTime,
- int64_t *pRemainderBufLen)
+ uint64_t *pRemainderBufLen)
{
assert(buffer);
char *pstr = buffer;
@@ -4856,7 +4919,7 @@ static int64_t generateInterlaceDataBuffer(
return 0;
}
// generate data buffer
- verbosePrint("[%d] %s() LN%d i=%"PRId64" buffer:\n%s\n",
+ verbosePrint("[%d] %s() LN%d i=%"PRIu64" buffer:\n%s\n",
pThreadInfo->threadID, __func__, __LINE__, i, buffer);
pstr += headLen;
@@ -4864,7 +4927,7 @@ static int64_t generateInterlaceDataBuffer(
int64_t dataLen = 0;
- verbosePrint("[%d] %s() LN%d i=%"PRId64" batchPerTblTimes=%"PRId64" batchPerTbl = %"PRId64"\n",
+ verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%"PRIu64" batchPerTbl = %"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__,
i, batchPerTblTimes, batchPerTbl);
@@ -4886,7 +4949,7 @@ static int64_t generateInterlaceDataBuffer(
pstr += dataLen;
*pRemainderBufLen -= dataLen;
} else {
- debugPrint("%s() LN%d, generated data tail: %"PRId64", not equal batch per table: %"PRId64"\n",
+ debugPrint("%s() LN%d, generated data tail: %"PRIu64", not equal batch per table: %"PRIu64"\n",
__func__, __LINE__, k, batchPerTbl);
pstr -= headLen;
pstr[0] = '\0';
@@ -4896,7 +4959,7 @@ static int64_t generateInterlaceDataBuffer(
return k;
}
-static int generateProgressiveDataBuffer(
+static int64_t generateProgressiveDataBuffer(
char *tableName,
int64_t tableSeq,
threadInfo *pThreadInfo, char *buffer,
@@ -4941,12 +5004,21 @@ static int generateProgressiveDataBuffer(
return k;
}
+static void printStatPerThread(threadInfo *pThreadInfo)
+{
+ fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". %.2f records/second====\n",
+ pThreadInfo->threadID,
+ pThreadInfo->totalInsertRows,
+ pThreadInfo->totalAffectedRows,
+ (double)(pThreadInfo->totalAffectedRows / (pThreadInfo->totalDelay/1000.0)));
+}
+
static void* syncWriteInterlace(threadInfo *pThreadInfo) {
debugPrint("[%d] %s() LN%d: ### interlace write\n",
pThreadInfo->threadID, __func__, __LINE__);
- int64_t insertRows;
- int64_t interlaceRows;
+ uint64_t insertRows;
+ uint64_t interlaceRows;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
@@ -4981,10 +5053,10 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
// TODO: prompt tbl count multple interlace rows and batch
//
- int64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
+ uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
char* buffer = calloc(maxSqlLen, 1);
if (NULL == buffer) {
- errorPrint( "%s() LN%d, Failed to alloc %"PRId64" Bytes, reason:%s\n",
+ errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n",
__func__, __LINE__, maxSqlLen, strerror(errno));
return NULL;
}
@@ -4996,18 +5068,18 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
int64_t nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
- int insert_interval =
+ uint64_t insert_interval =
superTblInfo?superTblInfo->insertInterval:g_args.insert_interval;
- int64_t st = 0;
- int64_t et = 0xffffffff;
+ uint64_t st = 0;
+ uint64_t et = UINT64_MAX;
- int64_t lastPrintTime = taosGetTimestampMs();
- int64_t startTs = taosGetTimestampMs();
- int64_t endTs;
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
- int64_t tableSeq = pThreadInfo->start_table_from;
+ uint64_t tableSeq = pThreadInfo->start_table_from;
- debugPrint("[%d] %s() LN%d: start_table_from=%"PRId64" ntables=%"PRId64" insertRows=%"PRId64"\n",
+ debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRIu64" insertRows=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from,
pThreadInfo->ntables, insertRows);
@@ -5015,9 +5087,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
assert(pThreadInfo->ntables > 0);
- int64_t batchPerTbl = interlaceRows;
+ uint64_t batchPerTbl = interlaceRows;
+ uint64_t batchPerTblTimes;
- int64_t batchPerTblTimes;
if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) {
batchPerTblTimes =
g_args.num_of_RPR / interlaceRows;
@@ -5025,9 +5097,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
batchPerTblTimes = 1;
}
- int64_t generatedRecPerTbl = 0;
+ uint64_t generatedRecPerTbl = 0;
bool flagSleep = true;
- int64_t sleepTimeTotal = 0;
+ uint64_t sleepTimeTotal = 0;
char *strInsertInto = "insert into ";
int nInsertBufLen = strlen(strInsertInto);
@@ -5039,7 +5111,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
}
// generate data
memset(buffer, 0, maxSqlLen);
- int64_t remainderBufLen = maxSqlLen;
+ uint64_t remainderBufLen = maxSqlLen;
char *pstr = buffer;
@@ -5047,9 +5119,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
pstr += len;
remainderBufLen -= len;
- int64_t recOfBatch = 0;
+ uint64_t recOfBatch = 0;
- for (int64_t i = 0; i < batchPerTblTimes; i ++) {
+ for (uint64_t i = 0; i < batchPerTblTimes; i ++) {
getTableName(tableName, pThreadInfo, tableSeq);
if (0 == strlen(tableName)) {
errorPrint("[%d] %s() LN%d, getTableName return null\n",
@@ -5058,7 +5130,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
return NULL;
}
- int64_t oldRemainderLen = remainderBufLen;
+ uint64_t oldRemainderLen = remainderBufLen;
int64_t generated = generateInterlaceDataBuffer(
tableName, batchPerTbl, i, batchPerTblTimes,
tableSeq,
@@ -5067,10 +5139,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
startTime,
&remainderBufLen);
+ debugPrint("[%d] %s() LN%d, generated records is %"PRId64"\n",
+ pThreadInfo->threadID, __func__, __LINE__, generated);
if (generated < 0) {
- debugPrint("[%d] %s() LN%d, generated data is %"PRId64"\n",
+ errorPrint("[%d] %s() LN%d, generated records is %"PRId64"\n",
pThreadInfo->threadID, __func__, __LINE__, generated);
- goto free_and_statistics_interlace;
+ goto free_of_interlace;
} else if (generated == 0) {
break;
}
@@ -5114,7 +5188,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
break;
}
- verbosePrint("[%d] %s() LN%d recOfBatch=%"PRId64" totalInsertRows=%"PRId64"\n",
+ verbosePrint("[%d] %s() LN%d recOfBatch=%"PRIu64" totalInsertRows=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__, recOfBatch,
pThreadInfo->totalInsertRows);
verbosePrint("[%d] %s() LN%d, buffer=%s\n",
@@ -5125,30 +5199,30 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
int64_t affectedRows = execInsert(pThreadInfo, buffer, recOfBatch);
endTs = taosGetTimestampMs();
- int64_t delay = endTs - startTs;
- performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n",
+ uint64_t delay = endTs - startTs;
+ performancePrint("%s() LN%d, insert execution time is %"PRIu64"ms\n",
__func__, __LINE__, delay);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
pThreadInfo->cntDelay++;
pThreadInfo->totalDelay += delay;
- verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
- pThreadInfo->threadID,
- __func__, __LINE__, affectedRows);
- if ((affectedRows < 0) || (recOfBatch != affectedRows)) {
- errorPrint("[%d] %s() LN%d execInsert insert %"PRId64", affected rows: %"PRId64"\n%s\n",
+ if (recOfBatch != affectedRows) {
+ errorPrint("[%d] %s() LN%d execInsert insert %"PRIu64", affected rows: %"PRId64"\n%s\n",
pThreadInfo->threadID, __func__, __LINE__,
recOfBatch, affectedRows, buffer);
- goto free_and_statistics_interlace;
+ goto free_of_interlace;
}
pThreadInfo->totalAffectedRows += affectedRows;
int64_t currentPrintTime = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n",
+ printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n",
pThreadInfo->threadID,
pThreadInfo->totalInsertRows,
pThreadInfo->totalAffectedRows);
@@ -5168,13 +5242,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
}
}
-free_and_statistics_interlace:
+free_of_interlace:
tmfree(buffer);
-
- printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n",
- pThreadInfo->threadID,
- pThreadInfo->totalInsertRows,
- pThreadInfo->totalAffectedRows);
+ printStatPerThread(pThreadInfo);
return NULL;
}
@@ -5190,19 +5260,19 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__);
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
- int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
+ uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len;
char* buffer = calloc(maxSqlLen, 1);
if (NULL == buffer) {
- errorPrint( "Failed to alloc %d Bytes, reason:%s\n",
+ errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n",
maxSqlLen,
strerror(errno));
return NULL;
}
- int64_t lastPrintTime = taosGetTimestampMs();
- int64_t startTs = taosGetTimestampMs();
- int64_t endTs;
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
+ uint64_t endTs;
int64_t timeStampStep =
superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP;
@@ -5217,15 +5287,15 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pThreadInfo->samplePos = 0;
- for (int64_t tableSeq =
+ for (uint64_t tableSeq =
pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to;
tableSeq ++) {
int64_t start_time = pThreadInfo->start_time;
- int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
+ uint64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows);
- for (int64_t i = 0; i < insertRows;) {
+ for (uint64_t i = 0; i < insertRows;) {
/*
if (insert_interval) {
st = taosGetTimestampMs();
@@ -5247,7 +5317,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
pstr += len;
remainderBufLen -= len;
- int generated = generateProgressiveDataBuffer(
+ int64_t generated = generateProgressiveDataBuffer(
tableName, tableSeq, pThreadInfo, pstr, insertRows,
i, start_time,
&(pThreadInfo->samplePos),
@@ -5255,7 +5325,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
if (generated > 0)
i += generated;
else
- goto free_and_statistics_2;
+ goto free_of_progressive;
start_time += generated * timeStampStep;
pThreadInfo->totalInsertRows += generated;
@@ -5265,17 +5335,23 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
int64_t affectedRows = execInsert(pThreadInfo, buffer, generated);
endTs = taosGetTimestampMs();
- int64_t delay = endTs - startTs;
+ uint64_t delay = endTs - startTs;
performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n",
__func__, __LINE__, delay);
+ verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n",
+ pThreadInfo->threadID,
+ __func__, __LINE__, affectedRows);
if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay;
if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay;
pThreadInfo->cntDelay++;
pThreadInfo->totalDelay += delay;
- if (affectedRows < 0)
- goto free_and_statistics_2;
+ if (affectedRows < 0) {
+ errorPrint("%s() LN%d, affected rows: %"PRId64"\n",
+ __func__, __LINE__, affectedRows);
+ goto free_of_progressive;
+ }
pThreadInfo->totalAffectedRows += affectedRows;
@@ -5314,13 +5390,9 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
}
} // tableSeq
-free_and_statistics_2:
+free_of_progressive:
tmfree(buffer);
-
- printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n",
- pThreadInfo->threadID,
- pThreadInfo->totalInsertRows,
- pThreadInfo->totalAffectedRows);
+ printStatPerThread(pThreadInfo);
return NULL;
}
@@ -5349,6 +5421,7 @@ static void* syncWrite(void *sarg) {
// progressive mode
return syncWriteProgressive(pThreadInfo);
}
+
}
static void callBack(void *param, TAOS_RES *res, int code) {
@@ -5539,15 +5612,15 @@ static void startMultiThreadInsertData(int threads, char* db_name,
int startFrom;
if (superTblInfo) {
- int limit, offset;
+ int64_t limit;
+ uint64_t offset;
if ((NULL != g_args.sqlFile) && (superTblInfo->childTblExists == TBL_NO_EXISTS) &&
((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) {
printf("WARNING: offset and limit will not be used since the child tables not exists!\n");
}
- if ((superTblInfo->childTblExists == TBL_ALREADY_EXISTS)
- && (superTblInfo->childTblOffset >= 0)) {
+ if (superTblInfo->childTblExists == TBL_ALREADY_EXISTS) {
if ((superTblInfo->childTblLimit < 0)
|| ((superTblInfo->childTblOffset + superTblInfo->childTblLimit)
> (superTblInfo->childTblCount))) {
@@ -5592,7 +5665,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
exit(-1);
}
- int64_t childTblCount;
+ uint64_t childTblCount;
getChildNameOfSuperTableWithLimitAndOffset(
taos,
db_name, superTblInfo->sTblName,
@@ -5631,7 +5704,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
t_info->superTblInfo = superTblInfo;
t_info->start_time = start_time;
- t_info->minDelay = INT64_MAX;
+ t_info->minDelay = UINT64_MAX;
if ((NULL == superTblInfo) ||
(0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) {
@@ -5674,10 +5747,10 @@ static void startMultiThreadInsertData(int threads, char* db_name,
pthread_join(pids[i], NULL);
}
- int64_t totalDelay = 0;
- int64_t maxDelay = 0;
- int64_t minDelay = INT64_MAX;
- int64_t cntDelay = 1;
+ uint64_t totalDelay = 0;
+ uint64_t maxDelay = 0;
+ uint64_t minDelay = UINT64_MAX;
+ uint64_t cntDelay = 1;
double avgDelay = 0;
for (int i = 0; i < threads; i++) {
@@ -5686,7 +5759,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
tsem_destroy(&(t_info->lock_sem));
taos_close(t_info->taos);
- debugPrint("%s() LN%d, [%d] totalInsert=%"PRId64" totalAffected=%"PRId64"\n",
+ debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n",
__func__, __LINE__,
t_info->threadID, t_info->totalInsertRows,
t_info->totalAffectedRows);
@@ -5712,35 +5785,42 @@ static void startMultiThreadInsertData(int threads, char* db_name,
int64_t t = end - start;
if (superTblInfo) {
- printf("Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n",
+ fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
t / 1000.0, superTblInfo->totalInsertRows,
superTblInfo->totalAffectedRows,
threads, db_name, superTblInfo->sTblName,
(double)superTblInfo->totalInsertRows / (t / 1000.0));
- fprintf(g_fpOfInsertResult,
- "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n",
+
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n",
t / 1000.0, superTblInfo->totalInsertRows,
superTblInfo->totalAffectedRows,
threads, db_name, superTblInfo->sTblName,
(double)superTblInfo->totalInsertRows / (t / 1000.0));
+ }
} else {
- printf("Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n",
+ fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
t / 1000.0, g_args.totalInsertRows,
g_args.totalAffectedRows,
threads, db_name,
(double)g_args.totalInsertRows / (t / 1000.0));
- fprintf(g_fpOfInsertResult,
- "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n",
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
+ "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n",
t * 1000.0, g_args.totalInsertRows,
g_args.totalAffectedRows,
threads, db_name,
(double)g_args.totalInsertRows / (t / 1000.0));
+ }
}
- printf("insert delay, avg: %10.2fms, max: %"PRId64"ms, min: %"PRId64"ms\n\n",
+ fprintf(stderr, "insert delay, avg: %10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n",
avgDelay, maxDelay, minDelay);
- fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRId64"ms, min: %"PRId64"ms\n\n",
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n",
avgDelay, maxDelay, minDelay);
+ }
//taos_close(taos);
@@ -5780,11 +5860,11 @@ static void *readTable(void *sarg) {
printf("%d records:\n", totalData);
fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n");
- for (int j = 0; j < n; j++) {
+ for (uint64_t j = 0; j < n; j++) {
double totalT = 0;
- int count = 0;
- for (int i = 0; i < num_of_tables; i++) {
- sprintf(command, "select %s from %s%d where ts>= %" PRId64,
+ uint64_t count = 0;
+ for (uint64_t i = 0; i < num_of_tables; i++) {
+ sprintf(command, "select %s from %s%"PRIu64" where ts>= %" PRIu64,
aggreFunc[j], tb_prefix, i, sTime);
double t = taosGetTimestampMs();
@@ -5910,7 +5990,8 @@ static int insertTestProcess() {
return -1;
}
- printfInsertMetaToFile(g_fpOfInsertResult);
+ if (g_fpOfInsertResult)
+ printfInsertMetaToFile(g_fpOfInsertResult);
if (!g_args.answer_yes) {
printf("Press enter key to continue\n\n");
@@ -5921,7 +6002,8 @@ static int insertTestProcess() {
// create database and super tables
if(createDatabasesAndStables() != 0) {
- fclose(g_fpOfInsertResult);
+ if (g_fpOfInsertResult)
+ fclose(g_fpOfInsertResult);
return -1;
}
@@ -5937,11 +6019,13 @@ static int insertTestProcess() {
end = taosGetTimestampMs();
if (g_totalChildTables > 0) {
- printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n",
+ fprintf(stderr, "Spent %.4f seconds to create %d tables with %d thread(s)\n\n",
(end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
- fprintf(g_fpOfInsertResult,
+ if (g_fpOfInsertResult) {
+ fprintf(g_fpOfInsertResult,
"Spent %.4f seconds to create %d tables with %d thread(s)\n\n",
(end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
+ }
}
taosMsleep(1000);
@@ -6014,14 +6098,14 @@ static void *specifiedTableQuery(void *sarg) {
return NULL;
}
- int64_t st = 0;
- int64_t et = 0;
+ uint64_t st = 0;
+ uint64_t et = 0;
- int queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
+ uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes;
- int totalQueried = 0;
- int64_t lastPrintTime = taosGetTimestampMs();
- int64_t startTs = taosGetTimestampMs();
+ uint64_t totalQueried = 0;
+ uint64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
while(queryTimes --) {
if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) <
@@ -6067,12 +6151,12 @@ static void *specifiedTableQuery(void *sarg) {
et = taosGetTimestampMs();
- int64_t currentPrintTime = taosGetTimestampMs();
- int64_t endTs = taosGetTimestampMs();
+ uint64_t currentPrintTime = taosGetTimestampMs();
+ uint64_t endTs = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- debugPrint("%s() LN%d, endTs=%"PRId64"ms, startTs=%"PRId64"ms\n",
+ debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n",
__func__, __LINE__, endTs, startTs);
- printf("thread[%d] has currently completed queries: %d, QPS: %10.6f\n",
+ printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n",
pThreadInfo->threadID,
totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0)));
@@ -6124,14 +6208,14 @@ static void *superTableQuery(void *sarg) {
}
}
- int64_t st = 0;
- int64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval;
+ uint64_t st = 0;
+ uint64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval;
- int queryTimes = g_queryInfo.superQueryInfo.queryTimes;
- int totalQueried = 0;
- int64_t startTs = taosGetTimestampMs();
+ uint64_t queryTimes = g_queryInfo.superQueryInfo.queryTimes;
+ uint64_t totalQueried = 0;
+ uint64_t startTs = taosGetTimestampMs();
- int64_t lastPrintTime = taosGetTimestampMs();
+ uint64_t lastPrintTime = taosGetTimestampMs();
while(queryTimes --) {
if (g_queryInfo.superQueryInfo.queryInterval
&& (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) {
@@ -6158,7 +6242,7 @@ static void *superTableQuery(void *sarg) {
int64_t currentPrintTime = taosGetTimestampMs();
int64_t endTs = taosGetTimestampMs();
if (currentPrintTime - lastPrintTime > 30*1000) {
- printf("thread[%d] has currently completed queries: %d, QPS: %10.3f\n",
+ printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.3f\n",
pThreadInfo->threadID,
totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0)));
@@ -6167,7 +6251,7 @@ static void *superTableQuery(void *sarg) {
}
}
et = taosGetTimestampMs();
- printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRId64" - %"PRId64"] once queries duration:%.4fs\n\n",
+ printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRIu64" - %"PRIu64"] once queries duration:%.4fs\n\n",
taosGetSelfPthreadId(),
pThreadInfo->start_table_from,
pThreadInfo->end_table_to,
@@ -6222,7 +6306,7 @@ static int queryTestProcess() {
int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent;
int nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount;
- int64_t startTs = taosGetTimestampMs();
+ uint64_t startTs = taosGetTimestampMs();
if ((nSqlCount > 0) && (nConcurrent > 0)) {
@@ -6282,16 +6366,16 @@ static int queryTestProcess() {
ERROR_EXIT("memory allocation failed for create threads\n");
}
- int ntables = g_queryInfo.superQueryInfo.childTblCount;
+ uint64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
int threads = g_queryInfo.superQueryInfo.threadCnt;
- int a = ntables / threads;
+ uint64_t a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
- int b = 0;
+ uint64_t b = 0;
if (threads != 0) {
b = ntables % threads;
}
@@ -6333,12 +6417,12 @@ static int queryTestProcess() {
tmfree((char*)infosOfSub);
// taos_close(taos);// TODO: workaround to use separate taos connection;
- int64_t endTs = taosGetTimestampMs();
+ uint64_t endTs = taosGetTimestampMs();
- int totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried +
+ uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried +
g_queryInfo.superQueryInfo.totalQueried;
- printf("==== completed total queries: %d, the QPS of all threads: %10.3f====\n",
+ fprintf(stderr, "==== completed total queries: %"PRIu64", the QPS of all threads: %10.3f====\n",
totalQueried,
(double)(totalQueried/((endTs-startTs)/1000.0)));
return 0;
@@ -6433,7 +6517,7 @@ static void *superSubscribe(void *sarg) {
}
}
//et = taosGetTimestampMs();
- //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
+ //printf("========thread[%"PRIu64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
} while(0);
// start loop to consume result
@@ -6499,7 +6583,7 @@ static void *specifiedSubscribe(void *sarg) {
do {
//if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < g_queryInfo.specifiedQueryInfo.queryInterval) {
// taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval- (et - st)); // ms
- // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
+ // //printf("========sleep duration:%"PRIu64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to);
//}
//st = taosGetTimestampMs();
@@ -6519,7 +6603,7 @@ static void *specifiedSubscribe(void *sarg) {
}
}
//et = taosGetTimestampMs();
- //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
+ //printf("========thread[%"PRIu64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0);
} while(0);
// start loop to consume result
@@ -6589,8 +6673,9 @@ static int subscribeTestProcess() {
//==== create sub threads for query from super table
if ((g_queryInfo.specifiedQueryInfo.sqlCount <= 0) ||
(g_queryInfo.specifiedQueryInfo.concurrent <= 0)) {
- errorPrint("%s() LN%d, query sqlCount %"PRId64" or concurrent %"PRId64" is not correct.\n",
- __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount,
+ errorPrint("%s() LN%d, query sqlCount %"PRIu64" or concurrent %"PRIu64" is not correct.\n",
+ __func__, __LINE__,
+ g_queryInfo.specifiedQueryInfo.sqlCount,
g_queryInfo.specifiedQueryInfo.concurrent);
exit(-1);
}
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index 96a1cd16f8f724efbee8780a72bcfadea4121be1..e706ddefd6205c5ca4ec35f0722f69d8543e1706 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -72,7 +72,8 @@ enum _show_db_index {
TSDB_SHOW_DB_WALLEVEL_INDEX,
TSDB_SHOW_DB_FSYNC_INDEX,
TSDB_SHOW_DB_COMP_INDEX,
- TSDB_SHOW_DB_PRECISION_INDEX,
+ TSDB_SHOW_DB_CACHELAST_INDEX,
+ TSDB_SHOW_DB_PRECISION_INDEX,
TSDB_SHOW_DB_UPDATE_INDEX,
TSDB_SHOW_DB_STATUS_INDEX,
TSDB_MAX_SHOW_DB
@@ -83,10 +84,10 @@ enum _show_tables_index {
TSDB_SHOW_TABLES_NAME_INDEX,
TSDB_SHOW_TABLES_CREATED_TIME_INDEX,
TSDB_SHOW_TABLES_COLUMNS_INDEX,
- TSDB_SHOW_TABLES_METRIC_INDEX,
- TSDB_SHOW_TABLES_UID_INDEX,
+ TSDB_SHOW_TABLES_METRIC_INDEX,
+ TSDB_SHOW_TABLES_UID_INDEX,
TSDB_SHOW_TABLES_TID_INDEX,
- TSDB_SHOW_TABLES_VGID_INDEX,
+ TSDB_SHOW_TABLES_VGID_INDEX,
TSDB_MAX_SHOW_TABLES
};
@@ -99,11 +100,13 @@ enum _describe_table_index {
TSDB_MAX_DESCRIBE_METRIC
};
+#define COL_NOTE_LEN 128
+
typedef struct {
char field[TSDB_COL_NAME_LEN + 1];
char type[16];
int length;
- char note[128];
+ char note[COL_NOTE_LEN];
} SColDes;
typedef struct {
@@ -132,6 +135,7 @@ typedef struct {
int8_t wallevel;
int32_t fsync;
int8_t comp;
+ int8_t cachelast;
char precision[8]; // time resolution
int8_t update;
char status[16];
@@ -358,19 +362,19 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
arguments->data_batch = atoi(arg);
if (arguments->data_batch >= INT16_MAX) {
arguments->data_batch = INT16_MAX - 1;
- }
+ }
break;
- case 'L':
+ case 'L':
{
int32_t len = atoi(arg);
if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
len = TSDB_MAX_ALLOWED_SQL_LEN;
} else if (len < TSDB_MAX_SQL_LEN) {
len = TSDB_MAX_SQL_LEN;
- }
+ }
arguments->max_sql_len = len;
break;
- }
+ }
case 't':
arguments->table_batch = atoi(arg);
break;
@@ -413,12 +417,12 @@ static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, i
struct arguments g_args = {
// connection option
- NULL,
- "root",
+ NULL,
+ "root",
#ifdef _TD_POWER_
- "powerdb",
+ "powerdb",
#else
- "taosdata",
+ "taosdata",
#endif
0,
"",
@@ -523,7 +527,7 @@ int main(int argc, char *argv[]) {
/* Parse our arguments; every option seen by parse_opt will be
reflected in arguments. */
- if (argc > 1)
+ if (argc > 2)
parse_args(argc, argv, &g_args);
argp_parse(&argp, argc, argv, 0, 0, &g_args);
@@ -675,10 +679,10 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS
}
sprintf(tempCommand, "show tables like %s", table);
-
- result = taos_query(taosCon, tempCommand);
+
+ result = taos_query(taosCon, tempCommand);
int32_t code = taos_errno(result);
-
+
if (code != 0) {
fprintf(stderr, "failed to run command %s\n", tempCommand);
free(tempCommand);
@@ -705,12 +709,12 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS
free(tempCommand);
return 0;
}
-
+
sprintf(tempCommand, "show stables like %s", table);
-
- result = taos_query(taosCon, tempCommand);
+
+ result = taos_query(taosCon, tempCommand);
code = taos_errno(result);
-
+
if (code != 0) {
fprintf(stderr, "failed to run command %s\n", tempCommand);
free(tempCommand);
@@ -748,7 +752,7 @@ int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter, char* metric
return -1;
}
}
-
+
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, meter, TSDB_TABLE_NAME_LEN);
tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
@@ -770,7 +774,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
}
sprintf(tmpCommand, "select tbname from %s", metric);
-
+
TAOS_RES *res = taos_query(taosCon, tmpCommand);
int32_t code = taos_errno(res);
if (code != 0) {
@@ -792,20 +796,20 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
}
TAOS_FIELD *fields = taos_fetch_fields(res);
-
+
int32_t numOfTable = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
+ while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes);
tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN);
-
- taosWrite(fd, &tableRecord, sizeof(STableRecord));
+
+ taosWrite(fd, &tableRecord, sizeof(STableRecord));
numOfTable++;
}
taos_free_result(res);
lseek(fd, 0, SEEK_SET);
-
+
int maxThreads = arguments->thread_num;
int tableOfPerFile ;
if (numOfTable <= arguments->thread_num) {
@@ -815,16 +819,16 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
tableOfPerFile = numOfTable / arguments->thread_num;
if (0 != numOfTable % arguments->thread_num) {
tableOfPerFile += 1;
- }
+ }
}
char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
if (NULL == tblBuf){
- fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
+ fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord));
close(fd);
return -1;
}
-
+
int32_t numOfThread = *totalNumOfThread;
int subFd = -1;
for (; numOfThread < maxThreads; numOfThread++) {
@@ -838,7 +842,7 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
(void)remove(tmpBuf);
}
sprintf(tmpBuf, ".select-tbname.tmp");
- (void)remove(tmpBuf);
+ (void)remove(tmpBuf);
free(tblBuf);
close(fd);
return -1;
@@ -856,11 +860,11 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu
sprintf(tmpBuf, ".select-tbname.tmp");
(void)remove(tmpBuf);
-
+
if (fd >= 0) {
close(fd);
fd = -1;
- }
+ }
*totalNumOfThread = numOfThread;
@@ -884,7 +888,7 @@ int taosDumpOut(struct arguments *arguments) {
} else {
sprintf(tmpBuf, "dbs.sql");
}
-
+
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@@ -916,9 +920,9 @@ int taosDumpOut(struct arguments *arguments) {
taosDumpCharset(fp);
sprintf(command, "show databases");
- result = taos_query(taos, command);
+ result = taos_query(taos, command);
int32_t code = taos_errno(result);
-
+
if (code != 0) {
fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(result));
goto _exit_failure;
@@ -958,12 +962,12 @@ int taosDumpOut(struct arguments *arguments) {
strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
if (arguments->with_property) {
dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
- dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
+ dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
- dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
+ dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
- strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
+ strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes);
//dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
//dbInfos[count]->daysToKeep1;
//dbInfos[count]->daysToKeep2;
@@ -974,8 +978,9 @@ int taosDumpOut(struct arguments *arguments) {
dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
+ dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
- strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
+ strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes);
//dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
}
@@ -1007,8 +1012,8 @@ int taosDumpOut(struct arguments *arguments) {
g_resultStatistics.totalDatabasesOfDumpOut++;
sprintf(command, "use %s", dbInfos[0]->name);
-
- result = taos_query(taos, command);
+
+ result = taos_query(taos, command);
int32_t code = taos_errno(result);
if (code != 0) {
fprintf(stderr, "invalid database %s\n", dbInfos[0]->name);
@@ -1038,7 +1043,7 @@ int taosDumpOut(struct arguments *arguments) {
int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name);
if (0 == ret) {
superTblCnt++;
- }
+ }
}
retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd);
}
@@ -1050,7 +1055,7 @@ int taosDumpOut(struct arguments *arguments) {
goto _clean_tmp_file;
}
}
-
+
// TODO: save dump super table into result_output.txt
fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt);
g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
@@ -1076,7 +1081,7 @@ int taosDumpOut(struct arguments *arguments) {
taos_close(taos);
taos_free_result(result);
tfree(command);
- taosFreeDbInfos();
+ taosFreeDbInfos();
fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows);
return 0;
@@ -1097,8 +1102,8 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
char sqlstr[COMMAND_SIZE];
sprintf(sqlstr, "describe %s.%s;", dbName, table);
-
- res = taos_query(taosCon, sqlstr);
+
+ res = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
@@ -1128,23 +1133,23 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
if (isSuperTable) {
return count;
}
-
+
// if chidl-table have tag, using select tagName from table to get tagValue
for (int i = 0 ; i < count; i++) {
if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
sprintf(sqlstr, "select %s from %s.%s", tableDes->cols[i].field, dbName, table);
-
- res = taos_query(taosCon, sqlstr);
+
+ res = taos_query(taosCon, sqlstr);
code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
taos_free_result(res);
return -1;
}
-
- fields = taos_fetch_fields(res);
+
+ fields = taos_fetch_fields(res);
row = taos_fetch_row(res);
if (NULL == row) {
@@ -1159,7 +1164,7 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
res = NULL;
continue;
}
-
+
int32_t* length = taos_fetch_lengths(res);
//int32_t* length = taos_fetch_lengths(tmpResult);
@@ -1188,16 +1193,16 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
case TSDB_DATA_TYPE_BINARY: {
memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
tableDes->cols[i].note[0] = '\'';
- char tbuf[COMMAND_SIZE];
- converStringToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE);
+ char tbuf[COL_NOTE_LEN];
+ converStringToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf);
*(pstr++) = '\'';
break;
}
case TSDB_DATA_TYPE_NCHAR: {
memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note));
- char tbuf[COMMAND_SIZE];
- convertNCharToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE);
+ char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' '
+ convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN);
sprintf(tableDes->cols[i].note, "\'%s\'", tbuf);
break;
}
@@ -1219,9 +1224,9 @@ int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCo
default:
break;
}
-
+
taos_free_result(res);
- res = NULL;
+ res = NULL;
}
return count;
@@ -1280,9 +1285,10 @@ void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
if (isDumpProperty) {
pstr += sprintf(pstr,
- "TABLES %d VGROUPS %d REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d WALLEVEL %d FYNC %d COMP %d PRECISION '%s' UPDATE %d",
- dbInfo->ntables, dbInfo->vgroups, dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache,
- dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->wallevel, dbInfo->fsync, dbInfo->comp, dbInfo->precision, dbInfo->update);
+ "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
+ dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache,
+ dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->fsync, dbInfo->cachelast,
+ dbInfo->comp, dbInfo->precision, dbInfo->update);
}
pstr += sprintf(pstr, ";");
@@ -1293,8 +1299,8 @@ void* taosDumpOutWorkThreadFp(void *arg)
{
SThreadParaObj *pThread = (SThreadParaObj*)arg;
STableRecord tableRecord;
- int fd;
-
+ int fd;
+
char tmpBuf[TSDB_FILENAME_LEN*4] = {0};
sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex);
fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH);
@@ -1305,13 +1311,13 @@ void* taosDumpOutWorkThreadFp(void *arg)
FILE *fp = NULL;
memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
-
+
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex);
} else {
sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex);
}
-
+
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@@ -1321,13 +1327,13 @@ void* taosDumpOutWorkThreadFp(void *arg)
memset(tmpBuf, 0, TSDB_FILENAME_LEN);
sprintf(tmpBuf, "use %s", pThread->dbName);
-
- TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
+
+ TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf);
int32_t code = taos_errno(tmpResult);
if (code != 0) {
fprintf(stderr, "invalid database %s\n", pThread->dbName);
taos_free_result(tmpResult);
- fclose(fp);
+ fclose(fp);
close(fd);
return NULL;
}
@@ -1345,7 +1351,7 @@ void* taosDumpOutWorkThreadFp(void *arg)
// TODO: sum table count and table rows by self
pThread->tablesOfDumpOut++;
pThread->rowsOfDumpOut += ret;
-
+
if (pThread->rowsOfDumpOut >= lastRowsPrint) {
printf(" %"PRId64 " rows already be dumpout from database %s\n", pThread->rowsOfDumpOut, pThread->dbName);
lastRowsPrint += 5000000;
@@ -1355,15 +1361,15 @@ void* taosDumpOutWorkThreadFp(void *arg)
if (tablesInOneFile >= g_args.table_batch) {
fclose(fp);
tablesInOneFile = 0;
-
- memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
+
+ memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128);
if (g_args.outpath[0] != 0) {
sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex);
} else {
sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex);
}
fileNameIndex++;
-
+
fp = fopen(tmpBuf, "w");
if (fp == NULL) {
fprintf(stderr, "failed to open file %s\n", tmpBuf);
@@ -1377,7 +1383,7 @@ void* taosDumpOutWorkThreadFp(void *arg)
taos_free_result(tmpResult);
close(fd);
- fclose(fp);
+ fclose(fp);
return NULL;
}
@@ -1393,7 +1399,7 @@ static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, i
pThread->threadIndex = t;
pThread->totalThreads = numOfThread;
tstrncpy(pThread->dbName, dbName, TSDB_TABLE_NAME_LEN);
- pThread->taosCon = taosCon;
+ pThread->taosCon = taosCon;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
@@ -1408,7 +1414,7 @@ static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, i
pthread_join(threadObj[t].threadID, NULL);
}
- // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt
+ // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt
int64_t totalRowsOfDumpOut = 0;
int64_t totalChildTblsOfDumpOut = 0;
for (int32_t t = 0; t < numOfThread; ++t) {
@@ -1449,7 +1455,7 @@ int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName) {
}
-int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
+int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
{
TAOS_ROW row;
int fd = -1;
@@ -1457,8 +1463,8 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
char sqlstr[TSDB_MAX_SQL_LEN] = {0};
sprintf(sqlstr, "show %s.stables", dbName);
-
- TAOS_RES* res = taos_query(taosCon, sqlstr);
+
+ TAOS_RES* res = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason: %s\n", sqlstr, taos_errstr(res));
@@ -1478,13 +1484,13 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
(void)remove(".stables.tmp");
exit(-1);
}
-
- while ((row = taos_fetch_row(res)) != NULL) {
+
+ while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
taosWrite(fd, &tableRecord, sizeof(STableRecord));
- }
-
+ }
+
taos_free_result(res);
(void)lseek(fd, 0, SEEK_SET);
@@ -1492,7 +1498,7 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
while (1) {
ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord));
if (readLen <= 0) break;
-
+
int ret = taosDumpStable(tableRecord.name, fp, taosCon, dbName);
if (0 == ret) {
superTblCnt++;
@@ -1505,8 +1511,8 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp)
close(fd);
(void)remove(".stables.tmp");
-
- return 0;
+
+ return 0;
}
@@ -1516,19 +1522,19 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
STableRecord tableRecord;
taosDumpCreateDbClause(dbInfo, arguments->with_property, fp);
-
+
fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfo->name);
g_resultStatistics.totalDatabasesOfDumpOut++;
char sqlstr[TSDB_MAX_SQL_LEN] = {0};
fprintf(fp, "USE %s;\n\n", dbInfo->name);
-
+
(void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp);
sprintf(sqlstr, "show %s.tables", dbInfo->name);
-
- TAOS_RES* res = taos_query(taosCon, sqlstr);
+
+ TAOS_RES* res = taos_query(taosCon, sqlstr);
int code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res));
@@ -1547,15 +1553,15 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
}
TAOS_FIELD *fields = taos_fetch_fields(res);
-
+
int32_t numOfTable = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
+ while ((row = taos_fetch_row(res)) != NULL) {
memset(&tableRecord, 0, sizeof(STableRecord));
tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes);
tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes);
-
+
taosWrite(fd, &tableRecord, sizeof(STableRecord));
-
+
numOfTable++;
}
taos_free_result(res);
@@ -1570,7 +1576,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
tableOfPerFile = numOfTable / g_args.thread_num;
if (0 != numOfTable % g_args.thread_num) {
tableOfPerFile += 1;
- }
+ }
}
char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord));
@@ -1579,7 +1585,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
close(fd);
return -1;
}
-
+
int32_t numOfThread = 0;
int subFd = -1;
for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) {
@@ -1616,7 +1622,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
close(fd);
fd = -1;
}
-
+
taos_free_result(res);
// start multi threads to dumpout
@@ -1624,7 +1630,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao
for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) {
sprintf(tmpBuf, ".tables.tmp.%d", loopCnt);
(void)remove(tmpBuf);
- }
+ }
free(tblBuf);
return 0;
@@ -1735,7 +1741,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
char *pstr = NULL;
TAOS_ROW row = NULL;
int numFields = 0;
-
+
if (arguments->schemaonly) {
return 0;
}
@@ -1750,11 +1756,11 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
pstr = tmpBuffer;
char sqlstr[1024] = {0};
- sprintf(sqlstr,
- "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
+ sprintf(sqlstr,
+ "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
dbName, tbname, arguments->start_time, arguments->end_time);
-
- TAOS_RES* tmpResult = taos_query(taosCon, sqlstr);
+
+ TAOS_RES* tmpResult = taos_query(taosCon, sqlstr);
int32_t code = taos_errno(tmpResult);
if (code != 0) {
fprintf(stderr, "failed to run command %s, reason: %s\n", sqlstr, taos_errstr(tmpResult));
@@ -1774,7 +1780,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
while ((row = taos_fetch_row(tmpResult)) != NULL) {
pstr = tmpBuffer;
curr_sqlstr_len = 0;
-
+
int32_t* length = taos_fetch_lengths(tmpResult); // act len
if (count == 0) {
@@ -1829,7 +1835,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
//pstr = stpcpy(pstr, tbuf);
//*(pstr++) = '\'';
- pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
+ pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
break;
}
case TSDB_DATA_TYPE_NCHAR: {
@@ -1857,10 +1863,10 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ") ");
- totalRows++;
+ totalRows++;
count++;
fprintf(fp, "%s", tmpBuffer);
-
+
if (totalRows >= lastRowsPrint) {
printf(" %"PRId64 " rows already be dumpout from %s.%s\n", totalRows, dbName, tbname);
lastRowsPrint += 5000000;
@@ -2206,7 +2212,7 @@ static FILE* taosOpenDumpInFile(char *fptr) {
}
char *fname = full_path.we_wordv[0];
-
+
FILE *f = fopen(fname, "r");
if (f == NULL) {
fprintf(stderr, "ERROR: failed to open file %s\n", fname);
@@ -2240,7 +2246,7 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c
line[--read_len] = '\0';
//if (read_len == 0 || isCommentLine(line)) { // line starts with #
- if (read_len == 0 ) {
+ if (read_len == 0 ) {
continue;
}
@@ -2259,8 +2265,8 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c
}
memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
- cmd_len = 0;
-
+ cmd_len = 0;
+
if (lineNo >= lastRowsPrint) {
printf(" %d lines already be executed from file %s\n", lineNo, fileName);
lastRowsPrint += 5000000;
@@ -2300,7 +2306,7 @@ static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args)
if (totalThreads > tsSqlFileNum) {
totalThreads = tsSqlFileNum;
}
-
+
SThreadParaObj *threadObj = (SThreadParaObj *)calloc(totalThreads, sizeof(SThreadParaObj));
for (int32_t t = 0; t < totalThreads; ++t) {
pThread = threadObj + t;
@@ -2330,7 +2336,7 @@ static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args)
int taosDumpIn(struct arguments *arguments) {
assert(arguments->isDumpIn);
-
+
TAOS *taos = NULL;
FILE *fp = NULL;
@@ -2345,22 +2351,22 @@ int taosDumpIn(struct arguments *arguments) {
int32_t tsSqlFileNumOfTbls = tsSqlFileNum;
if (tsDbSqlFile[0] != 0) {
tsSqlFileNumOfTbls--;
-
+
fp = taosOpenDumpInFile(tsDbSqlFile);
if (NULL == fp) {
fprintf(stderr, "failed to open input file %s\n", tsDbSqlFile);
return -1;
}
fprintf(stderr, "Success Open input file: %s\n", tsDbSqlFile);
-
+
taosLoadFileCharset(fp, tsfCharset);
-
+
taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile);
}
if (0 != tsSqlFileNumOfTbls) {
taosStartDumpInWorkThreads(taos, arguments);
- }
+ }
taos_close(taos);
taosFreeSQLFiles();
diff --git a/src/plugins/http/inc/httpInt.h b/src/plugins/http/inc/httpInt.h
index 634468f3ccfd041300e0b99c728a8fb6a9b8fbae..0a5822b90893861eb12aea756bf877bc81730413 100644
--- a/src/plugins/http/inc/httpInt.h
+++ b/src/plugins/http/inc/httpInt.h
@@ -171,7 +171,7 @@ typedef struct HttpThread {
EpollFd pollFd;
int32_t numOfContexts;
int32_t threadId;
- char label[HTTP_LABEL_SIZE];
+ char label[HTTP_LABEL_SIZE << 1];
bool (*processData)(HttpContext *pContext);
} HttpThread;
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 818e7c6b039ffd42c8a8dca32f365f4f8590b508..feaa205c3ef49d988049d7b0253a8ad62facb970 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -4466,13 +4466,18 @@ SArray* getOrderCheckColumns(SQueryAttr* pQuery) {
for(int32_t i = 0; i < numOfCols; ++i) {
SColIndex* index = taosArrayGet(pOrderColumns, i);
for(int32_t j = 0; j < pQuery->numOfOutput; ++j) {
- if (index->colId == pQuery->pExpr1[j].base.colInfo.colId) {
+ SSqlExpr* pExpr = &pQuery->pExpr1[j].base;
+ int32_t functionId = pExpr->functionId;
+
+ if (index->colId == pExpr->colInfo.colId &&
+ (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS)) {
index->colIndex = j;
- index->colId = pQuery->pExpr1[j].base.resColId;
+ index->colId = pExpr->resColId;
}
}
}
}
+
return pOrderColumns;
}
@@ -4804,7 +4809,7 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) {
}
// Return result of the previous group in the firstly.
- if (newgroup && pRes->info.rows > 0) {
+ if (*newgroup && pRes->info.rows > 0) {
pArithInfo->existDataBlock = pBlock;
clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput);
return pInfo->pRes;
diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c
index db3c72c2fc8a1a1c326c37974bfde0741f93004b..2ec508f050ea7cf916f8b1c1e6ff37d18fb96523 100644
--- a/src/rpc/src/rpcMain.c
+++ b/src/rpc/src/rpcMain.c
@@ -295,7 +295,7 @@ void *rpcOpen(const SRpcInit *pInit) {
return NULL;
}
} else {
- pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 30);
+ pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 20);
if ( pRpc->pCache == NULL ) {
tError("%s failed to init connection cache", pRpc->label);
rpcClose(pRpc);
diff --git a/src/util/src/tnettest.c b/src/util/src/tnettest.c
index 131063b0de8b456ebd74011f74f95d199695b75b..318a2d48609a129bcf6094455ff2a7cc8f7c0467 100644
--- a/src/util/src/tnettest.c
+++ b/src/util/src/tnettest.c
@@ -539,7 +539,7 @@ static void taosNetTestServer(char *host, int32_t startPort, int32_t pkgLen) {
}
void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) {
-// tscEmbedded = 1;
+ tscEmbedded = 1;
if (host == NULL) host = tsLocalFqdn;
if (port == 0) port = tsServerPort;
if (pkgLen <= 10) pkgLen = 1000;
@@ -550,6 +550,7 @@ void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) {
} else if (0 == strcmp("server", role)) {
taosNetTestServer(host, port, pkgLen);
} else if (0 == strcmp("rpc", role)) {
+ tscEmbedded = 0;
taosNetTestRpc(host, port, pkgLen);
} else if (0 == strcmp("sync", role)) {
taosNetCheckSync(host, port);
@@ -559,5 +560,5 @@ void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) {
taosNetTestStartup(host, port);
}
-// tscEmbedded = 0;
+ tscEmbedded = 0;
}
diff --git a/tests/pytest/client/version.py b/tests/pytest/client/version.py
index 93b302f619d2ab6da2a3a24950ae70999e968425..7cbeeb60df54e8d89fdcc7815a2b2757793dfaec 100644
--- a/tests/pytest/client/version.py
+++ b/tests/pytest/client/version.py
@@ -28,20 +28,22 @@ class TDTestCase:
sql = "select server_version()"
ret = tdSql.query(sql)
version = tdSql.getData(0, 0)[0:3]
- expectedVersion = "2.0"
- if(version == expectedVersion):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, 0, 0, version, expectedVersion))
+ expectedVersion_dev = "2.0"
+ expectedVersion_master = "2.1"
+ if(version == expectedVersion_dev or version == expectedVersion_master):
+ tdLog.info("sql:%s, row:%d col:%d data:%s == expect" % (sql, 0, 0, version))
else:
- tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s" % (sql, 0, 0, version, expectedVersion))
+ tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s or %s " % (sql, 0, 0, version, expectedVersion_dev, expectedVersion_master))
sql = "select client_version()"
ret = tdSql.query(sql)
version = tdSql.getData(0, 0)[0:3]
- expectedVersion = "2.0"
- if(version == expectedVersion):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, 0, 0, version, expectedVersion))
+ expectedVersion_dev = "2.0"
+ expectedVersion_master = "2.1"
+ if(version == expectedVersion_dev or version == expectedVersion_master):
+ tdLog.info("sql:%s, row:%d col:%d data:%s == expect" % (sql, 0, 0, version))
else:
- tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s" % (sql, 0, 0, version, expectedVersion))
+ tdLog.exit("sql:%s, row:%d col:%d data:%s != expect:%s or %s " % (sql, 0, 0, version, expectedVersion_dev, expectedVersion_master))
def stop(self):
diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp
index 5eb5403395f4956bd2c6332e58566ccfe4ccc56a..a00b2d830c2e4a3261fcb6fc9a1769b2b583799f 100644
--- a/tests/pytest/crash_gen/valgrind_taos.supp
+++ b/tests/pytest/crash_gen/valgrind_taos.supp
@@ -17332,3 +17332,168 @@
fun:PyVectorcall_Call
fun:_PyEval_EvalFrameDefault
}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ fun:lib_build_and_cache_attr
+ fun:lib_getattr
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+ fun:PyEval_EvalCode
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyVectorcall_Call
+ fun:_PyEval_EvalFrameDefault
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ fun:lib_build_and_cache_attr
+ fun:lib_getattr
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:PyEval_EvalCode
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ fun:_my_Py_InitModule
+ fun:lib_getattr
+ fun:b_init_cffi_1_0_external_module
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyObject_CallMethod
+ fun:_cffi_init
+ fun:PyInit__bcrypt
+ fun:_PyImport_LoadDynamicModuleWithSpec
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyVectorcall_Call
+ fun:_PyEval_EvalFrameDefault
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ fun:_PyObject_GC_New
+ fun:lib_getattr
+ fun:ffi_internal_new
+ fun:b_init_cffi_1_0_external_module
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyObject_CallMethod
+ fun:_cffi_init
+ fun:PyInit__bcrypt
+ fun:_PyImport_LoadDynamicModuleWithSpec
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyVectorcall_Call
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ fun:lib_build_cpython_func.isra.87
+ fun:lib_build_and_cache_attr
+ fun:lib_getattr
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ fun:lib_build_and_cache_attr
+ fun:lib_getattr
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyFunction_Vectorcall
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ obj:/usr/bin/python3.8
+ fun:_PyEval_EvalFrameDefault
+ fun:_PyEval_EvalCodeWithName
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ fun:_my_Py_InitModule
+ fun:b_init_cffi_1_0_external_module
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyObject_CallMethod
+ fun:_cffi_init
+ fun:PyInit__bcrypt
+ fun:_PyImport_LoadDynamicModuleWithSpec
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyVectorcall_Call
+ fun:_PyEval_EvalFrameDefault
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ fun:_my_Py_InitModule
+ fun:b_init_cffi_1_0_external_module
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyObject_CallMethod
+ fun:PyInit__openssl
+ fun:_PyImport_LoadDynamicModuleWithSpec
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyVectorcall_Call
+ fun:_PyEval_EvalFrameDefault
+}
+{
+
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ fun:_PyObject_GC_New
+ fun:ffi_internal_new
+ fun:b_init_cffi_1_0_external_module
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyObject_CallMethod
+ fun:_cffi_init
+ fun:PyInit__bcrypt
+ fun:_PyImport_LoadDynamicModuleWithSpec
+ obj:/usr/bin/python3.8
+ obj:/usr/bin/python3.8
+ fun:PyVectorcall_Call
+}
\ No newline at end of file