Commit ac317434 authored by Shuduo Sang

merge with develop branch.

......@@ -7,41 +7,22 @@ platform:
arch: amd64
steps:
- name: smoke_test
image: python:3.8
- name: build
image: gcc
commands:
- apt-get update
- apt-get install -y cmake build-essential gcc
- pip3 install psutil
- pip3 install guppy3
- pip3 install src/connector/python/linux/python3/
- apt-get install -y cmake build-essential
- mkdir debug
- cd debug
- cmake ..
- make
- cd ../tests
- ./test-all.sh smoke
trigger:
event:
- pull_request
when:
branch:
- develop
- master
- name: crash_gen
image: python:3.8
commands:
- pip3 install requests
- pip3 install src/connector/python/linux/python3/
- pip3 install psutil
- pip3 install guppy3
- cd tests/pytest
- ./crash_gen.sh -a -p -t 4 -s 2000
when:
branch:
- develop
- master
---
kind: pipeline
name: test_arm64
......@@ -60,6 +41,9 @@ steps:
- cd debug
- cmake .. -DCPUTYPE=aarch64 > /dev/null
- make
trigger:
event:
- pull_request
when:
branch:
- develop
......@@ -82,6 +66,9 @@ steps:
- cd debug
- cmake .. -DCPUTYPE=aarch32 > /dev/null
- make
trigger:
event:
- pull_request
when:
branch:
- develop
......@@ -106,11 +93,13 @@ steps:
- cd debug
- cmake ..
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
---
kind: pipeline
name: build_xenial
......@@ -129,6 +118,9 @@ steps:
- cd debug
- cmake ..
- make
trigger:
event:
- pull_request
when:
branch:
- develop
......@@ -151,6 +143,32 @@ steps:
- cd debug
- cmake ..
- make
trigger:
event:
- pull_request
when:
branch:
- develop
- master
---
kind: pipeline
name: build_centos7
platform:
os: linux
arch: amd64
steps:
- name: build
image: ansible/centos7-ansible
commands:
- yum install -y gcc gcc-c++ make cmake
- mkdir debug
- cd debug
- cmake ..
- make
trigger:
event:
- pull_request
when:
branch:
- develop
......
......@@ -2,6 +2,7 @@ build/
.vscode/
.idea/
cmake-build-debug/
cmake-build-release/
cscope.out
.DS_Store
debug/
......
......@@ -3,7 +3,7 @@ IF (CMAKE_VERSION VERSION_LESS 3.0)
PROJECT(TDengine CXX)
SET(PROJECT_VERSION_MAJOR "${LIB_MAJOR_VERSION}")
SET(PROJECT_VERSION_MINOR "${LIB_MINOR_VERSION}")
SET(PROJECT_VERSION_PATCH"${LIB_PATCH_VERSION}")
SET(PROJECT_VERSION_PATCH "${LIB_PATCH_VERSION}")
SET(PROJECT_VERSION "${LIB_VERSION_STRING}")
ELSE ()
CMAKE_POLICY(SET CMP0048 NEW)
......@@ -42,6 +42,13 @@ INCLUDE(cmake/env.inc)
INCLUDE(cmake/version.inc)
INCLUDE(cmake/install.inc)
IF (CMAKE_SYSTEM_NAME MATCHES "Linux")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pipe -Wall -Wshadow -Werror")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pipe -Wall -Wshadow -Werror")
ENDIF ()
MESSAGE(STATUS "CMAKE_C_FLAGS: ${CMAKE_C_FLAGS}")
MESSAGE(STATUS "CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}")
ADD_SUBDIRECTORY(deps)
ADD_SUBDIRECTORY(src)
ADD_SUBDIRECTORY(tests)
......
......@@ -94,7 +94,7 @@ def pre_test(){
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
pip3 install ${WKC}/src/connector/python/linux/python3/
pip3 install ${WKC}/src/connector/python
'''
return 1
}
......
......@@ -157,7 +157,7 @@ IF (TD_WINDOWS)
IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
ENDIF ()
SET(DEBUG_FLAGS "/Zi /W3 /GL")
SET(DEBUG_FLAGS "/fsanitize=thread /fsanitize=leak /fsanitize=memory /fsanitize=undefined /fsanitize=hwaddress /Zi /W3 /GL")
SET(RELEASE_FLAGS "/W0 /O3 /GL")
ENDIF ()
......
......@@ -400,27 +400,22 @@ Usage of the Python connector is covered in the [video tutorial](https://www.taosdata.com/blog/2020/
#### Linux
Users can find the connector install packages for python2 and python3 under the src/connector/python folder of the source code (or /connector/python in the tar.gz). They can be installed with the pip command:
Users can find the connector install package under the src/connector/python folder of the source code (or /connector/python in the tar.gz). It can be installed with the pip command:
`pip install src/connector/python/linux/python2/`
`pip install src/connector/python/`
`pip3 install src/connector/python/linux/python3/`
`pip3 install src/connector/python/`
#### Windows
With the Windows TDengine client installed, copy the file "C:\TDengine\driver\taos.dll" into the "C:\windows\system32" directory, then open the Windows <em>cmd</em> command-line interface
```cmd
cd C:\TDengine\connector\python\windows
python -m pip install python2\
```
```cmd
cd C:\TDengine\connector\python\windows
python -m pip install python3\
cd C:\TDengine\connector\python
python -m pip install .
```
* If pip is not available on the machine, users can copy the taos folder under src/connector/python/python3 or src/connector/python/python2 into the application directory and use it there.
* If pip is not available on the machine, users can copy the taos folder under src/connector/python into the application directory and use it there.
For the Windows client, after installing the TDengine Windows client, simply copy C:\TDengine\driver\taos.dll into the C:\windows\system32 directory.
### Usage
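As an illustration (not part of this commit), a minimal usage sketch once the package is installed, assuming a TDengine server on localhost with the default root/taosdata account:

```python
# Minimal sketch: connect, write one row, read it back.
import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata")
cursor = conn.cursor()
cursor.execute("CREATE DATABASE IF NOT EXISTS demo")
cursor.execute("USE demo")
cursor.execute("CREATE TABLE IF NOT EXISTS t (ts TIMESTAMP, v INT)")
cursor.execute("INSERT INTO t VALUES (NOW, 1)")
cursor.execute("SELECT * FROM t")
for row in cursor.fetchall():
    print(row)  # each row is a tuple such as (datetime, 1)
conn.close()
```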
......
......@@ -16,7 +16,7 @@ The TDengine Grafana plugin is under /usr/local/taos/connector/grafanaplugin in the installation package
Taking CentOS 7.2 as an example, copy the grafanaplugin directory into the /var/lib/grafana/plugins directory and restart grafana.
```bash
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/tdengine
sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
```
### Using Grafana
......
......@@ -135,6 +135,14 @@ TDengine's default timestamp precision is millisecond, but by modifying the configuration parameter enableM
SHOW DATABASES;
```
- **Show the creation statement of a database**
```mysql
SHOW CREATE DATABASE db_name;
```
Commonly used for database migration. For an existing database, this returns its creation statement; executing that statement in another cluster creates a Database with exactly the same settings.
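As an illustration (not part of the original docs), a hedged Python sketch of that migration flow; the host names and the assumption that the creation statement sits in the second result column are hypothetical. The same pattern applies to SHOW CREATE TABLE and SHOW CREATE STABLE below.

```python
# Hypothetical sketch: copy a database definition from one cluster to another.
import taos

src = taos.connect(host="cluster-a", user="root", password="taosdata")
dst = taos.connect(host="cluster-b", user="root", password="taosdata")

cur = src.cursor()
cur.execute("SHOW CREATE DATABASE db_name")
create_stmt = cur.fetchall()[0][1]  # assumed row layout: (name, create statement)
dst.cursor().execute(create_stmt)   # recreates the database with identical settings
```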
## <a class="anchor" id="table"></a>表管理
- **Create a table**
......@@ -200,6 +208,13 @@ TDengine's default timestamp precision is millisecond, but by modifying the configuration parameter enableM
Wildcard matching: 1) '%' (percent sign) matches zero or more characters; 2) '\_' (underscore) matches exactly one character.
- **Show the creation statement of a table**
```mysql
SHOW CREATE TABLE tb_name;
```
Commonly used for database migration. For an existing table, this returns its creation statement; executing that statement in another cluster creates a table with exactly the same structure.
- **Modify the display character width online**
```mysql
......@@ -265,6 +280,13 @@ TDengine's default timestamp precision is millisecond, but by modifying the configuration parameter enableM
```
Lists all STables in the database and their related information, including each STable's name, creation time, number of columns, number of tags (TAG), and the number of tables created via that STable.
- **Show the creation statement of a STable**
```mysql
SHOW CREATE STABLE stb_name;
```
Commonly used for database migration. For an existing STable, this returns its creation statement; executing that statement in another cluster creates a STable with exactly the same structure.
- **Get the structure information of a STable**
```mysql
......
#!/bin/bash
#
# Generate the deb package for ubunt, or rpm package for centos, or tar.gz package for other linux os
# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os
set -e
#set -x
# releash.sh -v [cluster | edge]
# release.sh -v [cluster | edge]
# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...]
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
# -V [stable | beta]
......
......@@ -607,6 +607,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}"
......@@ -630,6 +631,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
......@@ -655,6 +657,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
......
......@@ -205,6 +205,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
......
......@@ -205,6 +205,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
......
......@@ -577,6 +577,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${powerd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/powerd' >> ${powerd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/power/bin/startPre.sh' >> ${powerd_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${powerd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${powerd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${powerd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${powerd_service_config}"
......@@ -599,6 +600,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Service]' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/tarbitrator' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${tarbitratord_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${tarbitratord_service_config}"
......@@ -624,6 +626,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'PIDFile=/usr/local/nginxd/logs/nginx.pid' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/local/nginxd/sbin/nginx' >> ${nginx_service_config}"
${csudo} bash -c "echo 'ExecStop=/usr/local/nginxd/sbin/nginx -s stop' >> ${nginx_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${nginx_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${nginx_service_config}"
......
......@@ -333,6 +333,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}"
......
......@@ -148,20 +148,11 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
......
......@@ -170,20 +170,11 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py
sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py
sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
......
......@@ -405,6 +405,7 @@ function install_service_on_systemd() {
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}"
${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/startPre.sh' >> ${taosd_service_config}"
${csudo} bash -c "echo 'TimeoutStopSec=1000000s' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}"
${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}"
......
......@@ -55,9 +55,9 @@ static void skipRemainValue(STSBuf* pTSBuf, tVariant* tag1) {
}
while (tsBufNextPos(pTSBuf)) {
STSElem el1 = tsBufGetElem(pTSBuf);
el1 = tsBufGetElem(pTSBuf);
int32_t res = tVariantCompare(el1.tag, tag1);
res = tVariantCompare(el1.tag, tag1);
if (res != 0) { // it is a record with new tag
return;
}
......@@ -2861,7 +2861,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
tscDebug("0x%"PRIx64" sub:%p retrieve numOfRows:%d totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d",
pParentSql->self, pSql, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);
if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0) && !(tscGetQueryInfo(&pParentSql->cmd, 0)->distinctTag)) {
tscError("0x%"PRIx64" sub:0x%"PRIx64" num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
pParentSql->self, pSql->self, tsMaxNumOfOrderedResults, num);
tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
......
......@@ -98,7 +98,7 @@ TEST(testCase, parse_time) {
taosParseTime(t41, &time, strlen(t41), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999);
int64_t k = timezone;
// int64_t k = timezone;
char t42[] = "1997-1-1T0:0:0.999999999Z";
taosParseTime(t42, &time, strlen(t42), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, 852048000999 - timezone * MILLISECOND_PER_SECOND);
......@@ -163,7 +163,7 @@ TEST(testCase, parse_time) {
taosParseTime(t13, &time, strlen(t13), TSDB_TIME_PRECISION_MILLI, 0);
EXPECT_EQ(time, -28800 * MILLISECOND_PER_SECOND);
char* t = "2021-01-08T02:11:40.000+00:00";
char t[] = "2021-01-08T02:11:40.000+00:00";
taosParseTime(t, &time, strlen(t), TSDB_TIME_PRECISION_MILLI, 0);
printf("%ld\n", time);
}
......
......@@ -87,6 +87,8 @@ tExprNode* exprTreeFromBinary(const void* data, size_t size);
tExprNode* exprTreeFromTableName(const char* tbnameCond);
tExprNode* exprdup(tExprNode* pTree);
void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput, void *param, int32_t order,
......
Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f
Subproject commit 7a26c432f8b4203e42344ff3290b9b9b01b983d5
......@@ -122,6 +122,7 @@
<exclude>**/FailOverTest.java</exclude>
<exclude>**/InvalidResultSetPointerTest.java</exclude>
<exclude>**/RestfulConnectionTest.java</exclude>
<exclude>**/TD4144Test.java</exclude>
</excludes>
<testFailureIgnore>true</testFailureIgnore>
</configuration>
......
package com.taosdata.jdbc.cases;

import com.taosdata.jdbc.TSDBConnection;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBResultSet;
import com.taosdata.jdbc.TSDBSubscribe;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

import java.sql.DriverManager;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

public class TD4144Test {

    private static TSDBConnection connection;
    private static final String host = "127.0.0.1";

    private static final String topic = "topic-meter-current-bg-10";
    private static final String sql = "select * from meters where current > 10";
    private static final String sql2 = "select * from meters where ts >= '2020-08-15 12:20:00.000'";

    @Test
    public void test() throws SQLException {
        TSDBSubscribe subscribe = null;
        TSDBResultSet res = null;
        boolean hasNext = false;

        try {
            subscribe = connection.subscribe(topic, sql, false);
            int count = 0;
            while (true) {
                // wait 1 second to avoid calling consume too frequently and putting pressure on the server
                TimeUnit.SECONDS.sleep(1);
                if (res == null) {
                    // consume data
                    res = subscribe.consume();
                    hasNext = res.next();
                }
                if (res == null) {
                    continue;
                }
                ResultSetMetaData metaData = res.getMetaData();
                int number = 0;
                while (hasNext) {
                    int columnCount = metaData.getColumnCount();
                    for (int i = 1; i <= columnCount; i++) {
                        System.out.print(metaData.getColumnLabel(i) + ": " + res.getString(i) + "\t");
                    }
                    System.out.println();
                    count++;
                    number++;
                    hasNext = res.next();
                    if (!hasNext) {
                        res.close();
                        res = null;
                        System.out.println("rows: " + count);
                    }
                    if (hasNext && number >= 10) {
                        System.out.println("batch" + number);
                        break;
                    }
                }
            }
        } catch (SQLException | InterruptedException throwables) {
            throwables.printStackTrace();
        } finally {
            if (subscribe != null)
                subscribe.close(true);
        }
    }

    @BeforeClass
    public static void beforeClass() throws SQLException {
        Properties properties = new Properties();
        properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
        String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
        connection = (DriverManager.getConnection(url, properties)).unwrap(TSDBConnection.class);
        try (Statement stmt = connection.createStatement()) {
            stmt.execute("drop database if exists power");
            stmt.execute("create database if not exists power");
            stmt.execute("use power");
            stmt.execute("create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int)");
            stmt.execute("create table d1001 using meters tags(\"Beijing.Chaoyang\", 2)");
            stmt.execute("create table d1002 using meters tags(\"Beijing.Haidian\", 2)");
            stmt.execute("insert into d1001 values(\"2020-08-15 12:00:00.000\", 12, 220, 1),(\"2020-08-15 12:10:00.000\", 12.3, 220, 2),(\"2020-08-15 12:20:00.000\", 12.2, 220, 1)");
            stmt.execute("insert into d1002 values(\"2020-08-15 12:00:00.000\", 9.9, 220, 1),(\"2020-08-15 12:10:00.000\", 10.3, 220, 1),(\"2020-08-15 12:20:00.000\", 11.2, 220, 1)");
        }
    }

    @AfterClass
    public static void afterClass() throws SQLException {
        if (connection != null)
            connection.close();
    }
}
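For comparison, a rough Python counterpart of the polling loop in TD4144Test, offered only as a sketch; it assumes the legacy Python connector's subscribe(restart, topic, sql, interval_ms), consume(), and close(keepProgress) APIs.

```python
# Sketch only: mirrors the JDBC subscription test above using the Python connector.
import time
import taos

conn = taos.connect(host="127.0.0.1", user="root",
                    password="taosdata", database="power")
sub = conn.subscribe(False, "topic-meter-current-bg-10",
                     "select * from meters where current > 10", 1000)
try:
    for _ in range(10):
        for row in sub.consume():  # poll for newly arrived rows
            print(row)
        time.sleep(1)              # wait 1 second, as the Java test does
finally:
    sub.close(True)                # True keeps the subscription progress
```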
......@@ -18,8 +18,8 @@
#define CHK_TEST(statement) \
do { \
D("testing: %s", #statement); \
int r = (statement); \
if (r) { \
int _r = (statement); \
if (_r) { \
D("testing failed: %s", #statement); \
return 1; \
} \
......@@ -181,7 +181,7 @@ static int do_statement(SQLHSTMT stmt, const char *statement) {
r = traverse_cols(stmt, cols);
char buf[4096];
while (1) {
SQLRETURN r = SQLFetch(stmt);
r = SQLFetch(stmt);
if (r==SQL_NO_DATA) break;
CHK_RESULT(r, SQL_HANDLE_STMT, stmt, "");
for (size_t i=0; i<cols; ++i) {
......
......@@ -1762,8 +1762,8 @@ static SQLRETURN tsdb_conn_prepare(stmt_t *stmt) {
tsdb_stmt->tsdb_params = tsdb_params;
for (int i=0; i<nums; ++i) {
SQLRETURN r = do_fill_param(stmt, i);
if (r) return r;
SQLRETURN _r = do_fill_param(stmt, i);
if (_r) return _r;
}
}
......
......@@ -148,15 +148,15 @@ static void *shellCheckThreadFp(void *arg) {
return NULL;
}
static void shellRunCheckThreads(TAOS *con, SShellArguments *args) {
static void shellRunCheckThreads(TAOS *con, SShellArguments *_args) {
pthread_attr_t thattr;
ShellThreadObj *threadObj = (ShellThreadObj *)calloc(args->threadNum, sizeof(ShellThreadObj));
for (int t = 0; t < args->threadNum; ++t) {
ShellThreadObj *threadObj = (ShellThreadObj *)calloc(_args->threadNum, sizeof(ShellThreadObj));
for (int t = 0; t < _args->threadNum; ++t) {
ShellThreadObj *pThread = threadObj + t;
pThread->threadIndex = t;
pThread->totalThreads = args->threadNum;
pThread->totalThreads = _args->threadNum;
pThread->taos = con;
pThread->db = args->database;
pThread->db = _args->database;
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
......@@ -167,31 +167,31 @@ static void shellRunCheckThreads(TAOS *con, SShellArguments *args) {
}
}
for (int t = 0; t < args->threadNum; ++t) {
for (int t = 0; t < _args->threadNum; ++t) {
pthread_join(threadObj[t].threadID, NULL);
}
for (int t = 0; t < args->threadNum; ++t) {
for (int t = 0; t < _args->threadNum; ++t) {
taos_close(threadObj[t].taos);
}
free(threadObj);
}
void shellCheck(TAOS *con, SShellArguments *args) {
void shellCheck(TAOS *con, SShellArguments *_args) {
int64_t start = taosGetTimestampMs();
if (shellUseDb(con, args->database) != 0) {
if (shellUseDb(con, _args->database) != 0) {
shellFreeTbnames();
return;
}
if (shellShowTables(con, args->database) != 0) {
if (shellShowTables(con, _args->database) != 0) {
shellFreeTbnames();
return;
}
fprintf(stdout, "total %d tables will be checked by %d threads\n", tbNum, args->threadNum);
shellRunCheckThreads(con, args);
fprintf(stdout, "total %d tables will be checked by %d threads\n", tbNum, _args->threadNum);
shellRunCheckThreads(con, _args);
int64_t end = taosGetTimestampMs();
fprintf(stdout, "total %d tables checked, failed:%d, time spent %.2f seconds\n", checkedNum, errorNum,
......
......@@ -56,24 +56,24 @@ extern TAOS *taos_connect_auth(const char *ip, const char *user, const char *aut
/*
* FUNCTION: Initialize the shell.
*/
TAOS *shellInit(SShellArguments *args) {
TAOS *shellInit(SShellArguments *_args) {
printf("\n");
printf(CLIENT_VERSION, tsOsName, taos_get_client_info());
fflush(stdout);
// set options before initializing
if (args->timezone != NULL) {
taos_options(TSDB_OPTION_TIMEZONE, args->timezone);
if (_args->timezone != NULL) {
taos_options(TSDB_OPTION_TIMEZONE, _args->timezone);
}
if (args->is_use_passwd) {
if (args->password == NULL) args->password = getpass("Enter password: ");
if (_args->is_use_passwd) {
if (_args->password == NULL) _args->password = getpass("Enter password: ");
} else {
args->password = TSDB_DEFAULT_PASS;
_args->password = TSDB_DEFAULT_PASS;
}
if (args->user == NULL) {
args->user = TSDB_DEFAULT_USER;
if (_args->user == NULL) {
_args->user = TSDB_DEFAULT_USER;
}
if (taos_init()) {
......@@ -84,10 +84,10 @@ TAOS *shellInit(SShellArguments *args) {
// Connect to the database.
TAOS *con = NULL;
if (args->auth == NULL) {
con = taos_connect(args->host, args->user, args->password, args->database, args->port);
if (_args->auth == NULL) {
con = taos_connect(_args->host, _args->user, _args->password, _args->database, _args->port);
} else {
con = taos_connect_auth(args->host, args->user, args->auth, args->database, args->port);
con = taos_connect_auth(_args->host, _args->user, _args->auth, _args->database, _args->port);
}
if (con == NULL) {
......@@ -100,14 +100,14 @@ TAOS *shellInit(SShellArguments *args) {
read_history();
// Check if it is a temporary run
if (args->commands != NULL || args->file[0] != 0) {
if (args->commands != NULL) {
printf("%s%s\n", PROMPT_HEADER, args->commands);
shellRunCommand(con, args->commands);
if (_args->commands != NULL || _args->file[0] != 0) {
if (_args->commands != NULL) {
printf("%s%s\n", PROMPT_HEADER, _args->commands);
shellRunCommand(con, _args->commands);
}
if (args->file[0] != 0) {
source_file(con, args->file);
if (_args->file[0] != 0) {
source_file(con, _args->file);
}
taos_close(con);
......@@ -116,14 +116,14 @@ TAOS *shellInit(SShellArguments *args) {
}
#ifndef WINDOWS
if (args->dir[0] != 0) {
source_dir(con, args);
if (_args->dir[0] != 0) {
source_dir(con, _args);
taos_close(con);
exit(EXIT_SUCCESS);
}
if (args->check != 0) {
shellCheck(con, args);
if (_args->check != 0) {
shellCheck(con, _args);
taos_close(con);
exit(EXIT_SUCCESS);
}
......
......@@ -233,15 +233,15 @@ void* shellImportThreadFp(void *arg)
return NULL;
}
static void shellRunImportThreads(SShellArguments* args)
static void shellRunImportThreads(SShellArguments* _args)
{
pthread_attr_t thattr;
ShellThreadObj *threadObj = (ShellThreadObj *)calloc(args->threadNum, sizeof(ShellThreadObj));
for (int t = 0; t < args->threadNum; ++t) {
ShellThreadObj *threadObj = (ShellThreadObj *)calloc(_args->threadNum, sizeof(ShellThreadObj));
for (int t = 0; t < _args->threadNum; ++t) {
ShellThreadObj *pThread = threadObj + t;
pThread->threadIndex = t;
pThread->totalThreads = args->threadNum;
pThread->taos = taos_connect(args->host, args->user, args->password, args->database, tsDnodeShellPort);
pThread->totalThreads = _args->threadNum;
pThread->taos = taos_connect(_args->host, _args->user, _args->password, _args->database, tsDnodeShellPort);
if (pThread->taos == NULL) {
fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, "null taos"/*taos_errstr(pThread->taos)*/);
exit(0);
......@@ -256,18 +256,18 @@ static void shellRunImportThreads(SShellArguments* args)
}
}
for (int t = 0; t < args->threadNum; ++t) {
for (int t = 0; t < _args->threadNum; ++t) {
pthread_join(threadObj[t].threadID, NULL);
}
for (int t = 0; t < args->threadNum; ++t) {
for (int t = 0; t < _args->threadNum; ++t) {
taos_close(threadObj[t].taos);
}
free(threadObj);
}
void source_dir(TAOS* con, SShellArguments* args) {
shellGetDirectoryFileList(args->dir);
void source_dir(TAOS* con, SShellArguments* _args) {
shellGetDirectoryFileList(_args->dir);
int64_t start = taosGetTimestampMs();
if (shellTablesSQLFile[0] != 0) {
......@@ -276,7 +276,7 @@ void source_dir(TAOS* con, SShellArguments* args) {
fprintf(stdout, "import %s finished, time spent %.2f seconds\n", shellTablesSQLFile, (end - start) / 1000.0);
}
shellRunImportThreads(args);
shellRunImportThreads(_args);
int64_t end = taosGetTimestampMs();
fprintf(stdout, "import %s finished, time spent %.2f seconds\n", args->dir, (end - start) / 1000.0);
fprintf(stdout, "import %s finished, time spent %.2f seconds\n", _args->dir, (end - start) / 1000.0);
}
......@@ -415,7 +415,7 @@ void set_terminal_mode() {
}
}
void get_history_path(char *history) { snprintf(history, TSDB_FILENAME_LEN, "%s/%s", getenv("HOME"), HISTORY_FILE); }
void get_history_path(char *_history) { snprintf(_history, TSDB_FILENAME_LEN, "%s/%s", getenv("HOME"), HISTORY_FILE); }
void clearScreen(int ecmd_pos, int cursor_pos) {
struct winsize w;
......
......@@ -220,8 +220,8 @@ typedef struct SArguments_S {
uint64_t interlace_rows;
uint64_t num_of_RPR; // num_of_records_per_req
uint64_t max_sql_len;
uint64_t num_of_tables;
uint64_t num_of_DPT;
int64_t num_of_tables;
int64_t num_of_DPT;
int abort;
int disorderRatio; // 0: no disorder, >0: x%
int disorderRange; // ms or us by database precision
......@@ -257,7 +257,7 @@ typedef struct SSuperTable_S {
uint64_t maxSqlLen; //
uint64_t insertInterval; // insert interval, will override global insert interval
uint64_t insertRows;
int64_t insertRows;
int64_t timeStampStep;
char startTimestamp[MAX_TB_NAME_SIZE];
char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json
......@@ -385,7 +385,7 @@ typedef struct SuperQueryInfo_S {
int subscribeRestart;
int subscribeKeepProgress;
uint64_t queryTimes;
uint64_t childTblCount;
int64_t childTblCount;
char childTblPrefix[MAX_TB_NAME_SIZE];
uint64_t sqlCount;
char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1];
......@@ -592,7 +592,7 @@ SArguments g_args = {
static SDbs g_Dbs;
static uint64_t g_totalChildTables = 0;
static int64_t g_totalChildTables = 0;
static SQueryMetaInfo g_queryInfo;
static FILE * g_fpOfInsertResult = NULL;
......@@ -1046,9 +1046,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
arguments->max_sql_len);
printf("# Length of Binary: %d\n", arguments->len_of_binary);
printf("# Number of Threads: %d\n", arguments->num_of_threads);
printf("# Number of Tables: %"PRIu64"\n",
printf("# Number of Tables: %"PRId64"\n",
arguments->num_of_tables);
printf("# Number of Data per Table: %"PRIu64"\n",
printf("# Number of Data per Table: %"PRId64"\n",
arguments->num_of_DPT);
printf("# Database name: %s\n", arguments->database);
printf("# Table prefix: %s\n", arguments->tb_prefix);
......@@ -1420,7 +1420,7 @@ static int printfInsertMeta() {
printf(" childTblExists: \033[33m%s\033[0m\n", "error");
}
printf(" childTblCount: \033[33m%"PRIu64"\033[0m\n",
printf(" childTblCount: \033[33m%"PRId64"\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblCount);
printf(" childTblPrefix: \033[33m%s\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblPrefix);
......@@ -1437,7 +1437,7 @@ static int printfInsertMeta() {
printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n",
g_Dbs.db[i].superTbls[j].childTblOffset);
}
printf(" insertRows: \033[33m%"PRIu64"\033[0m\n",
printf(" insertRows: \033[33m%"PRId64"\033[0m\n",
g_Dbs.db[i].superTbls[j].insertRows);
/*
if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) {
......@@ -1604,7 +1604,7 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " childTblExists: %s\n", "error");
}
fprintf(fp, " childTblCount: %"PRIu64"\n",
fprintf(fp, " childTblCount: %"PRId64"\n",
g_Dbs.db[i].superTbls[j].childTblCount);
fprintf(fp, " childTblPrefix: %s\n",
g_Dbs.db[i].superTbls[j].childTblPrefix);
......@@ -1613,7 +1613,7 @@ static void printfInsertMetaToFile(FILE* fp) {
fprintf(fp, " insertMode: %s\n",
(g_Dbs.db[i].superTbls[j].insertMode==TAOSC_IFACE)?"taosc":
(g_Dbs.db[i].superTbls[j].insertMode==REST_IFACE)?"rest":"stmt");
fprintf(fp, " insertRows: %"PRIu64"\n",
fprintf(fp, " insertRows: %"PRId64"\n",
g_Dbs.db[i].superTbls[j].insertRows);
fprintf(fp, " interlace rows: %"PRIu64"\n",
g_Dbs.db[i].superTbls[j].interlaceRows);
......@@ -1731,7 +1731,7 @@ static void printfQueryMeta() {
g_queryInfo.superQueryInfo.queryInterval);
printf("threadCnt: \033[33m%d\033[0m\n",
g_queryInfo.superQueryInfo.threadCnt);
printf("childTblCount: \033[33m%"PRIu64"\033[0m\n",
printf("childTblCount: \033[33m%"PRId64"\033[0m\n",
g_queryInfo.superQueryInfo.childTblCount);
printf("stable name: \033[33m%s\033[0m\n",
g_queryInfo.superQueryInfo.sTblName);
......@@ -2361,7 +2361,7 @@ static int calcRowLen(SSuperTable* superTbls) {
static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
char* dbName, char* sTblName, char** childTblNameOfSuperTbl,
uint64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) {
int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) {
char command[BUFFER_SIZE] = "\0";
char limitBuf[100] = "\0";
......@@ -2390,8 +2390,8 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
exit(-1);
}
int childTblCount = (limit < 0)?10000:limit;
int count = 0;
int64_t childTblCount = (limit < 0)?10000:limit;
int64_t count = 0;
if (childTblName == NULL) {
childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
if (NULL == childTblName) {
......@@ -2438,7 +2438,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName,
char* sTblName, char** childTblNameOfSuperTbl,
uint64_t* childTblCountOfSuperTbl) {
int64_t* childTblCountOfSuperTbl) {
return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName,
childTblNameOfSuperTbl, childTblCountOfSuperTbl,
......@@ -3006,7 +3006,7 @@ static void createChildTables() {
uint64_t startFrom = 0;
g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount;
verbosePrint("%s() LN%d: create %"PRIu64" child tables from %"PRIu64"\n",
verbosePrint("%s() LN%d: create %"PRId64" child tables from %"PRIu64"\n",
__func__, __LINE__, g_totalChildTables, startFrom);
startMultiThreadCreateChildTable(
g_Dbs.db[i].superTbls[j].colsOfCreateChildTable,
......@@ -3034,7 +3034,7 @@ static void createChildTables() {
snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")");
verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRIu64" schema: %s\n",
verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n",
__func__, __LINE__,
g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf);
startMultiThreadCreateChildTable(
......@@ -3929,7 +3929,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
cJSON* maxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len");
maxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len");
if (maxSqlLen && maxSqlLen->type == cJSON_Number) {
int32_t len = maxSqlLen->valueint;
if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
......@@ -3963,7 +3963,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
*/
cJSON* interlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
interlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
if (interlaceRows && interlaceRows->type == cJSON_Number) {
if (interlaceRows->valueint < 0) {
errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n",
......@@ -4706,7 +4706,7 @@ static int64_t generateData(char *recBuf, char **data_type,
double t = rand_double();
pstr += sprintf(pstr, ",%20.8f", t);
} else if (strcasecmp(data_type[i % c], "BOOL") == 0) {
bool b = rand_bool() & 1;
bool b = taosRandom() & 1;
pstr += sprintf(pstr, ",%s", b ? "true" : "false");
} else if (strcasecmp(data_type[i % c], "BINARY") == 0) {
char *s = malloc(lenOfBinary);
......@@ -4812,7 +4812,7 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t table
(tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN);
} else {
verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRIu64" seq=%"PRIu64"\n",
verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRId64" seq=%"PRIu64"\n",
pThreadInfo->threadID, __func__, __LINE__,
pThreadInfo->start_table_from,
pThreadInfo->ntables, tableSeq);
......@@ -5004,7 +5004,7 @@ static int64_t generateInterlaceDataBuffer(
char *tableName, uint64_t batchPerTbl, uint64_t i, uint64_t batchPerTblTimes,
uint64_t tableSeq,
threadInfo *pThreadInfo, char *buffer,
uint64_t insertRows,
int64_t insertRows,
int64_t startTime,
uint64_t *pRemainderBufLen)
{
......@@ -5064,7 +5064,7 @@ static int64_t generateProgressiveDataBuffer(
int64_t tableSeq,
threadInfo *pThreadInfo, char *buffer,
int64_t insertRows,
int64_t startFrom, int64_t startTime, int64_t *pSamplePos,
uint64_t startFrom, int64_t startTime, int64_t *pSamplePos,
int64_t *pRemainderBufLen)
{
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
......@@ -5117,7 +5117,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
debugPrint("[%d] %s() LN%d: ### interlace write\n",
pThreadInfo->threadID, __func__, __LINE__);
uint64_t insertRows;
int64_t insertRows;
uint64_t interlaceRows;
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
......@@ -5182,8 +5182,6 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
int64_t startTime = pThreadInfo->start_time;
assert(pThreadInfo->ntables > 0);
uint64_t batchPerTbl = interlaceRows;
uint64_t batchPerTblTimes;
......@@ -5396,7 +5394,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) {
tableSeq ++) {
int64_t start_time = pThreadInfo->start_time;
uint64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT;
verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows);
for (uint64_t i = 0; i < insertRows;) {
......@@ -5760,7 +5758,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
exit(-1);
}
uint64_t childTblCount;
int64_t childTblCount;
getChildNameOfSuperTableWithLimitAndOffset(
taos0,
db_name, superTblInfo->sTblName,
......@@ -5965,7 +5963,7 @@ static void *readTable(void *sarg) {
return NULL;
}
uint64_t num_of_DPT;
int64_t num_of_DPT;
/* if (rinfo->superTblInfo) {
num_of_DPT = rinfo->superTblInfo->insertRows; // nrecords_per_table;
} else {
......@@ -5984,11 +5982,11 @@ static void *readTable(void *sarg) {
printf("%"PRId64" records:\n", totalData);
fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n");
for (uint64_t j = 0; j < n; j++) {
for (int j = 0; j < n; j++) {
double totalT = 0;
uint64_t count = 0;
for (uint64_t i = 0; i < num_of_tables; i++) {
sprintf(command, "select %s from %s%"PRIu64" where ts>= %" PRIu64,
for (int64_t i = 0; i < num_of_tables; i++) {
sprintf(command, "select %s from %s%"PRId64" where ts>= %" PRIu64,
aggreFunc[j], tb_prefix, i, sTime);
double t = taosGetTimestampMs();
......@@ -6013,7 +6011,7 @@ static void *readTable(void *sarg) {
taos_free_result(pSql);
}
fprintf(fp, "|%10s | %"PRIu64" | %12.2f | %10.2f |\n",
fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n",
aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData,
(double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000);
printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT * 1000);
......@@ -6035,7 +6033,7 @@ static void *readMetric(void *sarg) {
return NULL;
}
uint64_t num_of_DPT = rinfo->superTblInfo->insertRows;
int64_t num_of_DPT = rinfo->superTblInfo->insertRows;
int64_t num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1;
int64_t totalData = num_of_DPT * num_of_tables;
bool do_aggreFunc = g_Dbs.do_aggreFunc;
......@@ -6051,13 +6049,13 @@ static void *readMetric(void *sarg) {
char condition[COND_BUF_LEN] = "\0";
char tempS[64] = "\0";
int m = 10 < num_of_tables ? 10 : num_of_tables;
int64_t m = 10 < num_of_tables ? 10 : num_of_tables;
for (int i = 1; i <= m; i++) {
for (int64_t i = 1; i <= m; i++) {
if (i == 1) {
sprintf(tempS, "t1 = %d", i);
sprintf(tempS, "t1 = %"PRId64"", i);
} else {
sprintf(tempS, " or t1 = %d ", i);
sprintf(tempS, " or t1 = %"PRId64" ", i);
}
strncat(condition, tempS, COND_BUF_LEN - 1);
......@@ -6143,11 +6141,11 @@ static int insertTestProcess() {
end = taosGetTimestampMs();
if (g_totalChildTables > 0) {
fprintf(stderr, "Spent %.4f seconds to create %"PRIu64" tables with %d thread(s)\n\n",
fprintf(stderr, "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n",
(end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
if (g_fpOfInsertResult) {
fprintf(g_fpOfInsertResult,
"Spent %.4f seconds to create %"PRIu64" tables with %d thread(s)\n\n",
"Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n",
(end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl);
}
}
......
......@@ -1017,7 +1017,7 @@ int taosDumpOut(struct arguments *arguments) {
sprintf(command, "use %s", dbInfos[0]->name);
result = taos_query(taos, command);
int32_t code = taos_errno(result);
code = taos_errno(result);
if (code != 0) {
fprintf(stderr, "invalid database %s\n", dbInfos[0]->name);
goto _exit_failure;
......
......@@ -522,13 +522,13 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
pStatus->lastReboot = htonl(pStatus->lastReboot);
pStatus->numOfCores = htons(pStatus->numOfCores);
uint32_t version = htonl(pStatus->version);
if (version != tsVersion) {
uint32_t _version = htonl(pStatus->version);
if (_version != tsVersion) {
pDnode = mnodeGetDnodeByEp(pStatus->dnodeEp);
if (pDnode != NULL && pDnode->status != TAOS_DN_STATUS_READY) {
pDnode->offlineReason = TAOS_DN_OFF_VERSION_NOT_MATCH;
}
mError("dnode:%d, status msg version:%d not equal with cluster:%d", pStatus->dnodeId, version, tsVersion);
mError("dnode:%d, status msg version:%d not equal with cluster:%d", pStatus->dnodeId, _version, tsVersion);
return TSDB_CODE_MND_INVALID_MSG_VERSION;
}
......
......@@ -132,10 +132,10 @@ int32_t ehttp_gzip_write(ehttp_gzip_t *gzip, const char *buf, int32_t len) {
if (ret != Z_STREAM_END) continue;
}
int32_t len = (int32_t)(gzip->gzip->next_out - (z_const Bytef *)gzip->chunk);
int32_t _len = (int32_t)(gzip->gzip->next_out - (z_const Bytef *)gzip->chunk);
gzip->gzip->next_out[0] = '\0';
gzip->callbacks.on_data(gzip, gzip->arg, gzip->chunk, len);
gzip->callbacks.on_data(gzip, gzip->arg, gzip->chunk, _len);
gzip->gzip->next_out = (z_const Bytef *)gzip->chunk;
gzip->gzip->avail_out = gzip->conf.chunk_size;
}
......
......@@ -163,9 +163,9 @@ static int32_t httpOnRequestLine(HttpParser *pParser, char *method, char *target
// parse decode method
for (int32_t i = 0; i < tsHttpServer.methodScannerLen; i++) {
HttpDecodeMethod *method = tsHttpServer.methodScanner[i];
if (strcmp(method->module, pParser->path[0].str) == 0) {
pContext->decodeMethod = method;
HttpDecodeMethod *_method = tsHttpServer.methodScanner[i];
if (strcmp(_method->module, pParser->path[0].str) == 0) {
pContext->decodeMethod = _method;
break;
}
}
......
......@@ -209,7 +209,7 @@ void tgParseSchemaMetric(cJSON *metric) {
parsedOk = false;
goto ParseEnd;
}
int32_t nameLen = (int32_t)strlen(field->valuestring);
nameLen = (int32_t)strlen(field->valuestring);
if (nameLen == 0 || nameLen >= TSDB_TABLE_NAME_LEN) {
parsedOk = false;
goto ParseEnd;
......
......@@ -592,14 +592,14 @@ void tSetDbName(SStrToken *pCpxName, SStrToken *pDb) {
void tSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType) {
int32_t maxLen = sizeof(pField->name) / sizeof(pField->name[0]);
// truncate the column name
if ((int32_t)pName->n >= maxLen) {
pName->n = maxLen - 1;
}
strncpy(pField->name, pName->z, pName->n);
pField->name[pName->n] = 0;
// column name is too long, set it to be invalid.
if ((int32_t) pName->n >= maxLen) {
pName->n = -1;
} else {
strncpy(pField->name, pName->z, pName->n);
pField->name[pName->n] = 0;
}
pField->type = pType->type;
if(!isValidDataType(pField->type)){
......
......@@ -576,7 +576,7 @@ static void *taosProcessTcpData(void *param) {
}
while (pThreadObj->pHead) {
SFdObj *pFdObj = pThreadObj->pHead;
pFdObj = pThreadObj->pHead;
pThreadObj->pHead = pFdObj->next;
taosReportBrokenLink(pFdObj);
}
......
......@@ -389,17 +389,17 @@ int32_t syncForwardToPeer(int64_t rid, void *data, void *mhandle, int32_t qtype,
return code;
}
void syncConfirmForward(int64_t rid, uint64_t version, int32_t code, bool force) {
void syncConfirmForward(int64_t rid, uint64_t _version, int32_t code, bool force) {
SSyncNode *pNode = syncAcquireNode(rid);
if (pNode == NULL) return;
SSyncPeer *pPeer = pNode->pMaster;
if (pPeer && (pNode->quorum > 1 || force)) {
SFwdRsp rsp;
syncBuildSyncFwdRsp(&rsp, pNode->vgId, version, code);
syncBuildSyncFwdRsp(&rsp, pNode->vgId, _version, code);
if (taosWriteMsg(pPeer->peerFd, &rsp, sizeof(SFwdRsp)) == sizeof(SFwdRsp)) {
sTrace("%s, forward-rsp is sent, code:0x%x hver:%" PRIu64, pPeer->id, code, version);
sTrace("%s, forward-rsp is sent, code:0x%x hver:%" PRIu64, pPeer->id, code, _version);
} else {
sDebug("%s, failed to send forward-rsp, restart", pPeer->id);
syncRestartConnection(pPeer);
......@@ -1302,14 +1302,14 @@ static void syncProcessBrokenLink(int64_t rid) {
syncReleasePeer(pPeer);
}
static int32_t syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle) {
static int32_t syncSaveFwdInfo(SSyncNode *pNode, uint64_t _version, void *mhandle) {
SSyncFwds *pSyncFwds = pNode->pSyncFwds;
int64_t time = taosGetTimestampMs();
if (pSyncFwds->fwds >= SYNC_MAX_FWDS) {
// pSyncFwds->first = (pSyncFwds->first + 1) % SYNC_MAX_FWDS;
// pSyncFwds->fwds--;
sError("vgId:%d, failed to save fwd info, hver:%" PRIu64 " fwds:%d", pNode->vgId, version, pSyncFwds->fwds);
sError("vgId:%d, failed to save fwd info, hver:%" PRIu64 " fwds:%d", pNode->vgId, _version, pSyncFwds->fwds);
return TSDB_CODE_SYN_TOO_MANY_FWDINFO;
}
......@@ -1319,12 +1319,12 @@ static int32_t syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle
SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + pSyncFwds->last;
memset(pFwdInfo, 0, sizeof(SFwdInfo));
pFwdInfo->version = version;
pFwdInfo->version = _version;
pFwdInfo->mhandle = mhandle;
pFwdInfo->time = time;
pSyncFwds->fwds++;
sTrace("vgId:%d, fwd info is saved, hver:%" PRIu64 " fwds:%d ", pNode->vgId, version, pSyncFwds->fwds);
sTrace("vgId:%d, fwd info is saved, hver:%" PRIu64 " fwds:%d ", pNode->vgId, _version, pSyncFwds->fwds);
return 0;
}
......
......@@ -61,13 +61,13 @@ void syncBuildSyncFwdMsg(SSyncHead *pHead, int32_t vgId, int32_t len) {
syncBuildHead(pHead);
}
void syncBuildSyncFwdRsp(SFwdRsp *pMsg, int32_t vgId, uint64_t version, int32_t code) {
void syncBuildSyncFwdRsp(SFwdRsp *pMsg, int32_t vgId, uint64_t _version, int32_t code) {
pMsg->head.type = TAOS_SMSG_SYNC_FWD_RSP;
pMsg->head.vgId = vgId;
pMsg->head.len = sizeof(SFwdRsp) - sizeof(SSyncHead);
syncBuildHead(&pMsg->head);
pMsg->version = version;
pMsg->version = _version;
pMsg->code = code;
}
......
......@@ -69,7 +69,7 @@ void tsdbFreeMeta(STsdbMeta* pMeta);
int tsdbOpenMeta(STsdbRepo* pRepo);
int tsdbCloseMeta(STsdbRepo* pRepo);
STable* tsdbGetTableByUid(STsdbMeta* pMeta, uint64_t uid);
STSchema* tsdbGetTableSchemaByVersion(STable* pTable, int16_t version);
STSchema* tsdbGetTableSchemaByVersion(STable* pTable, int16_t _version);
int tsdbWLockRepoMeta(STsdbRepo* pRepo);
int tsdbRLockRepoMeta(STsdbRepo* pRepo);
int tsdbUnlockRepoMeta(STsdbRepo* pRepo);
......@@ -89,16 +89,16 @@ static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *k
}
}
static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t version) {
static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t _version) {
STable* pDTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable;
STSchema* pSchema = NULL;
STSchema* pTSchema = NULL;
if (lock) TSDB_RLOCK_TABLE(pDTable);
if (version < 0) { // get the latest version of schema
if (_version < 0) { // get the latest version of schema
pTSchema = pDTable->schema[pDTable->numOfSchemas - 1];
} else { // get the schema with version
void* ptr = taosbsearch(&version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*),
void* ptr = taosbsearch(&_version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*),
tsdbCompareSchemaVersion, TD_EQ);
if (ptr == NULL) {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
......
......@@ -958,11 +958,11 @@ static int tsdbWriteBlockInfo(SCommitH *pCommih) {
}
static int tsdbWriteBlockIdx(SCommitH *pCommih) {
SBlockIdx *pBlkIdx;
SBlockIdx *pBlkIdx = NULL;
SDFile * pHeadf = TSDB_COMMIT_HEAD_FILE(pCommih);
size_t nidx = taosArrayGetSize(pCommih->aBlkIdx);
int tlen = 0, size;
int64_t offset;
int tlen = 0, size = 0;
int64_t offset = 0;
if (nidx <= 0) {
// All data are deleted
......
......@@ -957,10 +957,10 @@ static int tsdbRestoreMeta(STsdbRepo *pRepo) {
regfree(&regex);
return -1;
} else {
uint32_t version = 0;
uint32_t _version = 0;
if (strcmp(bname, "meta") != 0) {
sscanf(bname, "meta-ver%" PRIu32, &version);
pfs->cstatus->meta.version = version;
sscanf(bname, "meta-ver%" PRIu32, &_version);
pfs->cstatus->meta.version = _version;
}
pfs->cstatus->pmf = &(pfs->cstatus->mf);
......@@ -1103,10 +1103,10 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) {
int tvid, tfid;
TSDB_FILE_T ttype;
uint32_t tversion;
char bname[TSDB_FILENAME_LEN];
char _bname[TSDB_FILENAME_LEN];
tfsbasename(pf, bname);
tsdbParseDFilename(bname, &tvid, &tfid, &ttype, &tversion);
tfsbasename(pf, _bname);
tsdbParseDFilename(_bname, &tvid, &tfid, &ttype, &tversion);
ASSERT(tvid == REPO_ID(pRepo));
......
......@@ -410,7 +410,7 @@ int tsdbUpdateDFileHeader(SDFile *pDFile) {
int tsdbLoadDFileHeader(SDFile *pDFile, SDFInfo *pInfo) {
char buf[TSDB_FILE_HEAD_SIZE] = "\0";
uint32_t version;
uint32_t _version;
ASSERT(TSDB_FILE_OPENED(pDFile));
......@@ -428,7 +428,7 @@ int tsdbLoadDFileHeader(SDFile *pDFile, SDFInfo *pInfo) {
}
void *pBuf = buf;
pBuf = taosDecodeFixedU32(pBuf, &version);
pBuf = taosDecodeFixedU32(pBuf, &_version);
pBuf = tsdbDecodeDFInfo(pBuf, pInfo);
return 0;
}
......@@ -660,12 +660,12 @@ int tsdbScanAndTryFixDFileSet(STsdbRepo *pRepo, SDFileSet *pSet) {
return 0;
}
int tsdbParseDFilename(const char *fname, int *vid, int *fid, TSDB_FILE_T *ftype, uint32_t *version) {
int tsdbParseDFilename(const char *fname, int *vid, int *fid, TSDB_FILE_T *ftype, uint32_t *_version) {
char *p = NULL;
*version = 0;
*_version = 0;
*ftype = TSDB_FILE_MAX;
sscanf(fname, "v%df%d.%m[a-z]-ver%" PRIu32, vid, fid, &p, version);
sscanf(fname, "v%df%d.%m[a-z]-ver%" PRIu32, vid, fid, &p, _version);
for (TSDB_FILE_T i = 0; i < TSDB_FILE_MAX; i++) {
if (strcmp(p, TSDB_FNAME_SUFFIX[i]) == 0) {
*ftype = i;
......
......@@ -531,8 +531,8 @@ STable *tsdbGetTableByUid(STsdbMeta *pMeta, uint64_t uid) {
return *(STable **)ptr;
}
STSchema *tsdbGetTableSchemaByVersion(STable *pTable, int16_t version) {
return tsdbGetTableSchemaImpl(pTable, true, false, version);
STSchema *tsdbGetTableSchemaByVersion(STable *pTable, int16_t _version) {
return tsdbGetTableSchemaImpl(pTable, true, false, _version);
}
int tsdbWLockRepoMeta(STsdbRepo *pRepo) {
......@@ -891,9 +891,9 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
maxCols = 0;
maxRowBytes = 0;
for (int i = 0; i < pMeta->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable != NULL) {
pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
STable *_pTable = pMeta->tables[i];
if (_pTable != NULL) {
pSchema = tsdbGetTableSchemaImpl(_pTable, false, false, -1);
maxCols = MAX(maxCols, schemaNCols(pSchema));
maxRowBytes = MAX(maxRowBytes, schemaTLen(pSchema));
}
......
......@@ -96,6 +96,7 @@ typedef struct tSkipListState {
} tSkipListState;
typedef struct SSkipList {
unsigned int seed;
__compar_fn_t comparFn;
__sl_key_fn_t keyFn;
pthread_rwlock_t *lock;
......
......@@ -155,26 +155,26 @@ void generate_key(unsigned char* key) {
}
}
void print_key_set(key_set key_set) {
void print_key_set(key_set _key_set) {
int i;
printf("K: \n");
for (i = 0; i < 8; i++) {
printf("%02X : ", key_set.k[i]);
print_char_as_binary(key_set.k[i]);
printf("%02X : ", _key_set.k[i]);
print_char_as_binary(_key_set.k[i]);
printf("\n");
}
printf("\nC: \n");
for (i = 0; i < 4; i++) {
printf("%02X : ", key_set.c[i]);
print_char_as_binary(key_set.c[i]);
printf("%02X : ", _key_set.c[i]);
print_char_as_binary(_key_set.c[i]);
printf("\n");
}
printf("\nD: \n");
for (i = 0; i < 4; i++) {
printf("%02X : ", key_set.d[i]);
print_char_as_binary(key_set.d[i]);
printf("%02X : ", _key_set.d[i]);
print_char_as_binary(_key_set.d[i]);
printf("\n");
}
printf("\n");
......
......@@ -50,6 +50,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _
pSkipList->len = keyLen;
pSkipList->flags = flags;
pSkipList->keyFn = fn;
pSkipList->seed = rand();
if (comparFn == NULL) {
pSkipList->comparFn = getKeyComparFunc(keyType);
} else {
......@@ -545,7 +546,12 @@ static FORCE_INLINE int32_t getSkipListNodeRandomHeight(SSkipList *pSkipList) {
const uint32_t factor = 4;
int32_t n = 1;
#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
while ((rand() % factor) == 0 && n <= pSkipList->maxLevel) {
#else
while ((rand_r(&(pSkipList->seed)) % factor) == 0 && n <= pSkipList->maxLevel) {
#endif
n++;
}
......
......@@ -199,7 +199,7 @@ int32_t walRestore(void *handle, void *pVnode, FWalWrite writeFp) {
snprintf(walName, sizeof(pWal->name), "%s/%s%" PRId64, pWal->path, WAL_PREFIX, fileId);
wInfo("vgId:%d, file:%s, will be restored", pWal->vgId, walName);
int32_t code = walRestoreWalFile(pWal, pVnode, writeFp, walName, fileId);
code = walRestoreWalFile(pWal, pVnode, writeFp, walName, fileId);
if (code != TSDB_CODE_SUCCESS) {
wError("vgId:%d, file:%s, failed to restore since %s", pWal->vgId, walName, tstrerror(code));
continue;
......
......@@ -8,8 +8,8 @@
3. mkdir debug; cd debug; cmake ..; make ; sudo make install
4. pip install ../src/connector/python/linux/python2 ; pip3 install
../src/connector/python/linux/python3
4. pip install ../src/connector/python ; pip3 install
../src/connector/python
5. pip install numpy; pip3 install numpy (numpy is required only if you need to run querySort.py)
......
......@@ -21,7 +21,7 @@ def pre_test(){
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python/linux/python3/
pip3 install ${WKC}/src/connector/python
'''
return 1
}
......
......@@ -189,8 +189,8 @@ void writeDataImp(void *param) {
counter++;
if (counter >= arguments.rowsPerRequest) {
TAOS_RES *result = taos_query(taos, sql);
int32_t code = taos_errno(result);
result = taos_query(taos, sql);
code = taos_errno(result);
if (code != 0) {
printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(taos));
}
......@@ -207,8 +207,8 @@ void writeDataImp(void *param) {
}
if (counter > 0) {
TAOS_RES *result = taos_query(taos, sql);
int32_t code = taos_errno(result);
result = taos_query(taos, sql);
code = taos_errno(result);
if (code != 0) {
printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(taos));
}
......
#include "qSqlparser.h"
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size){
char *new_str = (char *)malloc(size+1);
if (new_str == NULL){
return 0;
}
memcpy(new_str, data, size);
new_str[size] = '\0';
qSqlParse(new_str);
free(new_str);
return 0;
}
......@@ -48,7 +48,7 @@ fi
PYTHON_EXEC=python3.8
# First we need to set up a path for Python to find our own TAOS modules, so that "import" can work.
export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3:$(pwd)
export PYTHONPATH=$(pwd)/../../src/connector/python:$(pwd)
# Then let us set up the library path so that our compiled SO file can be loaded by Python
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR
......
......@@ -48,7 +48,7 @@ fi
PYTHON_EXEC=python3.8
# First we need to set up a path for Python to find our own TAOS modules, so that "import" can work.
export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3:$(pwd)
export PYTHONPATH=$(pwd)/../../src/connector/python:$(pwd)
# Then let us set up the library path so that our compiled SO file can be loaded by Python
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR
......
# Helpful Ref: https://stackoverflow.com/questions/24100558/how-can-i-split-a-module-into-multiple-files-without-breaking-a-backwards-compa/24100645
from crash_gen.service_manager import ServiceManager, TdeInstance, TdeSubProcess
from __future__ import annotations
import argparse
gConfig: argparse.Namespace
def init():
global gConfig
gConfig = []
\ No newline at end of file
from __future__ import annotations
import argparse
from typing import Optional
from .misc import CrashGenError
# from crash_gen.misc import CrashGenError
# gConfig: Optional[argparse.Namespace]
class Config:
_config = None # type: Optional[argparse.Namespace]
@classmethod
def init(cls, parser: argparse.ArgumentParser):
if cls._config is not None:
raise CrashGenError("Config can only be initialized once")
cls._config = parser.parse_args()
# print(cls._config)
@classmethod
def setConfig(cls, config: argparse.Namespace):
cls._config = config
@classmethod
# TODO: check items instead of exposing everything
def getConfig(cls) -> argparse.Namespace:
if cls._config is None:
raise CrashGenError("invalid state")
return cls._config
@classmethod
def clearConfig(cls):
cls._config = None
@classmethod
def isSet(cls, cfgKey):
cfg = cls.getConfig()
if cfgKey not in cfg:
return False
return cfg.__getattribute__(cfgKey)
\ No newline at end of file
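For context, a minimal usage sketch of the Config singleton above (the --debug flag is illustrative; the import path matches the one used by perf_gen.py later in this commit):

import argparse
from crash_gen.shared.config import Config

parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
Config.init(parser)          # parses sys.argv; allowed exactly once
if Config.isSet('debug'):    # False when the attribute is absent or falsy
    print(Config.getConfig().debug)
Config.clearConfig()         # reset, e.g. between test runs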
from __future__ import annotations
import sys
import os
import datetime
import time
import threading
import requests
from requests.auth import HTTPBasicAuth
import taos
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.log import *
from .misc import Logging, CrashGenError, Helper, Dice
import os
import datetime
import traceback
# from .service_manager import TdeInstance
import crash_gen.settings
from .config import Config
from .misc import Logging, CrashGenError, Helper
from .types import QueryResult
class DbConn:
TYPE_NATIVE = "native-c"
......@@ -79,7 +81,7 @@ class DbConn:
raise RuntimeError("Cannot query database until connection is open")
nRows = self.query(sql)
if nRows != 1:
raise taos.error.ProgrammingError(
raise CrashGenError(
"Unexpected result for query: {}, rows = {}".format(sql, nRows),
(CrashGenError.INVALID_EMPTY_RESULT if nRows==0 else CrashGenError.INVALID_MULTIPLE_RESULT)
)
......@@ -115,7 +117,7 @@ class DbConn:
try:
self.execute(sql)
return True # ignore num of results, return success
except taos.error.ProgrammingError as err:
except taos.error.Error as err:
return False # failed, for whatever TAOS reason
# Not possible to reach here; a non-TAOS exception would have been thrown
......@@ -126,7 +128,7 @@ class DbConn:
def openByType(self):
raise RuntimeError("Unexpected execution, should be overriden")
def getQueryResult(self):
def getQueryResult(self) -> QueryResult :
raise RuntimeError("Unexpected execution, should be overriden")
def getResultRows(self):
......@@ -221,7 +223,7 @@ class DbConnRest(DbConn):
class MyTDSql:
# Class variables
_clsLock = threading.Lock() # class wide locking
longestQuery = None # type: str
longestQuery = '' # type: str
longestQueryTime = 0.0 # seconds
lqStartTime = 0.0
# lqEndTime = 0.0 # Not needed, as we have the two above already
......@@ -249,7 +251,13 @@ class MyTDSql:
def _execInternal(self, sql):
startTime = time.time()
# Logging.debug("Executing SQL: " + sql)
# ret = None # TODO: use strong type here
# try: # Let's not capture the error, and let taos.error.ProgrammingError pass through
ret = self._cursor.execute(sql)
# except taos.error.ProgrammingError as err:
# Logging.warning("Taos SQL execution error: {}, SQL: {}".format(err.msg, sql))
# raise CrashGenError(err.msg)
# print("\nSQL success: {}".format(sql))
queryTime = time.time() - startTime
# Record the query time
......@@ -261,7 +269,7 @@ class MyTDSql:
cls.lqStartTime = startTime
# Now write to the shadow database
if crash_gen.settings.gConfig.use_shadow_db:
if Config.isSet('use_shadow_db'):
if sql[:11] == "INSERT INTO":
if sql[:16] == "INSERT INTO db_0":
sql2 = "INSERT INTO db_s" + sql[16:]
......@@ -453,31 +461,11 @@ class DbManager():
''' Release the underlying DB connection upon deletion of DbManager '''
self.cleanUp()
def getDbConn(self):
def getDbConn(self) -> DbConn :
if self._dbConn is None:
raise CrashGenError("Unexpected empty DbConn")
return self._dbConn
# TODO: not used any more, to delete
def pickAndAllocateTable(self): # pick any table, and "use" it
return self.tableNumQueue.pickAndAllocate()
# TODO: Not used any more, to delete
def addTable(self):
with self._lock:
tIndex = self.tableNumQueue.push()
return tIndex
# Not used any more, to delete
def releaseTable(self, i): # return the table back, so others can use it
self.tableNumQueue.release(i)
# TODO: not used any more, delete
def getTableNameToDelete(self):
tblNum = self.tableNumQueue.pop() # TODO: race condition!
if (not tblNum): # maybe false
return False
return "table_{}".format(tblNum)
def cleanUp(self):
if self._dbConn:
self._dbConn.close()
......
......@@ -3,6 +3,7 @@ import random
import logging
import os
import sys
from typing import Optional
import taos
......@@ -39,14 +40,14 @@ class MyLoggingAdapter(logging.LoggerAdapter):
class Logging:
logger = None
logger = None # type: Optional[MyLoggingAdapter]
@classmethod
def getLogger(cls):
return logger
return cls.logger
@classmethod
def clsInit(cls, gConfig): # TODO: refactor away gConfig
def clsInit(cls, debugMode: bool):
if cls.logger:
return
......@@ -60,13 +61,9 @@ class Logging:
# Logging adapter, to be used as a logger
# print("setting logger variable")
# global logger
cls.logger = MyLoggingAdapter(_logger, [])
if (gConfig.debug):
cls.logger.setLevel(logging.DEBUG) # default seems to be INFO
else:
cls.logger.setLevel(logging.INFO)
cls.logger = MyLoggingAdapter(_logger, {})
cls.logger.setLevel(logging.DEBUG if debugMode else logging.INFO) # default seems to be INFO
@classmethod
def info(cls, msg):
cls.logger.info(msg)
......@@ -84,6 +81,7 @@ class Logging:
cls.logger.error(msg)
class Status:
STATUS_EMPTY = 99
STATUS_STARTING = 1
STATUS_RUNNING = 2
STATUS_STOPPING = 3
......@@ -95,12 +93,16 @@ class Status:
def __repr__(self):
return "[Status: v={}]".format(self._status)
def set(self, status):
def set(self, status: int):
self._status = status
def get(self):
return self._status
def isEmpty(self):
''' Empty/Undefined '''
return self._status == Status.STATUS_EMPTY
def isStarting(self):
return self._status == Status.STATUS_STARTING
......@@ -117,6 +119,9 @@ class Status:
def isStable(self):
return self.isRunning() or self.isStopped()
def isActive(self):
return self.isStarting() or self.isRunning() or self.isStopping()
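# A minimal lifecycle sketch (the constructor signature is assumed from
# __repr__ above, not shown in this hunk):
#   st = Status(Status.STATUS_STARTING)
#   st.isActive()   # True: starting/running/stopping are all "active"
#   st.set(Status.STATUS_RUNNING)
#   st.isStable()   # True once running or stopped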
# Deterministic random number generator
class Dice():
seeded = False # static, uninitialized
......
from typing import Any, List, Dict, NewType
from enum import Enum
DirPath = NewType('DirPath', str)
QueryResult = NewType('QueryResult', List[List[Any]])
class TdDataType(Enum):
'''
Use a Python Enum type to represent all the data types in TDengine.
Ref: https://www.taosdata.com/cn/documentation/taos-sql#data-type
'''
TIMESTAMP = 'TIMESTAMP'
INT = 'INT'
BIGINT = 'BIGINT'
FLOAT = 'FLOAT'
DOUBLE = 'DOUBLE'
BINARY = 'BINARY'
BINARY16 = 'BINARY(16)' # TODO: get rid of this hack
BINARY200 = 'BINARY(200)'
SMALLINT = 'SMALLINT'
TINYINT = 'TINYINT'
BOOL = 'BOOL'
NCHAR = 'NCHAR'
TdColumns = Dict[str, TdDataType]
TdTags = Dict[str, TdDataType]
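# Illustrative only: a schema declared with these aliases, matching how
# TdSuperTable.create() is invoked in perf_gen.py later in this commit.
cols: TdColumns = {'ts': TdDataType.TIMESTAMP,
                   'temperature': TdDataType.INT,
                   'notes': TdDataType.BINARY200}
tags: TdTags = {'rack': TdDataType.INT,
                'barcode': TdDataType.BINARY16}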
......@@ -10,7 +10,7 @@
#
###################################################################
# install pip
# pip install src/connector/python/linux/python2/
# pip install src/connector/python/
import sys
import os
import os.path
......
#!/usr/bin/python3.8
from abc import abstractmethod
import time
from datetime import datetime
from influxdb_client import InfluxDBClient, Point, WritePrecision, BucketsApi
from influxdb_client.client.write_api import SYNCHRONOUS
import argparse
import textwrap
import subprocess
import sys
import taos
from crash_gen.crash_gen_main import Database, TdSuperTable
from crash_gen.service_manager import TdeInstance
from crash_gen.shared.config import Config
from crash_gen.shared.db import DbConn
from crash_gen.shared.misc import Dice, Logging, Helper
from crash_gen.shared.types import TdDataType
# NUM_PROCESSES = 10
# NUM_REPS = 1000
tick = int(time.time() - 5000000.0) # for now we will create max 5M record
value = 101
DB_NAME = 'mydb'
TIME_SERIES_NAME = 'widget'
MAX_SHELF = 500 # shelf number runs up to this, non-inclusive
ITEMS_PER_SHELF = 5
BATCH_SIZE = 2000 # Number of data points per request
# None_RW:
# INFLUX_TOKEN='RRzVQZs8ERCpV9cS2RXqgtM_Y6FEZuJ7Tuk0aHtZItFTfcM9ajixtGDhW8HzqNIBmG3hmztw-P4sHOstfJvjFA=='
# DevOrg_RW:
# INFLUX_TOKEN='o1P8sEhBmXKhxBmNuiCyOUKv8d7qm5wUjMff9AbskBu2LcmNPQzU77NrAn5hDil8hZ0-y1AGWpzpL-4wqjFdkA=='
# DevOrg_All_Access
INFLUX_TOKEN='T2QTr4sloJhINH_oSrwSS-WIIZYjDfD123NK4ou3b7ajRs0c0IphCh3bNc0OsDZQRW1HyCby7opdEndVYFGTWQ=='
INFLUX_ORG="DevOrg"
INFLUX_BUCKET="Bucket01"
def writeTaosBatch(dbc, tblName):
# Database.setupLastTick()
global value, tick
data = []
for i in range(0, 100):
data.append("('{}', {})".format(Database.getNextTick(), value) )
value += 1
sql = "INSERT INTO {} VALUES {}".format(tblName, ''.join(data))
dbc.execute(sql)
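# Note the ''.join above: the generated statement packs value tuples back to
# back, e.g. INSERT INTO tbl VALUES ('2021-01-01 00:00:00.000', 101)('...', 102)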
class PerfGenError(taos.error.ProgrammingError):
pass
class Benchmark():
# @classmethod
# def create(cls, dbType):
# if dbType == 'taos':
# return TaosBenchmark()
# elif dbType == 'influx':
# return InfluxBenchmark()
# else:
# raise RuntimeError("Unknown DB type: {}".format(dbType))
def __init__(self, dbType, loopCount = 0):
self._dbType = dbType
self._setLoopCount(loopCount)
def _setLoopCount(self, loopCount):
cfgLoopCount = Config.getConfig().loop_count
if loopCount == 0: # use config
self._loopCount = cfgLoopCount
else:
if cfgLoopCount :
Logging.warning("Ignoring loop count for fixed-loop-count benchmarks: {}".format(cfgLoopCount))
self._loopCount = loopCount
@abstractmethod
def doIterate(self):
'''
Execute the benchmark directly, without invoking sub processes,
effectively using one execution thread.
'''
pass
@abstractmethod
def prepare(self):
'''
Preparation needed to run a certain benchmark
'''
pass
@abstractmethod
def execute(self):
'''
Actually execute the benchmark
'''
Logging.warning("Unexpected execution")
@property
def name(self):
return self.__class__.__name__
def run(self):
print("Running benchmark: {}, class={} ...".format(self.name, self.__class__))
startTime = time.time()
# Prepare to execute the benchmark
self.prepare()
# Actually execute the benchmark
self.execute()
# if Config.getConfig().iterate_directly: # execute directly
# Logging.debug("Iterating...")
# self.doIterate()
# else:
# Logging.debug("Executing via sub process...")
# startTime = time.time()
# self.prepare()
# self.spawnProcesses()
# self.waitForProcesses()
# duration = time.time() - startTime
# Logging.info("Benchmark execution completed in {:.3f} seconds".format(duration))
Logging.info("Benchmark {} finished in {:.3f} seconds".format(
self.name, time.time()-startTime))
def spawnProcesses(self):
self._subProcs = []
for j in range(0, Config.getConfig().subprocess_count):
ON_POSIX = 'posix' in sys.builtin_module_names
tblName = 'cars_reg_{}'.format(j)
cmdLineStr = './perf_gen.sh -t {} -i -n {} -l {}'.format(
self._dbType,
tblName,
Config.getConfig().loop_count
)
if Config.getConfig().debug:
cmdLineStr += ' -d'
subProc = subprocess.Popen(cmdLineStr,
shell = True,
close_fds = ON_POSIX)
self._subProcs.append(subProc)
def waitForProcesses(self):
for sp in self._subProcs:
sp.wait(300)
class TaosBenchmark(Benchmark):
def __init__(self, loopCount):
super().__init__('taos', loopCount)
# self._dbType = 'taos'
tInst = TdeInstance()
self._dbc = DbConn.createNative(tInst.getDbTarget())
self._dbc.open()
self._sTable = TdSuperTable(TIME_SERIES_NAME + '_s', DB_NAME)
def doIterate(self):
tblName = Config.getConfig().target_table_name
print("Benchmarking TAOS database (1 pass) for: {}".format(tblName))
self._dbc.execute("USE {}".format(DB_NAME))
self._sTable.ensureRegTable(None, self._dbc, tblName)
try:
lCount = Config.getConfig().loop_count
print("({})".format(lCount))
for i in range(0, lCount):
writeTaosBatch(self._dbc, tblName)
except taos.error.ProgrammingError as err:
Logging.error("Failed to write batch: {}".format(err))
def prepare(self):
self._dbc.execute("CREATE DATABASE IF NOT EXISTS {}".format(DB_NAME))
self._dbc.execute("USE {}".format(DB_NAME))
# Create the super table
self._sTable.drop(self._dbc, True)
self._sTable.create(self._dbc,
{'ts': TdDataType.TIMESTAMP,
'temperature': TdDataType.INT,
'pressure': TdDataType.INT,
'notes': TdDataType.BINARY200
},
{'rack': TdDataType.INT,
'shelf': TdDataType.INT,
'barcode': TdDataType.BINARY16
})
def execSql(self, sql):
try:
self._dbc.execute(sql)
except taos.error.ProgrammingError as err:
Logging.warning("SQL Error: 0x{:X}, {}, SQL: {}".format(
Helper.convertErrno(err.errno), err.msg, sql))
raise
def executeWrite(self):
# Sample: INSERT INTO t1 USING st TAGS(1) VALUES(now, 1) t2 USING st TAGS(2) VALUES(now, 2)
sqlPrefix = "INSERT INTO "
dataTemplate = "{} USING {} TAGS({},{},'barcode_{}') VALUES('{}',{},{},'{}') "
stName = self._sTable.getName()
BATCH_SIZE = 2000 # number of items per request batch
ITEMS_PER_SHELF = 5
# rackSize = 10 # shelves per rack
# shelfSize = 100 # items per shelf
batchCount = self._loopCount // BATCH_SIZE
lastRack = 0
for i in range(batchCount):
sql = sqlPrefix
for j in range(BATCH_SIZE):
n = i*BATCH_SIZE + j # serial number
# values first
# rtName = 'rt_' + str(n) # table name contains serial number, has info
temperature = 20 + (n % 10)
pressure = 70 + (n % 10)
# tags
shelf = (n // ITEMS_PER_SHELF) % MAX_SHELF # shelf number
rack = n // (ITEMS_PER_SHELF * MAX_SHELF) # rack number
barcode = rack + shelf
# table name
tableName = "reg_" + str(rack) + '_' + str(shelf)
# now the SQL
sql += dataTemplate.format(tableName, stName,# table name
rack, shelf, barcode, # tags
Database.getNextTick(), temperature, pressure, 'xxx') # values
lastRack = rack
self.execSql(sql)
Logging.info("Last Rack: {}".format(lastRack))
class TaosWriteBenchmark(TaosBenchmark):
def execute(self):
self.executeWrite()
class Taos100kWriteBenchmark(TaosWriteBenchmark):
def __init__(self):
super().__init__(100*1000)
class Taos10kWriteBenchmark(TaosWriteBenchmark):
def __init__(self):
super().__init__(10*1000)
class Taos1mWriteBenchmark(TaosWriteBenchmark):
def __init__(self):
super().__init__(1000*1000)
class Taos5mWriteBenchmark(TaosWriteBenchmark):
def __init__(self):
super().__init__(5*1000*1000)
class Taos1kQueryBenchmark(TaosBenchmark):
def __init__(self):
super().__init__(1000)
class Taos1MCreationBenchmark(TaosBenchmark):
def __init__(self):
super().__init__(1000000)
class InfluxBenchmark(Benchmark):
def __init__(self, loopCount):
super().__init__('influx', loopCount)
# self._dbType = 'influx'
# self._client = InfluxDBClient(host='localhost', port=8086)
# def _writeBatch(self, tblName):
# global value, tick
# data = []
# for i in range(0, 100):
# line = "{},device={} value={} {}".format(
# TIME_SERIES_NAME,
# tblName,
# value,
# tick*1000000000)
# # print(line)
# data.append(line)
# value += 1
# tick +=1
# self._client.write(data, {'db':DB_NAME}, protocol='line')
def executeWrite(self):
global tick # influx tick #TODO refactor
lineTemplate = TIME_SERIES_NAME + ",rack={},shelf={},barcode='barcode_{}' temperature={},pressure={} {}"
batchCount = self._loopCount // BATCH_SIZE
for i in range(batchCount):
lineBatch = []
for j in range(BATCH_SIZE):
n = i*BATCH_SIZE + j # serial number
# values first
# rtName = 'rt_' + str(n) # table name contains serial number, has info
temperature = 20 + (n % 10)
pressure = 70 + (n % 10)
# tags
shelf = (n // ITEMS_PER_SHELF) % MAX_SHELF # shelf number
rack = n // (ITEMS_PER_SHELF * MAX_SHELF) # rack number
barcode = rack + shelf
# now the SQL
line = lineTemplate.format(
rack, shelf, barcode, # tags
temperature, pressure, # values
tick * 1000000000 )
tick += 1
lineBatch.append(line)
write_api = self._client.write_api(write_options=SYNCHRONOUS)
write_api.write(INFLUX_BUCKET, INFLUX_ORG, lineBatch)
# self._client.write(lineBatch, {'db':DB_NAME}, protocol='line')
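# One generated line (timestamp value illustrative) looks like:
#   widget,rack=4,shelf=469,barcode='barcode_473' temperature=25,pressure=75 1618000000000000000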
# def doIterate(self):
# tblName = Config.getConfig().target_table_name
# print("Benchmarking INFLUX database (1 pass) for: {}".format(tblName))
# for i in range(0, Config.getConfig().loop_count):
# self._writeBatch(tblName)
def _getOrgIdByName(self, orgName):
"""Find org by name.
"""
orgApi = self._client.organizations_api()
orgs = orgApi.find_organizations()
for org in orgs:
if org.name == orgName:
return org.id
raise PerfGenError("Org not found with name: {}".format(orgName))
def _fetchAuth(self):
authApi = self._client.authorizations_api()
auths = authApi.find_authorizations()
for auth in auths:
if auth.token == INFLUX_TOKEN :
return auth
raise PerfGenError("No proper auth found")
def _verifyPermissions(self, perms: list):
if perms:
return # OK
raise PerfGenError("No permission found")
def prepare(self):
self._client = InfluxDBClient(
url="http://127.0.0.1:8086",
token=INFLUX_TOKEN,
org=INFLUX_ORG)
auth = self._fetchAuth()
self._verifyPermissions(auth.permissions)
bktApi = self._client.buckets_api()
# Delete
bkt = bktApi.find_bucket_by_name(INFLUX_BUCKET)
if bkt:
bktApi.delete_bucket(bkt)
# Recreate
orgId = self._getOrgIdByName(INFLUX_ORG)
bktApi.create_bucket(bucket=None, bucket_name=INFLUX_BUCKET, org_id=orgId)
# self._client.drop_database(DB_NAME)
# self._client.create_database(DB_NAME)
# self._client.switch_database(DB_NAME)
class InfluxWriteBenchmark(InfluxBenchmark):
def execute(self):
return self.executeWrite()
class Influx10kWriteBenchmark(InfluxWriteBenchmark):
def __init__(self):
super().__init__(10*1000)
class Influx100kWriteBenchmark(InfluxWriteBenchmark):
def __init__(self):
super().__init__(100*1000)
class Influx1mWriteBenchmark(InfluxWriteBenchmark):
def __init__(self):
super().__init__(1000*1000)
class Influx5mWriteBenchmark(InfluxWriteBenchmark):
def __init__(self):
super().__init__(5*1000*1000)
def _buildCmdLineParser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
TDengine Performance Benchmarking Tool
---------------------------------------------------------------------
'''))
parser.add_argument(
'-b',
'--benchmark-name',
action='store',
default='Taos1kQuery',
type=str,
help='Benchmark to use (default: Taos1kQuery)')
parser.add_argument(
'-d',
'--debug',
action='store_true',
help='Turn on DEBUG mode for more logging (default: false)')
parser.add_argument(
'-i',
'--iterate-directly',
action='store_true',
help='Execute operations directly, without sub-processes (default: false)')
parser.add_argument(
'-l',
'--loop-count',
action='store',
default=1000,
type=int,
help='Number of loops to perform, 100 operations per loop. (default: 1000)')
parser.add_argument(
'-n',
'--target-table-name',
action='store',
default=None,
type=str,
help='Regular table name in target DB (default: None)')
parser.add_argument(
'-s',
'--subprocess-count',
action='store',
default=4,
type=int,
help='Number of sub processes to spawn. (default: 4)')
parser.add_argument(
'-t',
'--target-database',
action='store',
default='taos',
type=str,
help='Benchmark target: taos, influx (default: taos)')
return parser
def main():
parser = _buildCmdLineParser()
Config.init(parser)
Logging.clsInit(Config.getConfig().debug)
Dice.seed(0) # initial seeding of dice
bName = Config.getConfig().benchmark_name
bClassName = bName + 'Benchmark'
if bClassName in globals():
bClass = globals()[bClassName]
bm = bClass() # Benchmark object
bm.run()
else:
raise PerfGenError("No such benchmark: {}".format(bName))
# bm = Benchmark.create(Config.getConfig().target_database)
# bm.run()
if __name__ == "__main__":
main()
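For clarity, main() above resolves the -b flag purely by name concatenation; a hedged illustration:

# -b Taos10kWrite  ->  globals()['Taos10kWriteBenchmark']  ->  Taos10kWriteBenchmark()
# any name without a matching *Benchmark class raises PerfGenError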
#!/bin/bash
# This is the script for us to try to cause the TDengine server or client to crash
#
# PREPARATION
#
# 1. Build and compile the TDengine source code that comes with this script, in the same directory tree
# 2. Please follow the direction in our README.md, and build TDengine in the build/ directory
# 3. Adjust the configuration file if needed under build/test/cfg/taos.cfg
# 4. Run the TDengine server instance: cd build; ./build/bin/taosd -c test/cfg
# 5. Make sure you have a working Python3 environment: run /usr/bin/python3 --version, and you should get 3.6 or above
# 6. Make sure you have the proper Python packages: # sudo apt install python3-setuptools python3-pip python3-distutils
#
# RUNNING THIS SCRIPT
#
# This script assumes the source code directory is intact, and that the binaries have been built in the
# build/ directory; as such, we will load the Python libraries from the directory tree, and also load
# the TDengine client shared library (.so) file from the build/ directory, as evidenced in the env
# variables below.
#
# Running the script is simple, no parameter is needed (for now, but will change in the future).
#
# Happy Crashing...
# Due to the heavy path name assumptions/usage, let us require that the user be in the current directory
EXEC_DIR=`dirname "$0"`
if [[ $EXEC_DIR != "." ]]
then
echo "ERROR: Please execute `basename "$0"` in its own directory (for now anyway, pardon the dust)"
exit 1
fi
CURR_DIR=`pwd`
IN_TDINTERNAL="community"
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
TAOS_DIR=$CURR_DIR/../../..
TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1`
LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6,7|rev`/lib
else
TAOS_DIR=$CURR_DIR/../..
TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1`
LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6|rev`/lib
fi
# Now getting ready to execute Python
# The following is the default of our standard dev env (Ubuntu 20.04), modify/adjust at your own risk
PYTHON_EXEC=python3.8
# First we need to set up a path for Python to find our own TAOS modules, so that "import" can work.
export PYTHONPATH=$(pwd)/../../src/connector/python:$(pwd)
# Then let us set up the library path so that our compiled SO file can be loaded by Python
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR
# Now we are all set; let's see if we can find a crash. Note we pass along all parameters
PERF_GEN_EXEC=perf_gen.py
$PYTHON_EXEC $PERF_GEN_EXEC "$@"
......@@ -4,7 +4,7 @@
# 2. No files are needed outside the development tree, everything is done in the local source code directory
# First we need to set up a path for Python to find our own TAOS modules, so that "import" can work.
export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3
export PYTHONPATH=$(pwd)/../../src/connector/python
# Then let us set up the library path so that our compiled SO file can be loaded by Python
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/../../build/build/lib
......
......@@ -88,10 +88,9 @@ class TDTestCase:
# TSIM:
# TSIM: print =============== step4
tdLog.info('=============== step4')
# TSIM: sql create table $tb (ts timestamp,
# a0123456789012345678901234567890123456789 int)
# TSIM: sql create table $tb (ts timestamp, a0123456789012345678901234567890123456789 int)
getMaxColNum = "grep -w '#define TSDB_COL_NAME_LEN' ../../src/inc/taosdef.h|awk '{print $3}'"
boundary = int(subprocess.check_output(getMaxColNum, shell=True))
boundary = int(subprocess.check_output(getMaxColNum, shell=True)) - 1
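# (The -1 above assumes TSDB_COL_NAME_LEN counts the terminating NUL, so the
# longest usable column name is LEN - 1 characters.)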
tdLog.info("get max column name length is %d" % boundary)
chars = string.ascii_uppercase + string.ascii_lowercase
......
......@@ -10,7 +10,7 @@
#
###################################################################
# install pip
# pip install src/connector/python/linux/python2/
# pip install src/connector/python/
# -*- coding: utf-8 -*-
import sys
......
......@@ -13,7 +13,7 @@ else
fi
TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1`
LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6|rev`/lib
export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3
export PYTHONPATH=$(pwd)/../../src/connector/python
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR
if [[ "$1" == *"test.py"* ]]; then
......
......@@ -10,7 +10,7 @@
#
###################################################################
# install pip
# pip install src/connector/python/linux/python2/
# pip install src/connector/python/
# -*- coding: utf-8 -*-
import sys
......
......@@ -10,7 +10,7 @@
#
###################################################################
# install pip
# pip install src/connector/python/linux/python2/
# pip install src/connector/python/
# -*- coding: utf-8 -*-
import sys
......
......@@ -10,7 +10,7 @@
#
###################################################################
# install pip
# pip install src/connector/python/linux/python2/
# pip install src/connector/python/
# -*- coding: utf-8 -*-
import sys
......
......@@ -22,7 +22,7 @@ sql_error alter table mt1 change tag a 1
sql_error create table mtx1 (ts timestamp, c1 int) tags (123 int)
sql create table mt2 (ts timestamp, c1 int) tags (abc012345678901234567890123456789012345678901234567890123456789def int)
sql_error create table mt2 (ts timestamp, c1 int) tags (abc012345678901234567890123456789012345678901234567890123456789def int)
sql create table mt3 (ts timestamp, c1 int) tags (abc012345678901234567890123456789012345678901234567890123456789 int)
sql_error alter table mt3 change tag abc012345678901234567890123456789012345678901234567890123456789 abcdefg012345678901234567890123456789012345678901234567890123456789
sql alter table mt3 change tag abc012345678901234567890123456789012345678901234567890123456789 abcdefg0123456789012345678901234567890123456789
......
......@@ -114,7 +114,11 @@ sql_error create table $tb (ts timestamp, $tag int)
sql_error create table $tb (ts timestamp, $tags int)
sql_error create table $tb (ts timestamp, $sint int)
sql_error create table $tb (ts timestamp, $tint int)
sql_error create table $tb (ts timestamp, $nchar int)
sql_error create table $tb (ts timestamp, $nchar int)
# too long column name
sql_error create table $tb (ts timestamp, abcde_123456789_123456789_123456789_123456789_123456789_123456789 int)
sql_error create table tx(ts timestamp, k int) tags(abcd5_123456789_123456789_123456789_123456789_123456789_123456789 int)
print illegal_column_names test passed
# case5: chinese_char_in_table_support
......
......@@ -119,4 +119,8 @@ if $rows != 4 then
return -1
endi
print ================>td-4147
sql_error create table tx(ts timestamp, a1234_0123456789_0123456789_0123456789_0123456789_0123456789_0123456789 int)
system sh/exec.sh -n dnode1 -s stop -x SIGINT