TDengine (fork of taosdata/TDengine)

Commit 529f906e, authored on Jun 25, 2021 by tickduan

Merge branch 'develop' into fix/lhhuo/TD-4872-O3-core

Parents: f4931b4d, d5155996
Showing 9 changed files with 648 additions and 185 deletions (+648 -185)
README-CN.md                          +8   -2
README.md                             +6   -0
src/common/inc/tdataformat.h          +5   -0
src/common/src/tdataformat.c          +1   -1
src/kit/taosdemo/taosdemo.c           +65  -83
tests/pytest/fulltest.sh              +1   -1
tests/pytest/insert/in_function.py    +486 -98
tests/pytest/tools/taosdumpTest.py    +2   -0
tests/pytest/tools/taosdumpTest2.py   +74  -0
README-CN.md

@@ -23,7 +23,7 @@ TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维
 TDengine是一个高效的存储、查询、分析时序大数据的平台,专为物联网、车联网、工业互联网、运维监测等优化而设计。您可以像使用关系型数据库MySQL一样来使用它,但建议您在使用前仔细阅读一遍下面的文档,特别是[数据模型](https://www.taosdata.com/cn/documentation/architecture)与[数据建模](https://www.taosdata.com/cn/documentation/model)。除本文档之外,欢迎[下载产品白皮书](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf)。
 
-# 生成
+# 构建
 
 TDengine目前2.0版服务器仅能在Linux系统上安装和运行,后续会支持Windows、macOS等系统。客户端可以在Windows或Linux上安装和运行。任何OS的应用也可以选择RESTful接口连接服务器taosd。CPU支持X64/ARM64/MIPS64/Alpha64,后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。本快速指南仅适用于通过源码安装。

@@ -107,7 +107,7 @@ Go 连接器和 Grafana 插件在其他独立仓库,如果安装它们的话
 git submodule update --init --recursive
 ```
 
-## 生成 TDengine
+## 构建 TDengine
 
 ### Linux 系统

@@ -116,6 +116,12 @@ mkdir debug && cd debug
 cmake .. && cmake --build .
 ```
 
+您可以选择使用 Jemalloc 作为内存分配器,替代默认的 glibc:
+```bash
+apt install autoconf
+cmake .. -DJEMALLOC_ENABLED=true
+```
+
 在X86-64、X86、arm64、arm32 和 mips64 平台上,TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型,如 aarch64 或 aarch32 等。
 
 aarch64:
README.md

@@ -110,6 +110,12 @@ mkdir debug && cd debug
 cmake .. && cmake --build .
 ```
 
+You can use Jemalloc as memory allocator instead of glibc:
+```
+apt install autoconf
+cmake .. -DJEMALLOC_ENABLED=true
+```
+
 TDengine build script can detect the host machine's architecture on X86-64, X86, arm64, arm32 and mips64 platform.
 You can also specify CPUTYPE option like aarch64 or aarch32 too if the detection result is not correct:
src/common/inc/tdataformat.h

@@ -289,6 +289,11 @@ static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) {
   }
 }
 
+static FORCE_INLINE TSKEY dataColsKeyAtRow(SDataCols *pCols, int row) {
+  ASSERT(row < pCols->numOfRows);
+  return dataColsKeyAt(pCols, row);
+}
+
 static FORCE_INLINE TSKEY dataColsKeyFirst(SDataCols *pCols) {
   if (pCols->numOfRows) {
     return dataColsKeyAt(pCols, 0);
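The new dataColsKeyAtRow accessor generalizes the existing dataColsKeyFirst helper: both forward to dataColsKeyAt, but the row variant first asserts that the requested index is in range. A minimal sketch of the relationship, with the function name show_equivalence being a hypothetical name used only for illustration (not part of the commit):

```c
/* Sketch only, not from the diff: for a non-empty SDataCols the two
 * accessors agree on row 0; the row variant additionally bounds-checks. */
static void show_equivalence(SDataCols *pCols) {
  if (pCols->numOfRows > 0) {
    TSKEY a = dataColsKeyFirst(pCols);     /* key of the first row */
    TSKEY b = dataColsKeyAtRow(pCols, 0);  /* same key, after ASSERT(0 < numOfRows) */
    ASSERT(a == b);
  }
}
```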
src/common/src/tdataformat.c

@@ -452,7 +452,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *
   SDataCols *pTarget = NULL;
 
-  if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyFirst(source))) {  // No overlap
+  if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyAtRow(source, *pOffset))) {  // No overlap
     ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints);
     for (int i = 0; i < rowsToMerge; i++) {
       for (int j = 0; j < source->numOfCols; j++) {
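The one-line change above matters when tdMergeDataCols is asked to merge starting from a non-zero *pOffset: the fast append path should compare the target's last key against the first source row that will actually be merged, not against source row 0. A hedged sketch of the updated predicate, where canFastAppend is a hypothetical helper name introduced only for illustration:

```c
/* Hypothetical helper (not in the codebase) restating the updated check:
 * appending without a real merge is safe if the target is empty, or if the
 * source rows to be merged (from *pOffset onward, assuming key-sorted rows)
 * all start after the target's last key. */
static bool canFastAppend(SDataCols *target, SDataCols *source, int *pOffset) {
  return (target->numOfRows == 0) ||
         (dataColsKeyLast(target) < dataColsKeyAtRow(source, *pOffset));
}
```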
src/kit/taosdemo/taosdemo.c

@@ -79,10 +79,9 @@ enum TEST_MODE {
 #define MAX_SQL_SIZE       65536
 #define BUFFER_SIZE        (65536*2)
-#define COND_BUF_LEN       BUFFER_SIZE - 30
+#define COND_BUF_LEN       (BUFFER_SIZE - 30)
 #define MAX_USERNAME_SIZE  64
 #define MAX_PASSWORD_SIZE  64
-#define MAX_DB_NAME_SIZE   64
 #define MAX_HOSTNAME_SIZE  64
 #define MAX_TB_NAME_SIZE   64
 #define MAX_DATA_SIZE      (16*1024)+20     // max record len: 16*1024, timestamp string and ,('') need extra space
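The added parentheses around BUFFER_SIZE - 30 are the usual macro-hygiene fix: without them, arithmetic applied at an expansion site binds to the trailing "- 30" instead of the whole expression. A small illustration with stand-in macro names (not taken from taosdemo.c):

```c
#define BUFFER_SIZE  (65536*2)

#define COND_BUF_LEN_OLD  BUFFER_SIZE - 30     /* unparenthesized, pre-commit form  */
#define COND_BUF_LEN_NEW  (BUFFER_SIZE - 30)   /* parenthesized, as in this commit  */

int half_old = COND_BUF_LEN_OLD / 2;  /* expands to 65536*2 - 30/2  -> 131057 */
int half_new = COND_BUF_LEN_NEW / 2;  /* expands to (65536*2 - 30)/2 -> 65521 */
```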
@@ -90,7 +89,7 @@ enum TEST_MODE {
 #define OPT_ABORT          1 /* –abort */
 #define STRING_LEN         60000
 #define MAX_PREPARED_RAND  1000000
-#define MAX_FILE_NAME_LEN  128
+#define MAX_FILE_NAME_LEN  256              // max file name length on linux is 255.
 #define MAX_SAMPLES_ONCE_FROM_FILE  10000
 #define MAX_NUM_DATATYPE   10

@@ -195,13 +194,6 @@ enum _describe_table_index {
   TSDB_MAX_DESCRIBE_METRIC
 };
 
-typedef struct {
-  char field[TSDB_COL_NAME_LEN + 1];
-  char type[16];
-  int length;
-  char note[128];
-} SColDes;
-
 /* Used by main to communicate with parse_opt. */
 static char *g_dupstr = NULL;

@@ -247,16 +239,16 @@ typedef struct SArguments_S {
 } SArguments;
 
 typedef struct SColumn_S {
-  char      field[TSDB_COL_NAME_LEN + 1];
-  char      dataType[MAX_TB_NAME_SIZE];
+  char      field[TSDB_COL_NAME_LEN];
+  char      dataType[16];
   uint32_t  dataLen;
   char      note[128];
 } StrColumn;
 
 typedef struct SSuperTable_S {
-  char      sTblName[MAX_TB_NAME_SIZE + 1];
-  char      dataSource[MAX_TB_NAME_SIZE + 1];  // rand_gen or sample
-  char      childTblPrefix[MAX_TB_NAME_SIZE];
+  char      sTblName[TSDB_TABLE_NAME_LEN];
+  char      dataSource[MAX_TB_NAME_SIZE];      // rand_gen or sample
+  char      childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq
   char      insertMode[MAX_TB_NAME_SIZE];      // taosc, rest
   uint16_t  childTblExists;
   int64_t   childTblCount;

@@ -277,8 +269,8 @@ typedef struct SSuperTable_S {
   int64_t   timeStampStep;
   char      startTimestamp[MAX_TB_NAME_SIZE];
   char      sampleFormat[MAX_TB_NAME_SIZE];    // csv, json
-  char      sampleFile[MAX_FILE_NAME_LEN + 1];
-  char      tagsFile[MAX_FILE_NAME_LEN + 1];
+  char      sampleFile[MAX_FILE_NAME_LEN];
+  char      tagsFile[MAX_FILE_NAME_LEN];
   uint32_t  columnCount;
   StrColumn columns[MAX_COLUMN_COUNT];

@@ -305,7 +297,7 @@ typedef struct SSuperTable_S {
 } SSuperTable;
 
 typedef struct {
-  char     name[TSDB_DB_NAME_LEN + 1];
+  char     name[TSDB_DB_NAME_LEN];
   char     create_time[32];
   int64_t  ntables;
   int32_t  vgroups;

@@ -341,11 +333,11 @@ typedef struct SDbCfg_S {
   int   cache;
   int   blocks;
   int   quorum;
-  char  precision[MAX_TB_NAME_SIZE];
+  char  precision[8];
 } SDbCfg;
 
 typedef struct SDataBase_S {
-  char         dbName[MAX_DB_NAME_SIZE];
+  char         dbName[TSDB_DB_NAME_LEN];
   bool         drop;  // 0: use exists, 1: if exists, drop then new create
   SDbCfg       dbCfg;
   uint64_t     superTblCount;

@@ -353,14 +345,14 @@ typedef struct SDataBase_S {
 } SDataBase;
 
 typedef struct SDbs_S {
-  char        cfgDir[MAX_FILE_NAME_LEN + 1];
+  char        cfgDir[MAX_FILE_NAME_LEN];
   char        host[MAX_HOSTNAME_SIZE];
   struct      sockaddr_in serv_addr;
   uint16_t    port;
   char        user[MAX_USERNAME_SIZE];
   char        password[MAX_PASSWORD_SIZE];
-  char        resultFile[MAX_FILE_NAME_LEN + 1];
+  char        resultFile[MAX_FILE_NAME_LEN];
   bool        use_metric;
   bool        insert_only;
   bool        do_aggreFunc;

@@ -387,7 +379,7 @@ typedef struct SpecifiedQueryInfo_S {
   bool      subscribeRestart;
   int       subscribeKeepProgress;
   char      sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH + 1];
-  char      result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN + 1];
+  char      result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
   int       resubAfterConsume[MAX_QUERY_SQL_COUNT];
   int       endAfterConsume[MAX_QUERY_SQL_COUNT];
   TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];

@@ -398,7 +390,7 @@ typedef struct SpecifiedQueryInfo_S {
 } SpecifiedQueryInfo;
 
 typedef struct SuperQueryInfo_S {
-  char      sTblName[MAX_TB_NAME_SIZE + 1];
+  char      sTblName[TSDB_TABLE_NAME_LEN];
   uint64_t  queryInterval;  // 0: unlimit  > 0   loop/s
   uint32_t  threadCnt;
   uint32_t  asyncMode;  // 0: sync, 1: async

@@ -407,10 +399,10 @@ typedef struct SuperQueryInfo_S {
   int       subscribeKeepProgress;
   uint64_t  queryTimes;
   int64_t   childTblCount;
-  char      childTblPrefix[MAX_TB_NAME_SIZE];
+  char      childTblPrefix[TSDB_TABLE_NAME_LEN - 20];  // 20 characters reserved for seq
   int       sqlCount;
   char      sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH + 1];
-  char      result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN + 1];
+  char      result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN];
   int       resubAfterConsume;
   int       endAfterConsume;
   TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT];

@@ -420,13 +412,13 @@ typedef struct SuperQueryInfo_S {
 } SuperQueryInfo;
 
 typedef struct SQueryMetaInfo_S {
-  char         cfgDir[MAX_FILE_NAME_LEN + 1];
+  char         cfgDir[MAX_FILE_NAME_LEN];
   char         host[MAX_HOSTNAME_SIZE];
   uint16_t     port;
   struct       sockaddr_in serv_addr;
   char         user[MAX_USERNAME_SIZE];
   char         password[MAX_PASSWORD_SIZE];
-  char         dbName[MAX_DB_NAME_SIZE + 1];
+  char         dbName[TSDB_DB_NAME_LEN];
   char         queryMode[MAX_TB_NAME_SIZE];  // taosc, rest
   SpecifiedQueryInfo  specifiedQueryInfo;

@@ -438,11 +430,11 @@ typedef struct SThreadInfo_S {
   TAOS *     taos;
   TAOS_STMT *stmt;
   int        threadID;
-  char       db_name[MAX_DB_NAME_SIZE + 1];
+  char       db_name[TSDB_DB_NAME_LEN];
   uint32_t   time_precision;
   char       filePath[4096];
   FILE *     fp;
-  char       tb_prefix[MAX_TB_NAME_SIZE];
+  char       tb_prefix[TSDB_TABLE_NAME_LEN];
   uint64_t   start_table_from;
   uint64_t   end_table_to;
   int64_t    ntables;
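Throughout the struct changes above, ad-hoc "+ 1" sizes and the now-removed MAX_DB_NAME_SIZE give way to the engine-wide TSDB_*_LEN constants, and the later tstrncpy call sites are updated in lockstep so that the copy length always matches the destination field's declared capacity. A generic sketch of that sizing rule, using standard strncpy and hypothetical names and lengths rather than the real TSDB constants:

```c
#include <string.h>

#define DEMO_DB_NAME_LEN 33   /* stand-in for TSDB_DB_NAME_LEN; the value here is illustrative */

typedef struct { char dbName[DEMO_DB_NAME_LEN]; } DemoDb;

/* Bounded copy sized by the destination, with forced termination, which is
 * the behaviour the codebase's tstrncpy helper is expected to provide. */
static void demoSetDbName(DemoDb *db, const char *src) {
  strncpy(db->dbName, src, sizeof(db->dbName) - 1);
  db->dbName[sizeof(db->dbName) - 1] = '\0';
}
```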
@@ -990,9 +982,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
             arguments->len_of_binary = atoi(argv[++i]);
         } else if (strcmp(argv[i], "-m") == 0) {
             if ((argc == i+1) ||
-                (!isStringNumber(argv[i+1]))) {
+                (isStringNumber(argv[i+1]))) {
                 printHelp();
-                errorPrint("%s", "\n\t-m need a number following!\n");
+                errorPrint("%s", "\n\t-m need a letter-initial string following!\n");
                 exit(EXIT_FAILURE);
             }
             arguments->tb_prefix = argv[++i];

@@ -2501,6 +2493,13 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
     char* pTblName = childTblName;
     while((row = taos_fetch_row(res)) != NULL) {
         int32_t* len = taos_fetch_lengths(res);
+
+        if (0 == strlen((char *)row[0])) {
+            errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n",
+                    __func__, __LINE__, count);
+            exit(-1);
+        }
+
         tstrncpy(pTblName, (char *)row[0], len[0]+1);
         //printf("==== sub table name: %s\n", pTblName);
         count++;

@@ -3035,7 +3034,7 @@ static int startMultiThreadCreateChildTable(
     for (int64_t i = 0; i < threads; i++) {
         threadInfo *pThreadInfo = infos + i;
         pThreadInfo->threadID = i;
-        tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
+        tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
         pThreadInfo->superTblInfo = superTblInfo;
         verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name);
         pThreadInfo->taos = taos_connect(

@@ -3326,7 +3325,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
                 goto PARSE_OVER;
             }
             //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
-            tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
+            tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1);
 
             cJSON* dataLen = cJSON_GetObjectItem(column, "len");
             if (dataLen && dataLen->type == cJSON_Number) {

@@ -3341,7 +3340,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
         for (int n = 0; n < count; ++n) {
             tstrncpy(superTbls->columns[index].dataType,
-                    columnCase.dataType, MAX_TB_NAME_SIZE);
+                    columnCase.dataType, strlen(columnCase.dataType) + 1);
             superTbls->columns[index].dataLen = columnCase.dataLen;
             index++;
         }

@@ -3397,7 +3396,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
                     __func__, __LINE__);
             goto PARSE_OVER;
         }
-        tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
+        tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1);
 
         cJSON* dataLen = cJSON_GetObjectItem(tag, "len");
         if (dataLen && dataLen->type == cJSON_Number) {

@@ -3412,7 +3411,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(
         for (int n = 0; n < count; ++n) {
             tstrncpy(superTbls->tags[index].dataType, columnCase.dataType,
-                    MAX_TB_NAME_SIZE);
+                    strlen(columnCase.dataType) + 1);
             superTbls->tags[index].dataLen = columnCase.dataLen;
             index++;
         }
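In the JSON-parsing hunks above, the copy length for the short dataType strings switches from the destination-sized MAX_TB_NAME_SIZE to strlen(src) + 1, i.e. exactly the source text plus its terminator. That is safe only because the destination (dataType[16] after this commit) is at least as large as any accepted type name. A small illustration of the two sizing styles, with a hypothetical helper name and plain libc calls standing in for tstrncpy:

```c
#include <string.h>

/* Illustration only: copying "BIGINT" (6 chars) into a 16-byte field. */
static void copy_type_name(void) {
  char dataType[16];
  const char *valuestring = "BIGINT";

  /* destination-sized copy: bounded by the 16-byte field */
  strncpy(dataType, valuestring, sizeof(dataType) - 1);
  dataType[sizeof(dataType) - 1] = '\0';

  /* effect of passing strlen(src) + 1 as the bound, as this commit does:
   * copy exactly 7 bytes, which already include the terminating NUL */
  memcpy(dataType, valuestring, strlen(valuestring) + 1);
}
```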
@@ -3635,7 +3634,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
       printf("ERROR: failed to read json, db name not found\n");
       goto PARSE_OVER;
     }
-    tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE);
+    tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN);
 
     cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop");
     if (drop && drop->type == cJSON_String && drop->valuestring != NULL) {

@@ -3656,10 +3655,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     if (precision && precision->type == cJSON_String
             && precision->valuestring != NULL) {
       tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring,
-              MAX_DB_NAME_SIZE);
+              8);
     } else if (!precision) {
-      //tstrncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
-      memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE);
+      memset(g_Dbs.db[i].dbCfg.precision, 0, 8);
     } else {
       printf("ERROR: failed to read json, precision not found\n");
       goto PARSE_OVER;

@@ -3836,7 +3834,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
       goto PARSE_OVER;
     }
     tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring,
-            MAX_TB_NAME_SIZE);
+            TSDB_TABLE_NAME_LEN);
 
     cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix");
     if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) {

@@ -3844,7 +3842,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
      goto PARSE_OVER;
    }
    tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring,
-           MAX_DB_NAME_SIZE);
+           TSDB_TABLE_NAME_LEN - 20);
 
    cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table");
    if (autoCreateTbl

@@ -3912,9 +3910,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     if (dataSource && dataSource->type == cJSON_String
             && dataSource->valuestring != NULL) {
       tstrncpy(g_Dbs.db[i].superTbls[j].dataSource,
-              dataSource->valuestring, MAX_DB_NAME_SIZE);
+              dataSource->valuestring, TSDB_DB_NAME_LEN);
     } else if (!dataSource) {
-      tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE);
+      tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", TSDB_DB_NAME_LEN);
     } else {
       errorPrint("%s() LN%d, failed to read json, data_source not found\n",
           __func__, __LINE__);

@@ -3972,10 +3970,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     cJSON* ts = cJSON_GetObjectItem(stbInfo, "start_timestamp");
     if (ts && ts->type == cJSON_String && ts->valuestring != NULL) {
       tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
-              ts->valuestring, MAX_DB_NAME_SIZE);
+              ts->valuestring, TSDB_DB_NAME_LEN);
     } else if (!ts) {
       tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp,
-              "now", MAX_DB_NAME_SIZE);
+              "now", TSDB_DB_NAME_LEN);
     } else {
       printf("ERROR: failed to read json, start_timestamp not found\n");
       goto PARSE_OVER;

@@ -3995,9 +3993,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     if (sampleFormat && sampleFormat->type
             == cJSON_String && sampleFormat->valuestring != NULL) {
       tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat,
-              sampleFormat->valuestring, MAX_DB_NAME_SIZE);
+              sampleFormat->valuestring, TSDB_DB_NAME_LEN);
     } else if (!sampleFormat) {
-      tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE);
+      tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", TSDB_DB_NAME_LEN);
     } else {
       printf("ERROR: failed to read json, sample_format not found\n");
       goto PARSE_OVER;

@@ -4242,7 +4240,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
   cJSON* dbs = cJSON_GetObjectItem(root, "databases");
   if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) {
-    tstrncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE);
+    tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN);
   } else if (!dbs) {
     printf("ERROR: failed to read json, databases not found\n");
     goto PARSE_OVER;

@@ -4492,7 +4490,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) {
   if (stblname && stblname->type == cJSON_String
           && stblname->valuestring != NULL) {
     tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring,
-            MAX_TB_NAME_SIZE);
+            TSDB_TABLE_NAME_LEN);
   } else {
     errorPrint("%s() LN%d, failed to read json, super table name input error\n",
         __func__, __LINE__);
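Shrinking the precision field to 8 bytes works because the values stored there are short precision strings such as "ms", and the memset fallback now clears the same 8 bytes. As a quick check of the arithmetic (a standalone assertion, not something from the diff):

```c
/* "ms" needs 2 characters plus 1 terminating NUL = 3 bytes; "us" likewise.
 * Both fit comfortably in char precision[8]. */
_Static_assert(sizeof("ms") == 3, "2 chars + NUL");
_Static_assert(sizeof("ms") <= 8, "fits in precision[8]");
```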
@@ -6302,16 +6300,6 @@ static void startMultiThreadInsertData(int threads, char* db_name,
         }
     }
 
-    // read sample data from file first
-    if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
-                "sample", strlen("sample")))) {
-        if (0 != prepareSampleDataForSTable(superTblInfo)) {
-            errorPrint("%s() LN%d, prepare sample data for stable failed!\n",
-                    __func__, __LINE__);
-            exit(-1);
-        }
-    }
-
     TAOS* taos0 = taos_connect(
             g_Dbs.host, g_Dbs.user,
             g_Dbs.password, db_name, g_Dbs.port);

@@ -6417,7 +6405,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
     for (int i = 0; i < threads; i++) {
         threadInfo *pThreadInfo = infos + i;
         pThreadInfo->threadID = i;
-        tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE);
+        tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
         pThreadInfo->time_precision = timePrec;
         pThreadInfo->superTblInfo = superTblInfo;

@@ -6861,7 +6849,7 @@ static void *specifiedTableQuery(void *sarg) {
         }
     }
 
-    char sqlStr[MAX_DB_NAME_SIZE + 5];
+    char sqlStr[TSDB_DB_NAME_LEN + 5];
     sprintf(sqlStr, "use %s", g_queryInfo.dbName);
     if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) {
         taos_close(pThreadInfo->taos);
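The sqlStr buffer change keeps the same "+ 5" headroom on top of the database-name capacity, which is exactly what the "use %s" statement needs: 4 characters for "use " plus the terminating NUL. A quick worked check with a hypothetical constant and function name standing in for the real ones:

```c
#include <stdio.h>

#define DEMO_DB_NAME_LEN 33   /* stand-in for TSDB_DB_NAME_LEN; value is illustrative */

static void demo_use_statement(const char *dbName) {
  /* dbName is at most DEMO_DB_NAME_LEN - 1 characters, so the longest
   * "use <db>" string is 4 + (DEMO_DB_NAME_LEN - 1) + 1 = DEMO_DB_NAME_LEN + 4
   * bytes, which fits in the DEMO_DB_NAME_LEN + 5 buffer. */
  char sqlStr[DEMO_DB_NAME_LEN + 5];
  snprintf(sqlStr, sizeof(sqlStr), "use %s", dbName);
  (void)sqlStr;
}
```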
@@ -7337,12 +7325,6 @@ static void *superSubscribe(void *sarg) {
             performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st));
             if (res) {
-                if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
-                    sprintf(pThreadInfo->filePath, "%s-%d",
-                            g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],
-                            pThreadInfo->threadID);
-                    fetchResult(res, pThreadInfo);
-                }
                 if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) {
                     sprintf(pThreadInfo->filePath, "%s-%d",
                             g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq],

@@ -7449,10 +7431,10 @@ static void *specifiedSubscribe(void *sarg) {
                 sprintf(pThreadInfo->filePath, "%s-%d",
                         g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq],
                         pThreadInfo->threadID);
+            }
             fetchResult(
                     g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID],
                     pThreadInfo);
-            }
             g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID]++;
             if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1)

@@ -7689,9 +7671,9 @@ static void setParaFromArg(){
   g_Dbs.dbCount = 1;
 
   g_Dbs.db[0].drop = true;
-  tstrncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE);
+  tstrncpy(g_Dbs.db[0].dbName, g_args.database, TSDB_DB_NAME_LEN);
   g_Dbs.db[0].dbCfg.replica = g_args.replica;
-  tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
+  tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", 8);
   tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN);

@@ -7713,7 +7695,7 @@ static void setParaFromArg(){
   if (g_args.use_metric) {
     g_Dbs.db[0].superTblCount = 1;
-    tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE);
+    tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", TSDB_TABLE_NAME_LEN);
     g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables;
     g_Dbs.threadCount = g_args.num_of_threads;
     g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;

@@ -7724,7 +7706,7 @@ static void setParaFromArg(){
     g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange;
     g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio;
     tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix,
-            g_args.tb_prefix, MAX_TB_NAME_SIZE);
+            g_args.tb_prefix, TSDB_TABLE_NAME_LEN - 20);
     tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE);
     g_Dbs.db[0].superTbls[0].iface = g_args.iface;
     tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp,

@@ -7741,7 +7723,7 @@ static void setParaFromArg(){
       }
       tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
-              data_type[i], MAX_TB_NAME_SIZE);
+              data_type[i], strlen(data_type[i]) + 1);
       g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary;
       g_Dbs.db[0].superTbls[0].columnCount++;
     }

@@ -7752,18 +7734,18 @@ static void setParaFromArg(){
       for (int i = g_Dbs.db[0].superTbls[0].columnCount;
               i < g_args.num_of_CPR; i++) {
         tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType,
-                "INT", MAX_TB_NAME_SIZE);
+                "INT", strlen("INT") + 1);
         g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0;
         g_Dbs.db[0].superTbls[0].columnCount++;
       }
     }
 
     tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType,
-            "INT", MAX_TB_NAME_SIZE);
+            "INT", strlen("INT") + 1);
     g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0;
     tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType,
-            "BINARY", MAX_TB_NAME_SIZE);
+            "BINARY", strlen("BINARY") + 1);
     g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary;
     g_Dbs.db[0].superTbls[0].tagCount = 2;
   } else {

@@ -7899,11 +7881,11 @@ static void queryResult() {
     pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1;
     pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0];
     tstrncpy(pThreadInfo->tb_prefix,
-            g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE);
+            g_Dbs.db[0].superTbls[0].childTblPrefix, TSDB_TABLE_NAME_LEN - 20);
   } else {
     pThreadInfo->ntables = g_args.num_of_tables;
     pThreadInfo->end_table_to = g_args.num_of_tables - 1;
-    tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE);
+    tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN);
   }
   pThreadInfo->taos = taos_connect(
tests/pytest/fulltest.sh

@@ -235,7 +235,7 @@ python3 ./test.py -f query/queryTscomputWithNow.py
 python3 ./test.py -f query/computeErrorinWhere.py
 python3 ./test.py -f query/queryTsisNull.py
 python3 ./test.py -f query/subqueryFilter.py
-python3 ./test.py -f query/nestedQuery/queryInterval.py
+# python3 ./test.py -f query/nestedQuery/queryInterval.py
 python3 ./test.py -f query/queryStateWindow.py
tests/pytest/insert/in_function.py

@@ -18,7 +18,6 @@ from util.log import *
 from util.cases import *
 from util.sql import *
-
 class TDTestCase:
     def init(self, conn, logSql):
         tdLog.debug("start to execute %s" % __file__)

@@ -27,6 +26,7 @@ class TDTestCase:
     def run(self):
         tdSql.prepare()
         # test case for https://jira.taosdata.com:18080/browse/TD-4568
+        # test case for https://jira.taosdata.com:18080/browse/TD-4824
 
         tdLog.info("=============== step1,check bool and tinyint data type")

@@ -137,8 +137,28 @@ class TDTestCase:
         tdSql.checkData(0,1,'True')
         tdSql.checkData(0,2,'0')
 
-        tdLog.info("=============== step1.3,drop normal table && create table")
+        tdLog.info("=============== step1.3,multiple column and multiple tag check in function")
+        cmd1 = '''select * from in_stable_1 
+                  where in_bool in (true,false) and in_tinyint in (0,127,-127) 
+                  and tin_bool in (true,false) and tin_tinyint in (0,127,-127) 
+                  order by ts desc ;'''
+        tdLog.info(cmd1)
+        tdSql.query(cmd1)
+        tdSql.checkData(0,1,'True')
+        tdSql.checkData(0,2,'0')
+        tdSql.checkData(0,3,'False')
+        tdSql.checkData(0,4,'0')
+        tdSql.checkData(1,1,'False')
+        tdSql.checkData(1,2,'127')
+        tdSql.checkData(1,3,'False')
+        tdSql.checkData(1,4,'-127')
+        tdSql.checkData(2,1,'True')
+        tdSql.checkData(2,2,'-127')
+        tdSql.checkData(2,3,'True')
+        tdSql.checkData(2,4,'127')
+
+        tdLog.info("=============== step1.4,drop normal table && create table")
         cmd1 = 'drop table if exists normal_in_bool_tinyint_1 ;'
         cmd2 = 'create table normal_in_bool_tinyint_1 (ts timestamp,in_bool bool,in_tinyint tinyint) ; '
         tdLog.info(cmd1)

@@ -147,7 +167,7 @@ class TDTestCase:
         tdSql.execute(cmd2)
 
-        tdLog.info("=============== step1.4,insert normal table right data and check in function")
+        tdLog.info("=============== step1.5,insert normal table right data and check in function")
         cmd1 = 'insert into normal_in_bool_tinyint_1 values(now,\'true\',\'-127\') ;'
         tdLog.info(cmd1)
         tdSql.execute(cmd1)

@@ -175,6 +195,17 @@ class TDTestCase:
         tdSql.checkData(0,1,'True')
         tdSql.checkData(0,2,'0')
 
+        cmd4 = '''select * from normal_in_bool_tinyint_1 
+                  where in_bool in (true,false) and in_tinyint in (0,127,-127) 
+                  order by ts desc ;'''
+        tdLog.info(cmd4)
+        tdSql.query(cmd4)
+        tdSql.checkData(0,1,'True')
+        tdSql.checkData(0,2,'0')
+        tdSql.checkData(1,1,'False')
+        tdSql.checkData(1,2,'127')
+        tdSql.checkData(2,1,'True')
+        tdSql.checkData(2,2,'-127')
 
         tdLog.info("=============== step2,check int、smallint and bigint data type")

@@ -381,7 +412,36 @@ class TDTestCase:
         tdSql.checkData(0,3,'-9223372036854775807')
 
-        tdLog.info("=============== step2.3,drop normal table && create table")
+        tdLog.info("=============== step2.3,multiple column and multiple tag check in function")
+        cmd1 = '''select * from in_stable_2 
+                  where in_int in (0,2147483647,-2147483647) and in_small in (0,32767,-32767) 
+                  and in_big in (0,9223372036854775807,-9223372036854775807) 
+                  and tin_int in (0,2147483647,-2147483647) and tin_small in (0,32767,-32767) 
+                  and tin_big in (0,9223372036854775807,-9223372036854775807) 
+                  order by ts desc ;'''
+        tdLog.info(cmd1)
+        tdSql.query(cmd1)
+        tdSql.checkData(0,1,'0')
+        tdSql.checkData(0,2,'32767')
+        tdSql.checkData(0,3,'-9223372036854775807')
+        tdSql.checkData(0,4,'0')
+        tdSql.checkData(0,5,'32767')
+        tdSql.checkData(0,6,'-9223372036854775807')
+        tdSql.checkData(1,1,'-2147483647')
+        tdSql.checkData(1,2,'0')
+        tdSql.checkData(1,3,'9223372036854775807')
+        tdSql.checkData(1,4,'-2147483647')
+        tdSql.checkData(1,5,'0')
+        tdSql.checkData(1,6,'9223372036854775807')
+        tdSql.checkData(2,1,'2147483647')
+        tdSql.checkData(2,2,'-32767')
+        tdSql.checkData(2,3,'0')
+        tdSql.checkData(2,4,'2147483647')
+        tdSql.checkData(2,5,'-32767')
+        tdSql.checkData(2,6,'0')
+
+        tdLog.info("=============== step2.4,drop normal table && create table")
         cmd1 = 'drop table if exists normal_int_smallint_bigint_1 ;'
         cmd2 = 'create table normal_int_smallint_bigint_1 (ts timestamp,in_int int,in_small smallint , in_big bigint) ; '
         tdLog.info(cmd1)

@@ -390,7 +450,7 @@ class TDTestCase:
         tdSql.execute(cmd2)
 
-        tdLog.info("=============== step2.4,insert normal table right data and check in function")
+        tdLog.info("=============== step2.5,insert normal table right data and check in function")
         cmd1 = 'insert into normal_int_smallint_bigint_1 values(now,\'2147483647\',\'-32767\',\'0\') ;'
         tdLog.info(cmd1)
         tdSql.execute(cmd1)

@@ -439,6 +499,22 @@ class TDTestCase:
         tdSql.checkData(0,2,'32767')
         tdSql.checkData(0,3,'-9223372036854775807')
 
+        cmd4 = '''select * from normal_int_smallint_bigint_1 
+                  where in_int in (0,2147483647,-2147483647) and in_small in (0,32767,-32767) 
+                  and in_big in (0,9223372036854775807,-9223372036854775807) 
+                  order by ts desc ;'''
+        tdLog.info(cmd4)
+        tdSql.query(cmd4)
+        tdSql.checkData(0,1,'0')
+        tdSql.checkData(0,2,'32767')
+        tdSql.checkData(0,3,'-9223372036854775807')
+        tdSql.checkData(1,1,'-2147483647')
+        tdSql.checkData(1,2,'0')
+        tdSql.checkData(1,3,'9223372036854775807')
+        tdSql.checkData(2,1,'2147483647')
+        tdSql.checkData(2,2,'-32767')
+        tdSql.checkData(2,3,'0')
 
         tdLog.info("=============== step3,check binary and nchar data type")

@@ -560,7 +636,30 @@ class TDTestCase:
         tdSql.checkData(0,2,'北京涛思数据科技有限公司')
 
-        tdLog.info("=============== step3.3,drop normal table && create table")
+        tdLog.info("=============== step3.3,multiple column and multiple tag check in function")
+        cmd1 = '''select * from in_stable_3 
+                  where in_binary in (\'0\',\'TDengine\',\'TAOS\') 
+                  and in_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'涛思数据TAOSdata\') 
+                  and tin_binary in (\'0\',\'TDengine\',\'taosdataTDengine\') 
+                  and tin_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'北京涛思数据科技有限公司TDengine\') 
+                  order by ts desc ;'''
+        tdLog.info(cmd1)
+        tdSql.query(cmd1)
+        tdSql.checkData(0,1,'TDengine')
+        tdSql.checkData(0,2,'北京涛思数据科技有限公司')
+        tdSql.checkData(0,3,'taosdataTDengine')
+        tdSql.checkData(0,4,'北京涛思数据科技有限公司TDengine')
+        tdSql.checkData(1,1,'TAOS')
+        tdSql.checkData(1,2,'涛思数据TAOSdata')
+        tdSql.checkData(1,3,'TDengine')
+        tdSql.checkData(1,4,'北京涛思数据科技有限公司')
+        tdSql.checkData(2,1,'0')
+        tdSql.checkData(2,2,'0')
+        tdSql.checkData(2,3,'0')
+        tdSql.checkData(2,4,'0')
+
+        tdLog.info("=============== step3.4,drop normal table && create table")
         cmd1 = 'drop table if exists normal_in_binary_nchar_1 ;'
         cmd2 = 'create table normal_in_binary_nchar_1 (ts timestamp,in_binary binary(8),in_nchar nchar(12)) ; '
         tdLog.info(cmd1)

@@ -569,7 +668,7 @@ class TDTestCase:
         tdSql.execute(cmd2)
 
-        tdLog.info("=============== step3.4,insert normal table right data and check in function")
+        tdLog.info("=============== step3.5,insert normal table right data and check in function")
         cmd1 = 'insert into normal_in_binary_nchar_1 values(now,\'0\',\'0\') ;'
         tdLog.info(cmd1)
         tdSql.execute(cmd1)
@@ -600,122 +699,411 @@ class TDTestCase:
...
@@ -600,122 +699,411 @@ class TDTestCase:
tdSql
.
checkData
(
0
,
1
,
'TDengine'
)
tdSql
.
checkData
(
0
,
1
,
'TDengine'
)
tdSql
.
checkData
(
0
,
2
,
'北京涛思数据科技有限公司'
)
tdSql
.
checkData
(
0
,
2
,
'北京涛思数据科技有限公司'
)
tdLog
.
info
(
"=============== step4,check float and double data type,not support"
)
cmd4
=
'''select * from normal_in_binary_nchar_1
where in_binary in (
\'
0
\'
,
\'
TDengine
\'
,
\'
TAOS
\'
)
and in_nchar in (
\'
0
\'
,
\'
北京涛思数据科技有限公司
\'
,
\'
涛思数据TAOSdata
\'
)
order by ts desc ;'''
tdLog
.
info
(
cmd4
)
tdSql
.
query
(
cmd4
)
tdSql
.
checkData
(
0
,
1
,
'TDengine'
)
tdSql
.
checkData
(
0
,
2
,
'北京涛思数据科技有限公司'
)
tdSql
.
checkData
(
1
,
1
,
'TAOS'
)
tdSql
.
checkData
(
1
,
2
,
'涛思数据TAOSdata'
)
tdSql
.
checkData
(
2
,
1
,
'0'
)
tdSql
.
checkData
(
2
,
2
,
'0'
)
tdLog
.
info
(
"=============== step4,check float and double data type"
)
tdLog
.
info
(
"=============== step4.1,drop table && create table"
)
tdLog
.
info
(
"=============== step4.1,drop table && create table"
)
cmd1
=
'drop table if exists in_float_double_1 ;'
cmd1
=
'drop table if exists in_ts_float_double_1 ;'
cmd2
=
'drop table if exists in_ts_float_double_2 ;'
cmd3
=
'drop table if exists in_ts_float_double_3 ;'
cmd10
=
'drop table if exists in_stable_4 ;'
cmd10
=
'drop table if exists in_stable_4 ;'
cmd11
=
'create stable in_stable_4(ts timestamp,in_float float,in_double double) tags (tin_float float,tin_double double) ;'
cmd11
=
'create stable in_stable_4(ts timestamp,in_ts timestamp,in_float float,in_double double) tags (tin_ts timestamp,tin_float float,tin_double double) ;'
cmd12
=
'create table in_float_double_1 using in_stable_4 tags(
\'
666
\'
,
\'
88888
\'
) ; '
cmd12
=
'create table in_ts_float_double_1 using in_stable_4 tags(
\'
0
\'
,
\'
0
\'
,
\'
0
\'
) ; '
cmd13
=
'create table in_ts_float_double_2 using in_stable_4 tags(
\'
2020-01-01 08:00:00.001
\'
,
\'
666
\'
,
\'
-88888
\'
) ; '
cmd14
=
'create table in_ts_float_double_3 using in_stable_4 tags(
\'
2021-01-01 08:00:00.001
\'
,
\'
-888.00000
\'
,
\'
66666.000000000
\'
) ; '
tdLog
.
info
(
cmd1
)
tdLog
.
info
(
cmd1
)
tdSql
.
execute
(
cmd1
)
tdSql
.
execute
(
cmd1
)
tdLog
.
info
(
cmd2
)
tdSql
.
execute
(
cmd2
)
tdLog
.
info
(
cmd3
)
tdSql
.
execute
(
cmd3
)
tdLog
.
info
(
cmd10
)
tdLog
.
info
(
cmd10
)
tdSql
.
execute
(
cmd10
)
tdSql
.
execute
(
cmd10
)
tdLog
.
info
(
cmd11
)
tdLog
.
info
(
cmd11
)
tdSql
.
execute
(
cmd11
)
tdSql
.
execute
(
cmd11
)
tdLog
.
info
(
cmd12
)
tdLog
.
info
(
cmd12
)
tdSql
.
execute
(
cmd12
)
tdSql
.
execute
(
cmd12
)
tdLog
.
info
(
cmd13
)
tdSql
.
execute
(
cmd13
)
tdLog
.
info
(
cmd14
)
tdSql
.
execute
(
cmd14
)
tdLog
.
info
(
"=============== step4.2,insert stable right data and check in function"
)
tdLog
.
info
(
"=============== step4.2,insert stable right data and check in function"
)
cmd1
=
'insert into in_
float_double_1 values(now,
\'
888
\'
,
\'
66666
\'
) ;'
cmd1
=
'insert into in_
ts_float_double_1 values(now,
\'
0
\'
,
\'
0
\'
,
\'
0
\'
) ;'
tdLog
.
info
(
cmd1
)
tdLog
.
info
(
cmd1
)
tdSql
.
execute
(
cmd1
)
tdSql
.
execute
(
cmd1
)
cmd2
=
'select * from in_stable_4 where in_float in (
\'
888
\'
);'
tdSql
.
query
(
'select * from in_stable_4 where in_ts in (
\'
0
\'
) order by ts desc'
)
tdSql
.
checkData
(
0
,
1
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
2
,
0.00000
)
tdSql
.
checkData
(
0
,
3
,
0.000000000
)
tdSql
.
checkData
(
0
,
4
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
5
,
0.00000
)
tdSql
.
checkData
(
0
,
6
,
0.000000000
)
tdSql
.
query
(
'select * from in_stable_4 where in_ts in (
\'
1970-01-01 08:00:00.000
\'
) order by ts desc'
)
tdSql
.
checkData
(
0
,
1
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
2
,
0.00000
)
tdSql
.
checkData
(
0
,
3
,
0.000000000
)
tdSql
.
checkData
(
0
,
4
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
5
,
0.00000
)
tdSql
.
checkData
(
0
,
6
,
0.000000000
)
tdSql
.
query
(
'select * from in_stable_4 where in_float in (0.00000) order by ts desc'
)
tdSql
.
checkData
(
0
,
1
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
2
,
0.00000
)
tdSql
.
checkData
(
0
,
3
,
0.000000000
)
tdSql
.
checkData
(
0
,
4
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
5
,
0.00000
)
tdSql
.
checkData
(
0
,
6
,
0.000000000
)
tdSql
.
query
(
'select * from in_stable_4 where in_double in (0.000000000) order by ts desc'
)
tdSql
.
checkData
(
0
,
1
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
2
,
0.00000
)
tdSql
.
checkData
(
0
,
3
,
0.000000000
)
tdSql
.
checkData
(
0
,
4
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
5
,
0.00000
)
tdSql
.
checkData
(
0
,
6
,
0.000000000
)
tdSql
.
query
(
'select * from in_stable_4 where tin_ts in (
\'
0
\'
) order by ts desc'
)
tdSql
.
checkData
(
0
,
1
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
2
,
0.00000
)
tdSql
.
checkData
(
0
,
3
,
0.000000000
)
tdSql
.
checkData
(
0
,
4
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
5
,
0.00000
)
tdSql
.
checkData
(
0
,
6
,
0.000000000
)
tdSql
.
query
(
'select * from in_stable_4 where tin_ts in (
\'
1970-01-01 08:00:00.000
\'
) order by ts desc'
)
tdSql
.
checkData
(
0
,
1
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
2
,
0.00000
)
tdSql
.
checkData
(
0
,
3
,
0.000000000
)
tdSql
.
checkData
(
0
,
4
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
5
,
0.00000
)
tdSql
.
checkData
(
0
,
6
,
0.000000000
)
tdSql
.
query
(
'select * from in_stable_4 where tin_float in (0.00000) order by ts desc'
)
tdSql
.
checkData
(
0
,
1
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
2
,
0.00000
)
tdSql
.
checkData
(
0
,
3
,
0.000000000
)
tdSql
.
checkData
(
0
,
4
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
5
,
0.00000
)
tdSql
.
checkData
(
0
,
6
,
0.000000000
)
tdSql
.
query
(
'select * from in_stable_4 where tin_double in (0.000000000) order by ts desc'
)
tdSql
.
checkData
(
0
,
1
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
2
,
0.00000
)
tdSql
.
checkData
(
0
,
3
,
0.000000000
)
tdSql
.
checkData
(
0
,
4
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
5
,
0.00000
)
tdSql
.
checkData
(
0
,
6
,
0.000000000
)
tdSql
.
query
(
'select * from in_ts_float_double_1 where in_ts in (
\'
0
\'
) order by ts desc'
)
tdSql
.
checkData
(
0
,
1
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
2
,
0.00000
)
tdSql
.
checkData
(
0
,
3
,
0.000000000
)
tdSql
.
query
(
'select * from in_ts_float_double_1 where in_ts in (
\'
1970-01-01 08:00:00.000
\'
) order by ts desc'
)
tdSql
.
checkData
(
0
,
1
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
2
,
0.00000
)
tdSql
.
checkData
(
0
,
3
,
0.000000000
)
tdSql
.
query
(
'select * from in_ts_float_double_1 where in_float in (0.00000) order by ts desc'
)
tdSql
.
checkData
(
0
,
1
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
2
,
0.00000
)
tdSql
.
checkData
(
0
,
3
,
0.000000000
)
tdSql
.
query
(
'select * from in_ts_float_double_1 where in_double in (0.000000000) order by ts desc'
)
tdSql
.
checkData
(
0
,
1
,
'1970-01-01 08:00:00.000'
)
tdSql
.
checkData
(
0
,
2
,
0.00000
)
tdSql
.
checkData
(
0
,
3
,
0.000000000
)
cmd2
=
'insert into in_ts_float_double_2 values(now,
\'
2020-01-01 08:00:00.001
\'
,
\'
666
\'
,
\'
-88888
\'
) ;'
tdLog
.
info
(
cmd2
)
tdLog
.
info
(
cmd2
)
tdSql
.
error
(
cmd2
)
try
:
tdSql
.
execute
(
cmd2
)
tdSql
.
execute
(
cmd2
)
tdLog
.
exit
(
"invalid operation: not supported filter condition"
)
except
Exception
as
e
:
tdLog
.
info
(
repr
(
e
))
tdLog
.
info
(
"invalid operation: not supported filter condition"
)
cmd3
=
'select * from in_stable_4 where in_double in (
\'
66666
\'
);'
tdSql.query('select * from in_stable_4 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001000')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
tdSql.checkData(0, 4, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 5, 666.00000)
tdSql.checkData(0, 6, -88888.000000000)
tdSql.query('select * from in_stable_4 where in_ts in (\'1577836800001\') order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001000')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
tdSql.checkData(0, 4, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 5, 666.00000)
tdSql.checkData(0, 6, -88888.000000000)
tdSql.query('select * from in_stable_4 where in_float in (666.00000) order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001000')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
tdSql.checkData(0, 4, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 5, 666.00000)
tdSql.checkData(0, 6, -88888.000000000)
tdSql.query('select * from in_stable_4 where in_double in (-88888.000000000) order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001000')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
tdSql.checkData(0, 4, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 5, 666.00000)
tdSql.checkData(0, 6, -88888.000000000)
tdSql.query('select * from in_stable_4 where tin_ts in (\'2020-01-01 08:00:00.001000\') order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001000')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
tdSql.checkData(0, 4, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 5, 666.00000)
tdSql.checkData(0, 6, -88888.000000000)
tdSql.query('select * from in_stable_4 where tin_ts in (\'1577836800001\') order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001000')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
tdSql.checkData(0, 4, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 5, 666.00000)
tdSql.checkData(0, 6, -88888.000000000)
tdSql.query('select * from in_stable_4 where tin_float in (666.00000) order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001000')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
tdSql.checkData(0, 4, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 5, 666.00000)
tdSql.checkData(0, 6, -88888.000000000)
tdSql.query('select * from in_stable_4 where tin_double in (-88888.000000000) order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001000')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
tdSql.checkData(0, 4, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 5, 666.00000)
tdSql.checkData(0, 6, -88888.000000000)

tdSql.query('select * from in_ts_float_double_2 where in_ts in (\'1577836800001\') order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
tdSql.query('select * from in_ts_float_double_2 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
tdSql.query('select * from in_ts_float_double_2 where in_float in (666.00000) order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
tdSql.query('select * from in_ts_float_double_2 where in_double in (-88888.000000000) order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
cmd3 = 'insert into in_ts_float_double_3 values(now,\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ;'
tdLog.info(cmd3)
tdSql.execute(cmd3)
tdSql.query('select * from in_stable_4 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001000')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.checkData(0, 4, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 5, -888.00000)
tdSql.checkData(0, 6, 66666.000000000)
tdSql.query('select * from in_stable_4 where in_ts in (\'1609459200001\') order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001000')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.checkData(0, 4, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 5, -888.00000)
tdSql.checkData(0, 6, 66666.000000000)
tdSql.query('select * from in_stable_4 where in_float in (-888.00000) order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001000')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.checkData(0, 4, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 5, -888.00000)
tdSql.checkData(0, 6, 66666.000000000)
tdSql.query('select * from in_stable_4 where in_double in (66666.000000000) order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001000')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.checkData(0, 4, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 5, -888.00000)
tdSql.checkData(0, 6, 66666.000000000)
tdSql.query('select * from in_stable_4 where tin_ts in (\'2021-01-01 08:00:00.001000\') order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001000')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.checkData(0, 4, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 5, -888.00000)
tdSql.checkData(0, 6, 66666.000000000)
tdSql.query('select * from in_stable_4 where tin_ts in (\'1609459200001\') order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001000')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.checkData(0, 4, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 5, -888.00000)
tdSql.checkData(0, 6, 66666.000000000)
tdSql.query('select * from in_stable_4 where tin_float in (-888.00000) order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001000')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.checkData(0, 4, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 5, -888.00000)
tdSql.checkData(0, 6, 66666.000000000)
tdSql.query('select * from in_stable_4 where tin_double in (66666.000000000) order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001000')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.checkData(0, 4, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 5, -888.00000)
tdSql.checkData(0, 6, 66666.000000000)

tdSql.query('select * from in_ts_float_double_3 where in_ts in (\'1609459200001\') order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.query('select * from in_ts_float_double_3 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.query('select * from in_ts_float_double_3 where in_float in (-888.00000) order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.query('select * from in_ts_float_double_3 where in_double in (66666.000000000) order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdLog.info("=============== step4.3,multiple column and multiple tag check in function")
cmd1 = '''select * from in_stable_4 
        where in_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\') 
        and in_float in (0.00000,666.00000,-888.00000) 
        and in_double in (0.000000000,66666.000000000,-88888.000000000) 
        and tin_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\') 
        and tin_float in (0.00000,666.00000,-888.00000) 
        and tin_double in (0.000000000,66666.000000000,-88888.000000000) 
        order by ts desc ;'''
tdLog.info(cmd1)
tdSql.query(cmd1)
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001000')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.checkData(0, 4, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 5, -888.00000)
tdSql.checkData(0, 6, 66666.000000000)
tdSql.checkData(1, 1, '2020-01-01 08:00:00.001000')
tdSql.checkData(1, 2, 666.00000)
tdSql.checkData(1, 3, -88888.000000000)
tdSql.checkData(1, 4, '2020-01-01 08:00:00.001')
tdSql.checkData(1, 5, 666.00000)
tdSql.checkData(1, 6, -88888.000000000)
tdSql.checkData(2, 1, '1970-01-01 08:00:00.000')
tdSql.checkData(2, 2, 0.00000)
tdSql.checkData(2, 3, 0.000000000)
tdSql.checkData(2, 4, '1970-01-01 08:00:00.000')
tdSql.checkData(2, 5, 0.00000)
tdSql.checkData(2, 6, 0.000000000)

tdLog.info("=============== step4.4,drop normal table && create table")
cmd1 = 'drop table if exists normal_in_ts_float_double_1 ;'
cmd2 = 'create table normal_in_ts_float_double_1 (ts timestamp,in_ts timestamp,in_float float,in_double double) ; '
tdLog.info(cmd1)
tdSql.execute(cmd1)
tdLog.info(cmd2)
tdSql.execute(cmd2)

tdLog.info("=============== step4.5,insert normal table right data and check in function")
cmd1 = 'insert into normal_in_ts_float_double_1 values(now,\'0\',\'0\',\'0\') ;'
tdLog.info(cmd1)
tdSql.execute(cmd1)

tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'0\') order by ts desc')
tdSql.checkData(0, 1, '1970-01-01 08:00:00.000')
tdSql.checkData(0, 2, 0.00000)
tdSql.checkData(0, 3, 0.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc')
tdSql.checkData(0, 1, '1970-01-01 08:00:00.000')
tdSql.checkData(0, 2, 0.00000)
tdSql.checkData(0, 3, 0.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (0.00000) order by ts desc')
tdSql.checkData(0, 1, '1970-01-01 08:00:00.000')
tdSql.checkData(0, 2, 0.00000)
tdSql.checkData(0, 3, 0.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (0.000000000) order by ts desc')
tdSql.checkData(0, 1, '1970-01-01 08:00:00.000')
tdSql.checkData(0, 2, 0.00000)
tdSql.checkData(0, 3, 0.000000000)

cmd2 = 'insert into normal_in_ts_float_double_1 values(now,\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ;'
tdLog.info(cmd2)
tdSql.execute(cmd2)
tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1577836800001\') order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (666.00000) order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (-88888.000000000) order by ts desc')
tdSql.checkData(0, 1, '2020-01-01 08:00:00.001')
tdSql.checkData(0, 2, 666.00000)
tdSql.checkData(0, 3, -88888.000000000)

cmd3 = 'insert into normal_in_ts_float_double_1 values(now,\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ;'
tdLog.info(cmd3)
tdSql.execute(cmd3)
tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1609459200001\') order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (-888.00000) order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (66666.000000000) order by ts desc')
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)

cmd4 = '''select * from normal_in_ts_float_double_1 
        where in_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\') 
        and in_double in (0.000000000,66666.000000000,-88888.000000000) 
        and in_float in (0.00000,666.00000,-888.00000) 
        order by ts desc ;'''
tdLog.info(cmd4)
tdSql.query(cmd4)
tdSql.checkData(0, 1, '2021-01-01 08:00:00.001')
tdSql.checkData(0, 2, -888.00000)
tdSql.checkData(0, 3, 66666.000000000)
tdSql.checkData(1, 1, '2020-01-01 08:00:00.001')
tdSql.checkData(1, 2, 666.00000)
tdSql.checkData(1, 3, -88888.000000000)
tdSql.checkData(2, 1, '1970-01-01 08:00:00.000')
tdSql.checkData(2, 2, 0.00000)
tdSql.checkData(2, 3, 0.000000000)
def stop(self):
    tdSql.close()
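The step4 checks above repeat a single pattern: run a SELECT with an IN predicate, then verify row 0 column by column with checkData. If this matrix of types keeps growing, a small helper in the same framework could express each case in one call. The sketch below is illustrative only; the helper name check_in_values and the expected-row layout are assumptions, not part of this commit.

# Illustrative helper (not part of this commit): wraps the repeated
# "query + checkData per column" pattern used throughout step4.
from util.sql import tdSql   # module-level TDSql instance provided by the test framework

def check_in_values(table, predicate, expected_row):
    # expected_row maps column index -> expected value for row 0,
    # e.g. {1: '2021-01-01 08:00:00.001', 2: -888.00000, 3: 66666.000000000}
    tdSql.query("select * from %s where %s order by ts desc" % (table, predicate))
    for col, value in expected_row.items():
        tdSql.checkData(0, col, value)

# Example, mirroring one of the checks above:
# check_in_values('normal_in_ts_float_double_1', 'in_double in (66666.000000000)',
#                 {1: '2021-01-01 08:00:00.001', 2: -888.00000, 3: 66666.000000000})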
tests/pytest/tools/taosdumpTest.py
View file @ 529f906e
@@ -46,6 +46,8 @@ class TDTestCase:
sql += "(%d, %d, 'nchar%d')" % (currts + i, i % 100, i % 100)
tdSql.execute(sql)

os.system("rm /tmp/*.sql")
os.system("taosdump --databases db -o /tmp")
tdSql.execute("drop database db")
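The functional change in this hunk is the added cleanup of /tmp/*.sql before taosdump runs, presumably so stale dump files from an earlier run are not picked up again by the later taosdump -i /tmp import. An equivalent Python-side cleanup, shown only as a sketch (the commit itself keeps the os.system call), could look like this:

# Sketch of an equivalent cleanup without shelling out; glob returns an
# empty list when no leftover dump files exist, so nothing fails.
import glob
import os

for path in glob.glob("/tmp/*.sql"):
    os.remove(path)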
tests/pytest/tools/taosdumpTest2.py
0 → 100644
View file @ 529f906e
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

        self.ts = 1601481600000
        self.numberOfTables = 1
        self.numberOfRecords = 15000

    def run(self):
        tdSql.prepare()

        tdSql.execute("create table st(ts timestamp, c1 timestamp, c2 int, c3 bigint, c4 float, c5 double, c6 binary(8), c7 smallint, c8 tinyint, c9 bool, c10 nchar(8)) tags(t1 int)")
        tdSql.execute("create table t1 using st tags(0)")
        currts = self.ts
        finish = 0
        while(finish < self.numberOfRecords):
            sql = "insert into t1 values"
            for i in range(finish, self.numberOfRecords):
                sql += "(%d, 1019774612, 29931, 1442173978, 165092.468750, 1128.643179, 'MOCq1pTu', 18405, 82, 0, 'g0A6S0Fu')" % (currts + i)
                finish = i + 1
                if (1048576 - len(sql)) < 16384:
                    break
            tdSql.execute(sql)

        os.system("rm /tmp/*.sql")
        os.system("taosdump --databases db -o /tmp -B 32766 -L 1048576")
        tdSql.execute("drop database db")
        tdSql.query("show databases")
        tdSql.checkRows(0)

        os.system("taosdump -i /tmp")

        tdSql.query("show databases")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 'db')

        tdSql.execute("use db")
        tdSql.query("show stables")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 'st')

        tdSql.query("select count(*) from t1")
        tdSql.checkData(0, 0, self.numberOfRecords)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
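The insert loop in run() packs as many rows as possible into a single statement and starts a new batch once the SQL string gets within 16384 bytes of 1048576, the same number passed to taosdump via -L. Stripped of the framework, the batching logic amounts to the sketch below; the row template is shortened and the function name is illustrative, not part of the committed test.

# Standalone sketch of the batching logic used above: append rows until
# the statement nears the length limit, then start a new batch.
MAX_SQL_LEN = 1048576
HEADROOM = 16384

def batch_inserts(start_ts, total_rows):
    finish = 0
    batches = []
    while finish < total_rows:
        sql = "insert into t1 values"
        for i in range(finish, total_rows):
            sql += "(%d, 0)" % (start_ts + i)   # shortened row template
            finish = i + 1
            if (MAX_SQL_LEN - len(sql)) < HEADROOM:
                break
        batches.append(sql)
    return batches

# Example: batch_inserts(1601481600000, 15000) returns the list of insert
# statements the loop above would execute one by one.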