慢慢CG / TDengine (forked from taosdata / TDengine)
Commit 1017a803
Authored on Jul 28, 2021 by markswang
[TD-5534]<fix>:fix the coverity high risk of client
Parents: a7dc581f, 0d7c2fb4
Showing 35 changed files with 2371 additions and 332 deletions (+2371 −332)
Changed files (35):
documentation20/cn/03.architecture/02.replica/docs.md  +1 −1
documentation20/cn/12.taos-sql/docs.md  +1 −1
src/client/inc/tscUtil.h  +1 −0
src/client/src/tscParseLineProtocol.c  +334 −158
src/client/src/tscSQLParser.c  +24 −10
src/client/src/tscServer.c  +2 −2
src/client/src/tscSubquery.c  +1 −1
src/client/src/tscUtil.c  +17 −1
src/inc/ttype.h  +2 −0
src/kit/shell/inc/shellCommand.h  +2 −0
src/kit/shell/src/shellCommand.c  +22 −0
src/kit/shell/src/shellDarwin.c  +6 −0
src/kit/shell/src/shellLinux.c  +6 −0
src/plugins/http/src/httpGcJson.c  +1 −1
src/query/inc/qExecutor.h  +2 −1
src/query/inc/qTableMeta.h  +2 −1
src/query/src/qExecutor.c  +69 −10
src/query/src/qPlan.c  +12 −5
src/tsdb/src/tsdbMain.c  +7 −5
src/tsdb/src/tsdbMemTable.c  +11 −8
src/tsdb/src/tsdbRead.c  +8 −6
src/util/src/tcompression.c  +2 −2
src/util/src/tstrbuild.c  +5 −5
tests/examples/c/apitest.c  +5 −7
tests/examples/c/schemaless.c  +40 −0
tests/pytest/fulltest.sh  +3 −1
tests/pytest/query/nestedQuery/queryWithOrderLimit.py  +8 −0
tests/pytest/query/operator.py  +536 −0
tests/pytest/query/queryError.py  +1 −1
tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py  +703 −0
tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv  +3 −10
tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json  +40 −15
tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py  +118 −79
tests/pytest/tools/taosdumpTestNanoSupport.py  +362 −0
tests/pytest/util/sql.py  +14 −1
documentation20/cn/03.architecture/02.replica/docs.md
@@ -140,7 +140,7 @@ (master–slave data replication; comparison with RAFT)
One line of the data-recovery description is reworded (+1 −1). Context: recovery runs in two steps — restore the archived data (files) first, then the WAL; (1) a sync req is sent to the master over the already-established TCP connection; (2) on receiving the sync req, the master, acting as a client, opens a new TCP connection to vnode B dedicated to synchronization (syncFd).
documentation20/cn/12.taos-sql/docs.md
@@ -208,7 +208,7 @@ (default timestamp precision is milliseconds unless set at CREATE DATABASE)
The wildcard note under "SHOW TABLES LIKE" is extended (+1 −1): '%' matches zero or more characters and '\_' matches exactly one character, and the new text adds that an underscore that is literally part of a table name can be escaped with a backslash, i.e. '\\\_' matches a real underscore in the name. The pattern string is still limited to 24 bytes.
src/client/inc/tscUtil.h
@@ -344,6 +344,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, v
 STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
 SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo);
+int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo);
 int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
 void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage, uint64_t qId);
src/client/src/tscParseLineProtocol.c (+334 −158)
Diff collapsed in the page view; not expanded here.
src/client/src/tscSQLParser.c
@@ -1948,12 +1948,13 @@ bool isValidDistinctSql(SQueryInfo* pQueryInfo) {
   if (pQueryInfo == NULL) {
     return false;
   }
-  if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY) {
+  if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY
+      && (pQueryInfo->type & TSDB_QUERY_TYPE_TABLE_QUERY) != TSDB_QUERY_TYPE_TABLE_QUERY) {
     return false;
   }
-  if (tscQueryTags(pQueryInfo) && tscNumOfExprs(pQueryInfo) == 1){
+  if (tscNumOfExprs(pQueryInfo) == 1){
     return true;
   }
   return false;
 }
@@ -2047,7 +2048,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
   const char* msg1 = "too many items in selection clause";
   const char* msg2 = "functions or others can not be mixed up";
   const char* msg3 = "not support query expression";
-  const char* msg4 = "only support distinct one tag";
+  const char* msg4 = "only support distinct one column or tag";
   const char* msg5 = "invalid function name";
   // too many result columns not support order by in query
@@ -2107,13 +2108,13 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
   }
   if (hasDistinct == true) {
-    if (!isValidDistinctSql(pQueryInfo)) {
+    if (!isValidDistinctSql(pQueryInfo) ) {
       return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
     }
-    pQueryInfo->distinctTag = true;
+    pQueryInfo->distinct = true;
   }
   // there is only one user-defined column in the final result field, add the timestamp column.
   size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
   if ((numOfSrcCols <= 0 || !hasNoneUserDefineExpr(pQueryInfo)) && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
@@ -3977,8 +3978,10 @@ static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr*
 static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr, int32_t relOptr) {
   if (pExpr == NULL) {
+    pQueryInfo->onlyHasTagCond &= true;
     return TSDB_CODE_SUCCESS;
   }
+  pQueryInfo->onlyHasTagCond &= false;
   if (!tSqlExprIsParentOfLeaf(pExpr)) {  // internal node
     int32_t ret = getColumnQueryCondInfo(pCmd, pQueryInfo, pExpr->pLeft, pExpr->tokenId);
@@ -4105,6 +4108,7 @@ static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tS
 static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) {
   if (pExpr == NULL) {
+    pQueryInfo->onlyHasTagCond &= true;
     return TSDB_CODE_SUCCESS;
   }
@@ -4784,8 +4788,11 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlE
   int32_t code = 0;
   if (pExpr == NULL) {
+    pQueryInfo->onlyHasTagCond &= true;
     return TSDB_CODE_SUCCESS;
   }
+  pQueryInfo->onlyHasTagCond &= false;
   if (!tSqlExprIsParentOfLeaf(pExpr)) {
     if (pExpr->tokenId == TK_OR) {
@@ -4834,11 +4841,13 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr
   if (!QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
     if (pQueryInfo->numOfTables == 1) {
+      pQueryInfo->onlyHasTagCond &= true;
       return TSDB_CODE_SUCCESS;
     } else {
       return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
     }
   }
+  pQueryInfo->onlyHasTagCond &= false;
   STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
   if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {  // for stable join, tag columns
@@ -5151,7 +5160,7 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq
   if (pExpr == NULL) {
     return TSDB_CODE_SUCCESS;
   }
   const char* msg1 = "invalid expression";
   const char* msg2 = "invalid filter expression";
@@ -5184,6 +5193,7 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq
   if ((ret = getTimeRangeFromExpr(&pSql->cmd, pQueryInfo, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) {
     return ret;
   }
   // 3. get the tag query condition
   if ((ret = getTagQueryCondExpr(&pSql->cmd, pQueryInfo, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) {
@@ -5493,7 +5503,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
   STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
-  if (pQueryInfo->distinctTag == true) {
+  if (pQueryInfo->distinct == true) {
     pQueryInfo->order.order = TSDB_ORDER_ASC;
     pQueryInfo->order.orderColId = 0;
     return TSDB_CODE_SUCCESS;
@@ -8574,7 +8584,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
   if (validateGroupbyNode(pQueryInfo, pSqlNode->pGroupby, pCmd) != TSDB_CODE_SUCCESS) {
     return TSDB_CODE_TSC_INVALID_OPERATION;
   }
+  pQueryInfo->onlyHasTagCond = true;
   // set where info
   if (pSqlNode->pWhere != NULL) {
     if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) {
@@ -8597,6 +8607,10 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
     return TSDB_CODE_TSC_INVALID_OPERATION;
   }
+  if (isSTable && tscQueryTags(pQueryInfo) && pQueryInfo->distinct && !pQueryInfo->onlyHasTagCond) {
+    return TSDB_CODE_TSC_INVALID_OPERATION;
+  }
   // parse the window_state
   if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, isSTable) != TSDB_CODE_SUCCESS) {
     return TSDB_CODE_TSC_INVALID_OPERATION;
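With the relaxed isValidDistinctSql() above, DISTINCT is accepted on a single column of an ordinary table as well as on a single tag of a super table. A minimal client-side sketch (not part of the commit; the connection parameters, database and table tb with INT column c1 are hypothetical) of issuing such a query through the C connector:

    #include <stdio.h>
    #include <stdint.h>
    #include <taos.h>

    int main(void) {
      // Hypothetical connection parameters; adjust to the local setup.
      TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 0);
      if (conn == NULL) {
        fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
        return 1;
      }
      // After this commit the parser accepts DISTINCT on a normal column,
      // not only on a single tag of a super table.
      TAOS_RES *res = taos_query(conn, "select distinct c1 from tb");
      if (taos_errno(res) != 0) {
        fprintf(stderr, "query failed: %s\n", taos_errstr(res));
      } else {
        TAOS_ROW row;
        while ((row = taos_fetch_row(res)) != NULL) {
          if (row[0] != NULL) printf("%d\n", *(int32_t *)row[0]);
        }
      }
      taos_free_result(res);
      taos_close(conn);
      taos_cleanup();
      return 0;
    }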
src/client/src/tscServer.c
@@ -632,7 +632,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
   SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd);
   int32_t srcColListSize = (int32_t)(taosArrayGetSize(pQueryInfo->colList) * sizeof(SColumnInfo));
+  int32_t srcColFilterSize = tscGetColFilterSerializeLen(pQueryInfo);
   size_t numOfExprs = tscNumOfExprs(pQueryInfo);
   int32_t exprSize = (int32_t)(sizeof(SSqlExpr) * numOfExprs * 2);
@@ -653,7 +653,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) {
     tableSerialize = totalTables * sizeof(STableIdInfo);
   }
-  return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + tsBufSize +
+  return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + exprSize + tsBufSize +
          tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen;
 }
src/client/src/tscSubquery.c
@@ -2896,7 +2896,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
   tscDebug("0x%" PRIx64 " sub:0x%" PRIx64 " retrieve numOfRows:%d totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d",
            pParentSql->self, pSql->self, pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);
-  if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0) && !(tscGetQueryInfo(&pParentSql->cmd)->distinctTag)) {
+  if (num > tsMaxNumOfOrderedResults && /*tscIsProjectionQueryOnSTable(pQueryInfo, 0) &&*/ !(tscGetQueryInfo(&pParentSql->cmd)->distinct)) {
     tscError("0x%" PRIx64 " sub:0x%" PRIx64 " num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,
              pParentSql->self, pSql->self, tsMaxNumOfOrderedResults, num);
     tscAbortFurtherRetryRetrieval(trsupport, tres, TSDB_CODE_TSC_SORTED_RES_TOO_MANY);
src/client/src/tscUtil.c
@@ -4571,6 +4571,22 @@ static int32_t createTagColumnInfo(SQueryAttr* pQueryAttr, SQueryInfo* pQueryInf
   return TSDB_CODE_SUCCESS;
 }
+
+int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo) {
+  int16_t numOfCols = (int16_t)taosArrayGetSize(pQueryInfo->colList);
+  int32_t len = 0;
+
+  for (int32_t i = 0; i < numOfCols; ++i) {
+    SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i);
+    for (int32_t j = 0; j < pCol->info.flist.numOfFilters; ++j) {
+      len += sizeof(SColumnFilterInfo);
+      if (pCol->info.flist.filterInfo[j].filterstr) {
+        len += (int32_t)pCol->info.flist.filterInfo[j].len + 1 * TSDB_NCHAR_SIZE;
+      }
+    }
+  }
+  return len;
+}
+
 int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr) {
   memset(pQueryAttr, 0, sizeof(SQueryAttr));
@@ -4589,7 +4605,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
   pQueryAttr->queryBlockDist = isBlockDistQuery(pQueryInfo);
   pQueryAttr->pointInterpQuery = tscIsPointInterpQuery(pQueryInfo);
   pQueryAttr->timeWindowInterpo = timeWindowInterpoRequired(pQueryInfo);
-  pQueryAttr->distinctTag = pQueryInfo->distinctTag;
+  pQueryAttr->distinct = pQueryInfo->distinct;
   pQueryAttr->sw = pQueryInfo->sessionWindow;
   pQueryAttr->stateWindow = pQueryInfo->stateWindow;
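The new tscGetColFilterSerializeLen() exists so that tscEstimateQueryMsgSize() (see tscServer.c above) can reserve space for serialized column filters up front instead of risking an undersized message buffer. A standalone sketch of the same accumulate-then-allocate pattern, using toy stand-in structs (every name below is hypothetical, for illustration only):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int32_t len; int32_t filterstr; } FilterInfo;            // toy stand-in
    typedef struct { int32_t numOfFilters; FilterInfo *filters; } ColumnInfo; // toy stand-in

    // Walk every filter of every column and add its worst-case serialized size.
    static int32_t filterSerializeLen(const ColumnInfo *cols, int32_t numOfCols) {
      int32_t len = 0;
      for (int32_t i = 0; i < numOfCols; ++i) {
        for (int32_t j = 0; j < cols[i].numOfFilters; ++j) {
          len += (int32_t)sizeof(FilterInfo);      // fixed-size part of each filter
          if (cols[i].filters[j].filterstr) {      // string filters also carry a payload
            len += cols[i].filters[j].len + 4;     // payload plus terminator slack
          }
        }
      }
      return len;
    }

    int main(void) {
      FilterInfo f = {.len = 16, .filterstr = 1};
      ColumnInfo c = {.numOfFilters = 1, .filters = &f};
      int32_t total = 128 /* hypothetical fixed header */ + filterSerializeLen(&c, 1);
      char *msg = calloc(1, (size_t)total);        // allocate once, serialize into it later
      printf("reserved %d bytes for the query message\n", total);
      free(msg);
      return 0;
    }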
src/inc/ttype.h
@@ -138,6 +138,8 @@ typedef struct {
 #define IS_VALID_USMALLINT(_t) ((_t) >= 0 && (_t) < UINT16_MAX)
 #define IS_VALID_UINT(_t)      ((_t) >= 0 && (_t) < UINT32_MAX)
 #define IS_VALID_UBIGINT(_t)   ((_t) >= 0 && (_t) < UINT64_MAX)
+#define IS_VALID_FLOAT(_t)     ((_t) >= -FLT_MAX && (_t) <= FLT_MAX)
+#define IS_VALID_DOUBLE(_t)    ((_t) >= -DBL_MAX && (_t) <= DBL_MAX)
 
 static FORCE_INLINE bool isNull(const char *val, int32_t type) {
   switch (type) {
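The two new macros bound a floating-point value to the representable float/double range before it is stored. A small standalone sketch of how such guards are typically used when parsing numeric input (the macro bodies are copied from the hunk; the parsing loop around them is illustrative):

    #include <float.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define IS_VALID_FLOAT(_t)  ((_t) >= -FLT_MAX && (_t) <= FLT_MAX)
    #define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX)

    int main(void) {
      const char *inputs[] = {"3.14", "1e400", "-2.5e308"};
      for (int i = 0; i < 3; ++i) {
        double v = strtod(inputs[i], NULL);   // overflow yields +/-HUGE_VAL (infinity)
        if (!IS_VALID_DOUBLE(v)) {
          printf("%s: outside the double range, rejected\n", inputs[i]);
        } else if (!IS_VALID_FLOAT(v)) {
          printf("%s: a valid double but too large for float\n", inputs[i]);
        } else {
          printf("%s: ok as float %f\n", inputs[i], (float)v);
        }
      }
      return 0;
    }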
src/kit/shell/inc/shellCommand.h
@@ -35,6 +35,8 @@ struct Command {
 };
 
 extern void backspaceChar(Command *cmd);
+extern void clearLineBefore(Command *cmd);
+extern void clearLineAfter(Command *cmd);
 extern void deleteChar(Command *cmd);
 extern void moveCursorLeft(Command *cmd);
 extern void moveCursorRight(Command *cmd);
src/kit/shell/src/shellCommand.c
@@ -102,6 +102,28 @@ void backspaceChar(Command *cmd) {
   }
 }
 
+void clearLineBefore(Command *cmd) {
+  assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
+  clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
+  memmove(cmd->command, cmd->command + cmd->cursorOffset, cmd->commandSize - cmd->cursorOffset);
+  cmd->commandSize -= cmd->cursorOffset;
+  cmd->cursorOffset = 0;
+  cmd->screenOffset = 0;
+  cmd->endOffset = cmd->commandSize;
+  showOnScreen(cmd);
+}
+
+void clearLineAfter(Command *cmd) {
+  assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
+  clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size);
+  cmd->commandSize -= cmd->endOffset - cmd->cursorOffset;
+  cmd->endOffset = cmd->cursorOffset;
+  showOnScreen(cmd);
+}
+
 void deleteChar(Command *cmd) {
   assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset);
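The two new helpers implement the usual readline-style kill commands on the shell's command buffer; clearScreen()/showOnScreen() then repaint the prompt. A standalone sketch of just the buffer arithmetic, on a hypothetical Line struct rather than the shell's real Command type:

    #include <stdio.h>
    #include <string.h>

    typedef struct {
      char   buf[128];
      size_t size;    // bytes currently in buf
      size_t cursor;  // byte offset of the cursor
    } Line;

    // Ctrl+U semantics: drop everything before the cursor, keep the tail.
    static void killToLineStart(Line *l) {
      memmove(l->buf, l->buf + l->cursor, l->size - l->cursor);
      l->size -= l->cursor;
      l->cursor = 0;
      l->buf[l->size] = '\0';
    }

    // Ctrl+K semantics: drop everything from the cursor to the end.
    static void killToLineEnd(Line *l) {
      l->size = l->cursor;
      l->buf[l->size] = '\0';
    }

    int main(void) {
      Line l = {.size = 0, .cursor = 0};
      strcpy(l.buf, "select * from meters;");
      l.size = strlen(l.buf);
      l.cursor = 7;                            // cursor sits right after "select "
      killToLineStart(&l);
      printf("after Ctrl+U: '%s'\n", l.buf);   // prints "* from meters;"
      killToLineEnd(&l);
      printf("after Ctrl+K: '%s'\n", l.buf);   // cursor is now at 0, so the line is empty
      return 0;
    }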
src/kit/shell/src/shellDarwin.c
@@ -238,10 +238,16 @@ int32_t shellReadCommand(TAOS *con, char *command) {
           updateBuffer(&cmd);
         }
         break;
+      case 11:  // Ctrl + K;
+        clearLineAfter(&cmd);
+        break;
       case 12:  // Ctrl + L;
         system("clear");
         showOnScreen(&cmd);
         break;
+      case 21:  // Ctrl + U
+        clearLineBefore(&cmd);
+        break;
     }
   } else if (c == '\033') {
     c = getchar();
src/kit/shell/src/shellLinux.c
@@ -238,10 +238,16 @@ int32_t shellReadCommand(TAOS *con, char *command) {
           updateBuffer(&cmd);
         }
         break;
+      case 11:  // Ctrl + K;
+        clearLineAfter(&cmd);
+        break;
       case 12:  // Ctrl + L;
         system("clear");
         showOnScreen(&cmd);
         break;
+      case 21:  // Ctrl + U;
+        clearLineBefore(&cmd);
+        break;
     }
   } else if (c == '\033') {
     c = (char)getchar();
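The case labels come straight from ASCII control codes: a Ctrl+letter key arrives as letter & 0x1F, so Ctrl+K is 11, Ctrl+L is 12 and Ctrl+U is 21. A minimal dispatcher in the same shape as the shell's switch (handlers reduced to prints; not the shell's real code):

    #include <stdio.h>

    static void handleControlKey(int c) {
      switch (c) {
        case 11: printf("Ctrl+K: clear line after cursor\n");  break;
        case 12: printf("Ctrl+L: clear screen\n");             break;
        case 21: printf("Ctrl+U: clear line before cursor\n"); break;
        default: printf("unhandled control key %d\n", c);      break;
      }
    }

    int main(void) {
      handleControlKey('K' & 0x1F);  // 11
      handleControlKey('L' & 0x1F);  // 12
      handleControlKey('U' & 0x1F);  // 21
      return 0;
    }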
src/plugins/http/src/httpGcJson.c
@@ -199,7 +199,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
     for (int32_t i = dataFields; i >= 0; i--) {
       httpJsonItemToken(jsonBuf);
-      if (row[i] == NULL) {
+      if (row == NULL || i >= num_fields || row[i] == NULL) {
         httpJsonOriginString(jsonBuf, "null", 4);
         continue;
       }
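The fix guards three things before touching row[i]: the row pointer itself, the index against the field count, and the cell pointer. A standalone sketch of the same defensive iteration, decoupled from the HTTP/JSON plumbing (the requested index deliberately runs past the real field count):

    #include <stdio.h>

    static void printRow(char **row, int num_fields, int requested) {
      for (int i = requested; i >= 0; i--) {          // same backwards loop shape as the hunk
        if (row == NULL || i >= num_fields || row[i] == NULL) {
          printf("null ");
          continue;
        }
        printf("%s ", row[i]);
      }
      printf("\n");
    }

    int main(void) {
      char *row[] = {"1626006834000", "23.5", NULL};
      printRow(row, 3, 4);   // indexes 4 and 3 are out of range, index 2 is a NULL cell
      printRow(NULL, 3, 2);  // a missing row must never be dereferenced
      return 0;
    }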
src/query/inc/qExecutor.h
@@ -216,7 +216,7 @@ typedef struct SQueryAttr {
   bool             simpleAgg;
   bool             pointInterpQuery;      // point interpolation query
   bool             needReverseScan;       // need reverse scan
-  bool             distinctTag;           // distinct tag query
+  bool             distinct;              // distinct query or not
   bool             stateWindow;           // window State on sub/normal table
   bool             createFilterOperator;  // if filter operator is needed
   int32_t          interBufSize;          // intermediate buffer sizse
@@ -514,6 +514,7 @@ typedef struct SDistinctOperatorInfo {
   bool         recordNullVal;  //has already record the null value, no need to try again
   int64_t      threshold;
   int64_t      outputCapacity;
+  int32_t      colIndex;
 } SDistinctOperatorInfo;
 
 struct SGlobalMerger;
src/query/inc/qTableMeta.h
@@ -121,7 +121,8 @@ typedef struct SQueryInfo {
   int64_t          vgroupLimit;      // table limit in case of super table projection query + global order + limit
   int32_t          udColumnId;       // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
-  bool             distinctTag;      // distinct tag or not
+  bool             distinct;         // distinct tag or not
+  bool             onlyHasTagCond;
   int32_t          round;            // 0/1/....
   int32_t          bufLen;
   char*            buf;
src/query/src/qExecutor.c
@@ -98,12 +98,47 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
 #define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
 #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)
 
+#define TSKEY_MAX_ADD(a,b)                    \
+do {                                          \
+  if (a < 0) { a = a + b; break;}             \
+  if (sizeof(a) == sizeof(int32_t)) {         \
+   if((b) > 0 && ((b) >= INT32_MAX - (a))){   \
+     a = INT32_MAX;                           \
+   } else {                                   \
+     a = a + b;                               \
+   }                                          \
+  } else {                                    \
+   if((b) > 0 && ((b) >= INT64_MAX - (a))){   \
+     a = INT64_MAX;                           \
+   } else {                                   \
+     a = a + b;                               \
+   }                                          \
+  }                                           \
+} while(0)
+
+#define TSKEY_MIN_SUB(a,b)                    \
+do {                                          \
+  if (a >= 0) { a = a + b; break;}            \
+  if (sizeof(a) == sizeof(int32_t)){          \
+   if((b) < 0 && ((b) <= INT32_MIN - (a))){   \
+     a = INT32_MIN;                           \
+   } else {                                   \
+     a = a + b;                               \
+   }                                          \
+  } else {                                    \
+   if((b) < 0 && ((b) <= INT64_MIN-(a))) {    \
+     a = INT64_MIN;                           \
+   } else {                                   \
+     a = a + b;                               \
+   }                                          \
+  }                                           \
+} while (0)
+
 uint64_t queryHandleId = 0;
 
 int32_t getMaximumIdleDurationSec() {
   return tsShellActivityTimer * 2;
 }
 
 int64_t genQueryId(void) {
   int64_t uid = 0;
   int64_t did = tsDnodeId;
@@ -3129,7 +3164,9 @@ void setTagValue(SOperatorInfo* pOperatorInfo, void *pTable, SQLFunctionCtx* pCt
           || pLocalExprInfo->base.resType == TSDB_DATA_TYPE_TIMESTAMP) {
         memcpy(pRuntimeEnv->tagVal + offset, &pCtx[idx].tag.i64, pLocalExprInfo->base.resBytes);
       } else {
-        memcpy(pRuntimeEnv->tagVal + offset, pCtx[idx].tag.pz, pCtx[idx].tag.nLen);
+        if (pCtx[idx].tag.pz != NULL) {
+          memcpy(pRuntimeEnv->tagVal + offset, pCtx[idx].tag.pz, pCtx[idx].tag.nLen);
+        }
       }
       offset += pLocalExprInfo->base.resBytes;
@@ -3940,8 +3977,8 @@ static void toSSDataBlock(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv* pRunti
   // refactor : extract method
   SColumnInfoData* pInfoData = taosArrayGet(pBlock->pDataBlock, 0);
 
-  if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
+  //add condition (pBlock->info.rows >= 1) just to runtime happy
+  if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP && pBlock->info.rows >= 1) {
     STimeWindow* w = &pBlock->info.window;
     w->skey = *(int64_t*)pInfoData->pData;
     w->ekey = *(int64_t*)(((char*)pInfoData->pData) + TSDB_KEYSIZE * (pBlock->info.rows - 1));
@@ -5279,7 +5316,15 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) {
     // the pDataBlock are always the same one, no need to call this again
     setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order);
 
-    TSKEY key = QUERY_IS_ASC_QUERY(pQueryAttr) ? pBlock->info.window.ekey + 1 : pBlock->info.window.skey - 1;
+    TSKEY key = 0;
+    if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
+      key = pBlock->info.window.ekey;
+      TSKEY_MAX_ADD(key, 1);
+    } else {
+      key = pBlock->info.window.skey;
+      TSKEY_MIN_SUB(key, -1);
+    }
     setExecutionContext(pRuntimeEnv, pInfo, pOperator->numOfOutput, pRuntimeEnv->current->groupIndex, key);
     doAggregateImpl(pOperator, pQueryAttr->window.skey, pInfo->pCtx, pBlock);
   }
@@ -6485,7 +6530,7 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) {
     pOperator->status = OP_EXEC_DONE;
     qDebug("QInfo:0x%" PRIx64 " create count(tbname) query, res:%d rows:1", GET_QID(pRuntimeEnv), count);
   } else {  // return only the tags|table name etc.
-    SExprInfo* pExprInfo = pOperator->pExpr;  // todo use the column list instead of exprinfo
+    SExprInfo* pExprInfo = &pOperator->pExpr[0];  // todo use the column list instead of exprinfo
     count = 0;
     while(pInfo->curPos < pInfo->totalTables && count < maxNumOfTables) {
@@ -6571,13 +6616,25 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
     publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
     if (pBlock == NULL) {
       setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
       pOperator->status = OP_EXEC_DONE;
       break;
     }
+    if (pInfo->colIndex == -1) {
+      for (int i = 0; i < taosArrayGetSize(pBlock->pDataBlock); i++) {
+        SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, i);
+        if (pColDataInfo->info.colId == pOperator->pExpr[0].base.resColId) {
+          pInfo->colIndex = i;
+          break;
+        }
+      }
+    }
+    if (pInfo->colIndex == -1) {
+      setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
+      pOperator->status = OP_EXEC_DONE;
+      return NULL;
+    }
-    assert(pBlock->info.numOfCols == 1);
-    SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, 0);
+    SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->colIndex);
 
     int16_t bytes = pColInfoData->info.bytes;
     int16_t type = pColInfoData->info.type;
@@ -6629,7 +6686,8 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) {
 SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
   SDistinctOperatorInfo* pInfo = calloc(1, sizeof(SDistinctOperatorInfo));
+  pInfo->colIndex = -1;
   pInfo->threshold = 10000000; // distinct result threshold
   pInfo->outputCapacity = 4096;
   pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(pExpr->base.colType), false, HASH_NO_LOCK);
   pInfo->pRes = createOutputBuf(pExpr, numOfOutput, (int32_t)pInfo->outputCapacity);
@@ -6644,6 +6702,7 @@ SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperat
   pOperator->info = pInfo;
   pOperator->pRuntimeEnv = pRuntimeEnv;
   pOperator->exec = hashDistinct;
+  pOperator->pExpr = pExpr;
   pOperator->cleanup = destroyDistinctOperatorInfo;
   appendUpstream(pOperator, upstream);
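TSKEY_MAX_ADD/TSKEY_MIN_SUB exist because the aggregate path used to compute window.ekey + 1 (or skey - 1) directly, which wraps around for open-ended time windows that sit at INT64_MAX/INT64_MIN. A simplified, function-shaped sketch of the positive-operand path of the add macro (the real macro also handles 32-bit keys and negative keys):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t TSKEY;

    // Saturating add: a key near INT64_MAX stays at INT64_MAX instead of wrapping.
    static TSKEY tskeyAddClamped(TSKEY a, TSKEY b) {
      if (a >= 0 && b > 0 && b >= INT64_MAX - a) {
        return INT64_MAX;
      }
      return a + b;
    }

    int main(void) {
      TSKEY openEnded = INT64_MAX;   // e.g. an unbounded query window
      printf("clamped: %" PRId64 "\n", tskeyAddClamped(openEnded, 1));
      printf("normal : %" PRId64 "\n", tskeyAddClamped(1627000000000LL, 1));
      return 0;
    }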
src/query/src/qPlan.c
@@ -104,7 +104,7 @@ static SQueryNode* doAddTableColumnNode(SQueryInfo* pQueryInfo, STableMetaInfo*
     int32_t num = (int32_t) taosArrayGetSize(pExprs);
     SQueryNode* pNode = createQueryNode(QNODE_TAGSCAN, "TableTagScan", NULL, 0, pExprs->pData, num, info, NULL);
-    if (pQueryInfo->distinctTag) {
+    if (pQueryInfo->distinct) {
       pNode = createQueryNode(QNODE_DISTINCT, "Distinct", &pNode, 1, pExprs->pData, num, info, NULL);
     }
@@ -555,9 +555,11 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
   int32_t op = 0;
   if (onlyQueryTags(pQueryAttr)) {  // do nothing for tags query
-    op = OP_TagScan;
-    taosArrayPush(plan, &op);
-    if (pQueryAttr->distinctTag) {
+    if (onlyQueryTags(pQueryAttr)) {
+      op = OP_TagScan;
+      taosArrayPush(plan, &op);
+    }
+    if (pQueryAttr->distinct) {
       op = OP_Distinct;
       taosArrayPush(plan, &op);
     }
@@ -634,8 +636,13 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
     } else {
       op = OP_Project;
       taosArrayPush(plan, &op);
+      if (pQueryAttr->distinct) {
+        op = OP_Distinct;
+        taosArrayPush(plan, &op);
+      }
     }
   }
 
   if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) {
     op = OP_Limit;
@@ -655,7 +662,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
   int32_t op = OP_MultiwayMergeSort;
   taosArrayPush(plan, &op);
 
-  if (pQueryAttr->distinctTag) {
+  if (pQueryAttr->distinct) {
     op = OP_Distinct;
     taosArrayPush(plan, &op);
   }
src/tsdb/src/tsdbMain.c
@@ -722,7 +722,8 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
     // OK,let's load row from backward to get not-null column
     for (int32_t rowId = pBlock->numOfRows - 1; rowId >= 0; rowId--) {
       SDataCol *pDataCol = pReadh->pDCols[0]->cols + i;
-      tdAppendColVal(memRowDataBody(row), tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->offset);
+      const void* pColData = tdGetColDataOfRow(pDataCol, rowId);
+      tdAppendColVal(memRowDataBody(row), pColData, pCol->type, pCol->offset);
       //SDataCol *pDataCol = readh.pDCols[0]->cols + j;
       void *value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset);
       if (isNull(value, pCol->type)) {
@@ -735,11 +736,12 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
         continue;
       }
       // save not-null column
+      uint16_t bytes = IS_VAR_DATA_TYPE(pCol->type) ? varDataTLen(pColData) : pCol->bytes;
       SDataCol *pLastCol = &(pTable->lastCols[idx]);
-      pLastCol->pData = malloc(pCol->bytes);
-      pLastCol->bytes = pCol->bytes;
+      pLastCol->pData = malloc(bytes);
+      pLastCol->bytes = bytes;
       pLastCol->colId = pCol->colId;
-      memcpy(pLastCol->pData, value, pCol->bytes);
+      memcpy(pLastCol->pData, value, bytes);
 
       // save row ts(in column 0)
       pDataCol = pReadh->pDCols[0]->cols + 0;
@@ -991,4 +993,4 @@ int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
   }
 
   return 0;
-}
\ No newline at end of file
+}
src/tsdb/src/tsdbMemTable.c
@@ -1019,7 +1019,7 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
     if (isDataRow) {
       value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pTCol->type,
-                                TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset);
+                                TD_DATA_ROW_HEAD_SIZE + pTCol->offset);
     } else {
       // SKVRow
       SColIdx *pColIdx = tdGetKVRowIdxOfCol(memRowKvBody(row), pTCol->colId);
@@ -1034,14 +1034,17 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
     SDataCol *pDataCol = &(pLatestCols[idx]);
     if (pDataCol->pData == NULL) {
-      pDataCol->pData = malloc(pSchema->columns[j].bytes);
-      pDataCol->bytes = pSchema->columns[j].bytes;
-    } else if (pDataCol->bytes < pSchema->columns[j].bytes) {
-      pDataCol->pData = realloc(pDataCol->pData, pSchema->columns[j].bytes);
-      pDataCol->bytes = pSchema->columns[j].bytes;
+      pDataCol->pData = malloc(pTCol->bytes);
+      pDataCol->bytes = pTCol->bytes;
+    } else if (pDataCol->bytes < pTCol->bytes) {
+      pDataCol->pData = realloc(pDataCol->pData, pTCol->bytes);
+      pDataCol->bytes = pTCol->bytes;
     }
 
-    memcpy(pDataCol->pData, value, pDataCol->bytes);
+    // the actual value size
+    uint16_t bytes = IS_VAR_DATA_TYPE(pTCol->type) ? varDataTLen(value) : pTCol->bytes;
+    // the actual data size CANNOT larger than column size
+    assert(pTCol->bytes >= bytes);
+    memcpy(pDataCol->pData, value, bytes);
     //tsdbInfo("updateTableLatestColumn vgId:%d cache column %d for %d,%s", REPO_ID(pRepo), j, pDataCol->bytes, (char*)pDataCol->pData);
     pDataCol->ts = memRowKey(row);
   }
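Both tsdb fixes copy only the bytes that actually exist in a variable-length value instead of the column's declared maximum, which is the kind of potential overread Coverity flags. A standalone sketch of the idea, using a 2-byte-length-prefix layout assumed here for illustration (see the var-data helpers in the tree for the real definitions):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct {
      uint16_t len;     // payload length
      char     data[];  // payload bytes
    } VarData;

    #define VAR_DATA_TLEN(p) ((uint16_t)(sizeof(uint16_t) + (p)->len))

    int main(void) {
      uint16_t columnBytes = 2 + 64;             // column declared as BINARY(64)
      char *storage = malloc(columnBytes);       // malloc'd block is suitably aligned
      VarData *v = (VarData *)storage;
      v->len = 5;
      memcpy(v->data, "hello", 5);

      uint16_t actualBytes = VAR_DATA_TLEN(v);   // what the fix copies: 7 bytes
      assert(actualBytes <= columnBytes);        // mirrors the new assert in the hunk

      char cache[2 + 64];
      memcpy(cache, v, actualBytes);             // copy only the bytes that exist
      printf("copied %u of %u bytes\n", actualBytes, columnBytes);
      free(storage);
      return 0;
    }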
src/tsdb/src/tsdbRead.c
@@ -640,7 +640,7 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr
   size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
 
   STableGroupInfo* pNew = calloc(1, sizeof(STableGroupInfo));
-  pNew->pGroupList = taosArrayInit(numOfGroup, sizeof(SArray));
+  pNew->pGroupList = taosArrayInit(numOfGroup, POINTER_BYTES);
 
   for (int32_t i = 0; i < numOfGroup; ++i) {
     SArray* oneGroup = taosArrayGetP(pGroupList->pGroupList, i);
@@ -3383,11 +3383,13 @@ static int32_t tableGroupComparFn(const void *p1, const void *p2, const void *pa
       type = TSDB_DATA_TYPE_BINARY;
       bytes = tGetTbnameColumnSchema()->bytes;
     } else {
-      STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex);
-      bytes = pCol->bytes;
-      type = pCol->type;
-      f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId);
-      f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId);
+      if (pTableGroupSupp->pTagSchema && colIndex < pTableGroupSupp->pTagSchema->numOfCols) {
+        STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex);
+        bytes = pCol->bytes;
+        type = pCol->type;
+        f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId);
+        f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId);
+      }
     }
 
     // this tags value may be NULL
src/util/src/tcompression.c
@@ -159,7 +159,7 @@ int tsCompressINTImp(const char *const input, const int nelements, char *const o
       break;
     }
 
     // Get difference.
-    if (!safeInt64Add(curr_value, -prev_value)) goto _copy_and_exit;
+    if (!safeInt64Add(curr_value, -prev_value_tmp)) goto _copy_and_exit;
 
     int64_t diff = curr_value - prev_value_tmp;
 
     // Zigzag encode the value.
@@ -993,4 +993,4 @@ int tsDecompressDoubleLossyImp(const char * input, int compressedSize, const int
   // decompressed with sz
   return tdszDecompress(SZ_DOUBLE, input + 1, compressedSize - 1, nelements, output);
 }
-#endif
\ No newline at end of file
+#endif
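The compression fix checks the delta against the value that is actually subtracted (prev_value_tmp). The check itself is a standard "would this 64-bit addition overflow?" predicate; a standalone sketch in the same spirit (my own helper, not the library's safeInt64Add):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    // True when a + b is representable in int64_t.
    static bool safeAdd64(int64_t a, int64_t b) {
      if (b > 0 && a > INT64_MAX - b) return false;  // would overflow upward
      if (b < 0 && a < INT64_MIN - b) return false;  // would overflow downward
      return true;
    }

    int main(void) {
      int64_t prev = INT64_MIN + 1;        // extreme previous value
      int64_t curr = 10;
      if (!safeAdd64(curr, -prev)) {       // curr - prev would exceed INT64_MAX
        printf("delta would overflow, fall back to an uncompressed copy\n");
      } else {
        printf("delta = %lld\n", (long long)(curr - prev));
      }
      return 0;
    }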
src/util/src/tstrbuild.c
@@ -69,12 +69,12 @@ void taosStringBuilderAppendNull(SStringBuilder* sb) { taosStringBuilderAppendSt
 void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) {
   char buf[64];
-  size_t len = sprintf(buf, "%" PRId64, v);
-  taosStringBuilderAppendStringLen(sb, buf, len);
+  size_t len = snprintf(buf, sizeof(buf), "%" PRId64, v);
+  taosStringBuilderAppendStringLen(sb, buf, MIN(len, sizeof(buf)));
 }
 
 void taosStringBuilderAppendDouble(SStringBuilder* sb, double v) {
-  char buf[64];
-  size_t len = sprintf(buf, "%.9lf", v);
-  taosStringBuilderAppendStringLen(sb, buf, len);
+  char buf[512];
+  size_t len = snprintf(buf, sizeof(buf), "%.9lf", v);
+  taosStringBuilderAppendStringLen(sb, buf, MIN(len, sizeof(buf)));
 }
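snprintf() returns the length the formatted output would have had, which can exceed the buffer, so the append length is clamped with MIN before it is handed on. A standalone sketch of the pattern (deliberately tiny buffer so the truncation path is exercised; the clamp here uses sizeof(buf) - 1, the number of characters snprintf can actually write):

    #include <stdio.h>
    #include <string.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static void appendDouble(char *dst, size_t dstCap, double v) {
      char buf[16];                                       // deliberately tiny for the demo
      int len = snprintf(buf, sizeof(buf), "%.9lf", v);   // may report more than fits
      size_t usable = MIN((size_t)len, sizeof(buf) - 1);  // bytes actually present in buf
      size_t room = dstCap - strlen(dst) - 1;
      strncat(dst, buf, MIN(usable, room));
    }

    int main(void) {
      char out[64] = "";
      appendDouble(out, sizeof(out), 12345678901234.0);   // rendering longer than buf
      printf("%s\n", out);                                // truncated but in-bounds
      return 0;
    }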
tests/examples/c/apitest.c
@@ -359,7 +359,7 @@ void verify_prepare(TAOS* taos) {
     v.v8 = (int64_t)(i * 8);
     v.f4 = (float)(i * 40);
     v.f8 = (double)(i * 80);
-    for (int j = 0; j < sizeof(v.bin) - 1; ++j) {
+    for (int j = 0; j < sizeof(v.bin); ++j) {
       v.bin[j] = (char)(i + '0');
     }
@@ -556,7 +556,7 @@ void verify_prepare2(TAOS* taos) {
     v.v8[i] = (int64_t)(i * 8);
     v.f4[i] = (float)(i * 40);
     v.f8[i] = (double)(i * 80);
-    for (int j = 0; j < sizeof(v.bin[0]) - 1; ++j) {
+    for (int j = 0; j < sizeof(v.bin[0]); ++j) {
       v.bin[i][j] = (char)(i + '0');
     }
     strcpy(v.blob[i], "一二三四五六七八九十");
@@ -808,7 +808,7 @@ void verify_prepare3(TAOS* taos) {
     v.v8[i] = (int64_t)(i * 8);
     v.f4[i] = (float)(i * 40);
     v.f8[i] = (double)(i * 80);
-    for (int j = 0; j < sizeof(v.bin[0]) - 1; ++j) {
+    for (int j = 0; j < sizeof(v.bin[0]); ++j) {
      v.bin[i][j] = (char)(i + '0');
     }
     strcpy(v.blob[i], "一二三四五六七八九十");
@@ -1030,7 +1030,7 @@ int main(int argc, char *argv[]) {
   printf("server info: %s\n", info);
   info = taos_get_client_info(taos);
   printf("client info: %s\n", info);
   printf("************ verify shemaless *************\n");
   verify_schema_less(taos);
@@ -1049,14 +1049,12 @@ int main(int argc, char *argv[]) {
   printf("************ verify prepare2 *************\n");
   verify_prepare2(taos);
   printf("************ verify prepare3 *************\n");
   verify_prepare3(taos);
-  printf("************ verify stream *************\n");
-  verify_stream(taos);
   printf("done\n");
   taos_close(taos);
   taos_cleanup();
 }
tests/examples/c/schemaless.c
@@ -157,5 +157,45 @@ int main(int argc, char* argv[]) {
     return -1;
   }
 
+  //Duplicate key check;
+  char* lines_003_1[] = {
+      "std,id=\"std_3_1\",t1=4i64,Id=\"std\",t2=true c1=true 1626006834s"
+  };
+  code = taos_insert_lines(taos, lines_003_1, sizeof(lines_003_1)/sizeof(char*));
+  if (0 == code) {
+    printf("taos_insert_lines() lines_003_1 return code:%d (%s)\n", code, (char*)tstrerror(code));
+    return -1;
+  }
+
+  char* lines_003_2[] = {
+      "std,id=\"std_3_2\",tag1=4i64,Tag2=true,tAg3=2,TaG2=\"dup!\" c1=true 1626006834s"
+  };
+  code = taos_insert_lines(taos, lines_003_2, sizeof(lines_003_2)/sizeof(char*));
+  if (0 == code) {
+    printf("taos_insert_lines() lines_003_2 return code:%d (%s)\n", code, (char*)tstrerror(code));
+    return -1;
+  }
+
+  char* lines_003_3[] = {
+      "std,id=\"std_3_3\",tag1=4i64 field1=true,Field2=2,FIElD1=\"dup!\",fIeLd4=true 1626006834s"
+  };
+  code = taos_insert_lines(taos, lines_003_3, sizeof(lines_003_3)/sizeof(char*));
+  if (0 == code) {
+    printf("taos_insert_lines() lines_003_3 return code:%d (%s)\n", code, (char*)tstrerror(code));
+    return -1;
+  }
+
+  char* lines_003_4[] = {
+      "std,id=\"std_3_4\",tag1=4i64,dupkey=4i16,tag2=T field1=true,dUpkEy=1e3f32,field2=\"1234\" 1626006834s"
+  };
+  code = taos_insert_lines(taos, lines_003_4, sizeof(lines_003_4)/sizeof(char*));
+  if (0 == code) {
+    printf("taos_insert_lines() lines_003_4 return code:%d (%s)\n", code, (char*)tstrerror(code));
+    return -1;
+  }
+
   return 0;
 }
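The four new cases all expect a non-zero code back, because each line repeats a key that differs only by case (Id vs id, TaG2 vs Tag2, FIElD1 vs field1, dUpkEy vs dupkey) and should be rejected as a duplicate. For contrast, a well-formed line is expected to succeed; a minimal sketch (not part of the example file; database name and connection parameters are placeholders):

    #include <stdio.h>
    #include <taos.h>

    int main(void) {
      TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
      if (taos == NULL) return 1;

      TAOS_RES *r = taos_query(taos, "create database if not exists sml_demo precision 'ms'");
      taos_free_result(r);
      r = taos_query(taos, "use sml_demo");
      taos_free_result(r);

      // No duplicate tag or field keys, so this insert is expected to return 0.
      char *lines[] = {"std,id=\"std_ok\",t1=4i64,t2=true c1=true 1626006834s"};
      int code = taos_insert_lines(taos, lines, sizeof(lines) / sizeof(char *));
      printf("taos_insert_lines() return code:%d\n", code);

      taos_close(taos);
      taos_cleanup();
      return code == 0 ? 0 : 1;
    }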
tests/pytest/fulltest.sh
@@ -163,7 +163,7 @@ python3 test.py -f tools/taosdemoTestSampleData.py
 python3 test.py -f tools/taosdemoTestInterlace.py
 python3 test.py -f tools/taosdemoTestQuery.py
+python3 test.py -f tools/taosdumpTestNanoSupport.py
 
 # update
 python3 ./test.py -f update/allow_update.py
@@ -360,6 +360,8 @@ python3 ./test.py -f tag_lite/alter_tag.py
 python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py
+python3 test.py -f tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py
+python3 test.py -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py
 python3 ./test.py -f tag_lite/drop_auto_create.py
 python3 test.py -f insert/insert_before_use_db.py
 python3 test.py -f alter/alter_keep.py
tests/pytest/query/nestedQuery/queryWithOrderLimit.py
@@ -70,6 +70,14 @@ class TDTestCase:
         tdSql.query("select * from (select avg(value), sum(value) from st group by tbname slimit 5 soffset 7)")
         tdSql.checkRows(3)
 
+        # https://jira.taosdata.com:18080/browse/TD-5497
+        tdSql.execute("create table tt(ts timestamp ,i int)")
+        tdSql.execute("insert into tt values(now, 11)(now + 1s, -12)")
+        tdSql.query("select * from (select max(i),0-min(i) from tt)")
+        tdSql.checkRows(1);
+        tdSql.checkData(0, 0, 11);
+        tdSql.checkData(0, 1, 12.0);
+
     def stop(self):
         tdSql.close()
         tdLog.success("%s successfully executed" % __file__)
tests/pytest/query/operator.py (new file, +536)
Diff collapsed in the page view; not expanded here.
tests/pytest/query/queryError.py
@@ -51,7 +51,7 @@ class TDTestCase:
         tdSql.error("select last_row as latest from st")
 
         # query distinct on normal colnum
-        tdSql.error("select distinct tagtype from st")
+        #tdSql.error("select distinct tagtype from st")
 
         # query .. order by non-time field
         tdSql.error("select * from st order by name")
tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py (new file, +703)
Diff collapsed in the page view; not expanded here.
tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv (+3 −10)
Diff collapsed in the page view; not expanded here.
tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json
@@ -15,7 +15,7 @@
   "max_sql_len": 102400000,
   "databases": [{
     "dbinfo": {
-      "name": "db",
+      "name": "json",
       "drop": "yes",
       "replica": 1,
       "days": 10,
@@ -35,13 +35,13 @@
     "super_tables": [{
       "name": "stb_old",
       "child_table_exists": "no",
-      "childtable_count": 10,
+      "childtable_count": 1,
       "childtable_prefix": "stb_old_",
       "auto_create_table": "no",
       "batch_create_tbl_num": 5,
-      "data_source": "sample",
+      "data_source": "rand",
       "insert_mode": "taosc",
-      "insert_rows": 100,
+      "insert_rows": 10,
       "childtable_limit": 0,
       "childtable_offset": 0,
       "multi_thread_write_one_tbl": "no",
@@ -55,18 +55,18 @@
       "sample_format": "csv",
       "sample_file": "./tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv",
       "tags_file": "",
-      "columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":1}],
+      "columns": [{"type": "INT","count":1000}, {"type": "BINARY", "len": 16, "count":20}],
       "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
     },{
       "name": "stb_new",
       "child_table_exists": "no",
-      "childtable_count": 10,
+      "childtable_count": 1,
       "childtable_prefix": "stb_new_",
       "auto_create_table": "no",
       "batch_create_tbl_num": 5,
       "data_source": "rand",
       "insert_mode": "taosc",
-      "insert_rows": 100,
+      "insert_rows": 10,
       "childtable_limit": 0,
       "childtable_offset": 0,
       "multi_thread_write_one_tbl": "no",
@@ -80,18 +80,18 @@
       "sample_format": "csv",
       "sample_file": "./tools/taosdemoAllTest/sample.csv",
       "tags_file": "",
-      "columns": [{"type": "DOUBLE","count":1020}],
-      "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+      "columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":90}],
+      "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":3}]
     },{
-      "name": "stb_int",
+      "name": "stb_mix",
       "child_table_exists": "no",
-      "childtable_count": 10,
-      "childtable_prefix": "stb_int_",
+      "childtable_count": 1,
+      "childtable_prefix": "stb_mix_",
       "auto_create_table": "no",
       "batch_create_tbl_num": 5,
       "data_source": "rand",
       "insert_mode": "taosc",
-      "insert_rows": 100,
+      "insert_rows": 10,
      "childtable_limit": 0,
      "childtable_offset": 0,
      "multi_thread_write_one_tbl": "no",
@@ -105,8 +105,33 @@
       "sample_format": "csv",
       "sample_file": "./tools/taosdemoAllTest/sample.csv",
       "tags_file": "",
-      "columns": [{"type": "int", "count":1020}],
-      "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+      "columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "TINYINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 20,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}],
+      "tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}]
+    },{
+      "name": "stb_excel",
+      "child_table_exists": "no",
+      "childtable_count": 1,
+      "childtable_prefix": "stb_excel_",
+      "auto_create_table": "no",
+      "batch_create_tbl_num": 5,
+      "data_source": "sample",
+      "insert_mode": "taosc",
+      "insert_rows": 10,
+      "childtable_limit": 0,
+      "childtable_offset": 0,
+      "multi_thread_write_one_tbl": "no",
+      "interlace_rows": 0,
+      "insert_interval": 0,
+      "max_sql_len": 1024000,
+      "disorder_ratio": 0,
+      "disorder_range": 1000,
+      "timestamp_step": 1,
+      "start_timestamp": "2020-10-01 00:00:00.000",
+      "sample_format": "csv",
+      "sample_file": "./tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv",
+      "tags_file": "",
+      "columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "SMALLINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 19,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}],
+      "tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}]
     }]
   }]
 }
tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py
@@ -13,6 +13,7 @@
 import sys
 import os
+import time
 from util.log import *
 from util.cases import *
 from util.sql import *
@@ -23,7 +24,7 @@ class TDTestCase:
     def init(self, conn, logSql):
         tdLog.debug("start to execute %s" % __file__)
         tdSql.init(conn.cursor(), logSql)
 
     def getBuildPath(self):
         selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -39,7 +40,7 @@ class TDTestCase:
                     buildPath = root[:len(root) - len("/build/bin")]
                     break
         return buildPath
 
     def run(self):
         buildPath = self.getBuildPath()
         if (buildPath == ""):
@@ -48,86 +49,124 @@ class TDTestCase: (the run() body is largely rewritten in this commit)
         tdLog.info("taosd found in %s" % buildPath)
         binPath = buildPath + "/build/bin/"
 
+        #-N:regular table  -d:database name -t:table num -n:rows num per table -l:col num -y:force
+        #regular old && new
+        startTime = time.time()
+        os.system("%staosdemo -N -d regular_old -t 1 -n 10 -l 1023 -y" % binPath)
+        tdSql.execute("use regular_old")
+        tdSql.query("show tables;")
+        tdSql.checkRows(1)
+        tdSql.query("select * from d0;")
+        tdSql.checkCols(1024)
+        tdSql.query("describe d0;")
+        tdSql.checkRows(1024)
+
+        os.system("%staosdemo -N -d regular_new -t 1 -n 10 -l 4095 -y" % binPath)
+        tdSql.execute("use regular_new")
+        tdSql.query("show tables;")
+        tdSql.checkRows(1)
+        tdSql.query("select * from d0;")
+        tdSql.checkCols(4096)
+        tdSql.query("describe d0;")
+        tdSql.checkRows(4096)
+
+        #super table  -d:database name -t:table num -n:rows num per table -l:col num -y:force
+        os.system("%staosdemo -d super_old -t 1 -n 10 -l 1021 -y" % binPath)
+        tdSql.execute("use super_old")
+        tdSql.query("show tables;")
+        tdSql.checkRows(1)
+        tdSql.query("select * from meters;")
+        tdSql.checkCols(1024)
+        tdSql.query("select * from d0;")
+        tdSql.checkCols(1022)
+        tdSql.query("describe meters;")
+        tdSql.checkRows(1024)
+        tdSql.query("describe d0;")
+        tdSql.checkRows(1024)
+
+        os.system("%staosdemo -d super_new -t 1 -n 10 -l 4093 -y" % binPath)
+        tdSql.execute("use super_new")
+        tdSql.query("show tables;")
+        tdSql.checkRows(1)
+        tdSql.query("select * from meters;")
+        tdSql.checkCols(4096)
+        tdSql.query("select * from d0;")
+        tdSql.checkCols(4094)
+        tdSql.query("describe meters;")
+        tdSql.checkRows(4096)
+        tdSql.query("describe d0;")
+        tdSql.checkRows(4096)
+        tdSql.execute("create table stb_new1_1 using meters tags(1,2)")
+        tdSql.query("select * from stb_new1_1")
+        tdSql.checkCols(4094)
+        tdSql.query("describe stb_new1_1;")
+        tdSql.checkRows(4096)
+
         # insert: create one or mutiple tables per sql and insert multiple rows per sql
         # test case for https://jira.taosdata.com:18080/browse/TD-5213
         os.system("%staosdemo -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json -y " % binPath)
-        tdSql.execute("use db")
+        tdSql.execute("use json")
         tdSql.query("select count (tbname) from stb_old")
-        tdSql.checkData(0, 0, 10)
-        # tdSql.query("select * from stb_old")
-        # tdSql.checkRows(10)
-        # tdSql.checkCols(1024)
-        # tdSql.query("select count (tbname) from stb_new")
-        # tdSql.checkData(0, 0, 10)
-        # tdSql.query("select * from stb_new")
-        # tdSql.checkRows(10)
-        # tdSql.checkCols(4096)
-        # tdLog.info("stop dnode to commit data to disk")
-        # tdDnodes.stop(1)
-        # tdDnodes.start(1)
-
-        #regular table
-        sql = "create table tb(ts timestamp, "
-        for i in range(1022):
-            sql += "c%d binary(14), " % (i + 1)
-        sql += "c1023 binary(22))"
-        tdSql.execute(sql)
-        for i in range(4):
-            sql = "insert into tb values(%d, "
-            for j in range(1022):
-                str = "'%s', " % self.get_random_string(14)
-                sql += str
-            sql += "'%s')" % self.get_random_string(22)
-            tdSql.execute(sql % (self.ts + i))
-        time.sleep(10)
-        tdSql.query("select count(*) from tb")
-        tdSql.checkData(0, 0, 4)
-        tdDnodes.stop(1)
-        tdDnodes.start(1)
-        time.sleep(1)
-        tdSql.query("select count(*) from tb")
-        tdSql.checkData(0, 0, 4)
-
-        sql = "create table tb1(ts timestamp, "
-        for i in range(4094):
-            sql += "c%d binary(14), " % (i + 1)
-        sql += "c4095 binary(22))"
-        tdSql.execute(sql)
-        for i in range(4):
-            sql = "insert into tb1 values(%d, "
-            for j in range(4094):
-                str = "'%s', " % self.get_random_string(14)
-                sql += str
-            sql += "'%s')" % self.get_random_string(22)
-            tdSql.execute(sql % (self.ts + i))
-        time.sleep(10)
-        tdSql.query("select count(*) from tb1")
-        tdSql.checkData(0, 0, 4)
-        tdDnodes.stop(1)
-        tdDnodes.start(1)
-        time.sleep(1)
-        tdSql.query("select count(*) from tb1")
-        tdSql.checkData(0, 0, 4)
-
-        #os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql")
+        tdSql.checkData(0, 0, 1)
+        tdSql.query("select * from stb_old")
+        tdSql.checkRows(10)
+        tdSql.checkCols(1024)
+
+        tdSql.query("select count (tbname) from stb_new")
+        tdSql.checkData(0, 0, 1)
+        tdSql.query("select * from stb_new")
+        tdSql.checkRows(10)
+        tdSql.checkCols(4096)
+        tdSql.query("describe stb_new;")
+        tdSql.checkRows(4096)
+        tdSql.query("select * from stb_new_0")
+        tdSql.checkRows(10)
+        tdSql.checkCols(4091)
+        tdSql.query("describe stb_new_0;")
+        tdSql.checkRows(4096)
+        tdSql.execute("create table stb_new1_1 using stb_new tags(1,2,3,4,5)")
+        tdSql.query("select * from stb_new1_1")
+        tdSql.checkCols(4091)
+        tdSql.query("describe stb_new1_1;")
+        tdSql.checkRows(4096)
+
+        tdSql.query("select count (tbname) from stb_mix")
+        tdSql.checkData(0, 0, 1)
+        tdSql.query("select * from stb_mix")
+        tdSql.checkRows(10)
+        tdSql.checkCols(4096)
+        tdSql.query("describe stb_mix;")
+        tdSql.checkRows(4096)
+        tdSql.query("select * from stb_mix_0")
+        tdSql.checkRows(10)
+        tdSql.checkCols(4092)
+        tdSql.query("describe stb_mix_0;")
+        tdSql.checkRows(4096)
+
+        tdSql.query("select count (tbname) from stb_excel")
+        tdSql.checkData(0, 0, 1)
+        tdSql.query("select * from stb_excel")
+        tdSql.checkRows(10)
+        tdSql.checkCols(4096)
+        tdSql.query("describe stb_excel;")
+        tdSql.checkRows(4096)
+        tdSql.query("select * from stb_excel_0")
+        tdSql.checkRows(10)
+        tdSql.checkCols(4092)
+        tdSql.query("describe stb_excel_0;")
+        tdSql.checkRows(4096)
+        endTime = time.time()
+        print("total time %ds" % (endTime - startTime))
+
+        os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql")
 
     def stop(self):
         tdSql.close()
         tdLog.success("%s successfully executed" % __file__)
tests/pytest/tools/taosdumpTestNanoSupport.py (new file, +362)

###################################################################
#           Copyright (c) 2016 by TAOS Technologies, Inc.
#                     All rights reserved.
#
#  This file is proprietary and confidential to TAOS Technologies.
#  No part of this file may be reproduced, stored, transmitted,
#  disclosed or used in any form or by any means other than as
#  expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.ts = 1625068800000000000  # this is timestamp  "2021-07-01 00:00:00"
        self.numberOfTables = 10
        self.numberOfRecords = 100

    def checkCommunity(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        if ("community" in selfPath):
            return False
        else:
            return True

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]
        for root, dirs, files in os.walk(projPath):
            if ("taosdump" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def createdb(self, precision="ns"):
        tb_nums = self.numberOfTables
        per_tb_rows = self.numberOfRecords

        def build_db(precision, start_time):
            tdSql.execute("drop database if exists timedb1")
            tdSql.execute("create database timedb1 days 10 keep 365 blocks 8 precision " + "\"" + precision + "\"")
            tdSql.execute("use timedb1")
            tdSql.execute("create stable st(ts timestamp, c1 int, c2 nchar(10),c3 timestamp) tags(t1 int, t2 binary(10))")
            for tb in range(tb_nums):
                tbname = "t" + str(tb)
                tdSql.execute("create table " + tbname + " using st tags(1, 'beijing')")
                sql = "insert into " + tbname + " values"
                currts = start_time
                if precision == "ns":
                    ts_seed = 1000000000
                elif precision == "us":
                    ts_seed = 1000000
                else:
                    ts_seed = 1000
                for i in range(per_tb_rows):
                    # currts +1000ms (1000000000ns)
                    sql += "(%d, %d, 'nchar%d',%d)" % (currts + i * ts_seed, i % 100, i % 100, currts + i * 100)
                tdSql.execute(sql)

        if precision == "ns":
            start_time = 1625068800000000000
            build_db(precision, start_time)
        elif precision == "us":
            start_time = 1625068800000000
            build_db(precision, start_time)
        elif precision == "ms":
            start_time = 1625068800000
            build_db(precision, start_time)
        else:
            print("other time precision not valid , please check! ")

    def run(self):
        # clear envs
        os.system("rm -rf ./taosdumptest/")
        tdSql.execute("drop database if exists dumptmp1")
        tdSql.execute("drop database if exists dumptmp2")
        tdSql.execute("drop database if exists dumptmp3")
        if not os.path.exists("./taosdumptest/tmp1"):
            os.makedirs("./taosdumptest/dumptmp1")
        else:
            print("path exist!")
        if not os.path.exists("./taosdumptest/dumptmp2"):
            os.makedirs("./taosdumptest/dumptmp2")
        if not os.path.exists("./taosdumptest/dumptmp3"):
            os.makedirs("./taosdumptest/dumptmp3")

        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosdump not found!")
        else:
            tdLog.info("taosdump found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # create nano second database
        self.createdb(precision="ns")

        # dump all data
        os.system("%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)

        # dump part data with -S -E
        os.system('%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -C ns -o ./taosdumptest/dumptmp2 ' % binPath)
        os.system('%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' % binPath)

        # replace strings to dump in databases
        os.system("sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
        os.system("sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
        os.system("sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")

        os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath)
        os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath)
        os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath)

        # dump data and check for taosdump
        tdSql.query("select count(*) from dumptmp1.st")
        tdSql.checkData(0, 0, 1000)
        tdSql.query("select count(*) from dumptmp2.st")
        tdSql.checkData(0, 0, 510)
        tdSql.query("select count(*) from dumptmp3.st")
        tdSql.checkData(0, 0, 900)

        # check data
        origin_res = tdSql.getResult("select * from timedb1.st")
        dump_res = tdSql.getResult("select * from dumptmp1.st")
        if origin_res == dump_res:
            tdLog.info("test nano second : dump check data pass for all data!")
        else:
            tdLog.info("test nano second : dump check data failed for all data!")

        origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000")
        dump_res = tdSql.getResult("select * from dumptmp2.st")
        if origin_res == dump_res:
            tdLog.info(" test nano second : dump check data pass for data! ")
        else:
            tdLog.info(" test nano second : dump check data failed for data !")

        origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 ")
        dump_res = tdSql.getResult("select * from dumptmp3.st")
        if origin_res == dump_res:
            tdLog.info(" test nano second : dump check data pass for data! ")
        else:
            tdLog.info(" test nano second : dump check data failed for data !")

        # us second support test case
        os.system("rm -rf ./taosdumptest/")
        tdSql.execute("drop database if exists dumptmp1")
        tdSql.execute("drop database if exists dumptmp2")
        tdSql.execute("drop database if exists dumptmp3")
        if not os.path.exists("./taosdumptest/tmp1"):
            os.makedirs("./taosdumptest/dumptmp1")
        else:
            print("path exits!")
        if not os.path.exists("./taosdumptest/dumptmp2"):
            os.makedirs("./taosdumptest/dumptmp2")
        if not os.path.exists("./taosdumptest/dumptmp3"):
            os.makedirs("./taosdumptest/dumptmp3")

        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosdump not found!")
        else:
            tdLog.info("taosdump found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        self.createdb(precision="us")

        os.system("%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)
        os.system('%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -C us -o ./taosdumptest/dumptmp2 ' % binPath)
        os.system('%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' % binPath)

        os.system("sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
        os.system("sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
        os.system("sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")

        os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath)
        os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath)
        os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath)

        tdSql.query("select count(*) from dumptmp1.st")
        tdSql.checkData(0, 0, 1000)
        tdSql.query("select count(*) from dumptmp2.st")
        tdSql.checkData(0, 0, 510)
        tdSql.query("select count(*) from dumptmp3.st")
        tdSql.checkData(0, 0, 900)

        origin_res = tdSql.getResult("select * from timedb1.st")
        dump_res = tdSql.getResult("select * from dumptmp1.st")
        if origin_res == dump_res:
            tdLog.info("test us second : dump check data pass for all data!")
        else:
            tdLog.info("test us second : dump check data failed for all data!")

        origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000")
        dump_res = tdSql.getResult("select * from dumptmp2.st")
        if origin_res == dump_res:
            tdLog.info(" test us second : dump check data pass for data! ")
        else:
            tdLog.info(" test us second : dump check data failed for data!")

        origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 ")
        dump_res = tdSql.getResult("select * from dumptmp3.st")
        if origin_res == dump_res:
            tdLog.info(" test us second : dump check data pass for data! ")
        else:
            tdLog.info(" test us second : dump check data failed for data! ")

        # ms second support test case
        os.system("rm -rf ./taosdumptest/")
        tdSql.execute("drop database if exists dumptmp1")
        tdSql.execute("drop database if exists dumptmp2")
        tdSql.execute("drop database if exists dumptmp3")
        if not os.path.exists("./taosdumptest/tmp1"):
            os.makedirs("./taosdumptest/dumptmp1")
        else:
            print("path exits!")
        if not os.path.exists("./taosdumptest/dumptmp2"):
            os.makedirs("./taosdumptest/dumptmp2")
        if not os.path.exists("./taosdumptest/dumptmp3"):
            os.makedirs("./taosdumptest/dumptmp3")

        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosdump not found!")
        else:
            tdLog.info("taosdump found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        self.createdb(precision="ms")

        os.system("%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath)
        os.system('%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -C ms -o ./taosdumptest/dumptmp2 ' % binPath)
        os.system('%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' % binPath)

        os.system("sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`")
        os.system("sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`")
        os.system("sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`")

        os.system("%staosdump -i ./taosdumptest/dumptmp1" % binPath)
        os.system("%staosdump -i ./taosdumptest/dumptmp2" % binPath)
        os.system("%staosdump -i ./taosdumptest/dumptmp3" % binPath)

        tdSql.query("select count(*) from dumptmp1.st")
        tdSql.checkData(0, 0, 1000)
        tdSql.query("select count(*) from dumptmp2.st")
        tdSql.checkData(0, 0, 510)
        tdSql.query("select count(*) from dumptmp3.st")
        tdSql.checkData(0, 0, 900)

        origin_res = tdSql.getResult("select * from timedb1.st")
        dump_res = tdSql.getResult("select * from dumptmp1.st")
        if origin_res == dump_res:
            tdLog.info("test ms second : dump check data pass for all data!")
        else:
            tdLog.info("test ms second : dump check data failed for all data!")

        origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 and ts <= 1625068860000")
        dump_res = tdSql.getResult("select * from dumptmp2.st")
        if origin_res == dump_res:
            tdLog.info(" test ms second : dump check data pass for data! ")
        else:
            tdLog.info(" test ms second : dump check data failed for data!")

        origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 ")
        dump_res = tdSql.getResult("select * from dumptmp3.st")
        if origin_res == dump_res:
            tdLog.info(" test ms second : dump check data pass for data! ")
        else:
            tdLog.info(" test ms second : dump check data failed for data! ")

        os.system("rm -rf ./taosdumptest/")
        os.system("rm -rf ./dump_result.txt")
        os.system("rm -rf *.py.sql")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/util/sql.py
@@ -197,6 +197,19 @@ class TDSql:
         self.checkRowCol(row, col)
         return self.queryResult[row][col]
 
+    def getResult(self, sql):
+        self.sql = sql
+        try:
+            self.cursor.execute(sql)
+            self.queryResult = self.cursor.fetchall()
+        except Exception as e:
+            caller = inspect.getframeinfo(inspect.stack()[1][0])
+            args = (caller.filename, caller.lineno, sql, repr(e))
+            tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
+            raise Exception(repr(e))
+        return self.queryResult
+
     def executeTimes(self, sql, times):
         for i in range(times):
             try:
@@ -282,4 +295,4 @@ class TDSql:
         tdLog.info("dir: %s is created" % dir)
         pass
 
-tdSql = TDSql()
\ No newline at end of file
+tdSql = TDSql()