taosdata / TDengine
Commit f52ad00d
Authored on July 20, 2022 by Shengliang Guan

Merge branch '3.0' into fix/mnode

Parents: e9c408df, 5a0668da
Showing 21 changed files with 439 additions and 224 deletions (+439, -224)
source/client/src/clientSml.c                 +1   -1
source/dnode/mnode/impl/src/mndSync.c         +4   -0
source/dnode/vnode/src/tsdb/tsdbRead.c        +0   -1
source/libs/function/src/builtinsimpl.c       +53  -16
source/libs/sync/src/syncElection.c           +9   -4
source/libs/sync/src/syncMain.c               +15  -4
source/libs/sync/src/syncRequestVote.c        +7   -9
source/libs/sync/src/syncRequestVoteReply.c   +74  -33
source/libs/sync/src/syncSnapshot.c           +6   -0
source/libs/transport/inc/transComm.h         +1   -1
source/libs/transport/src/.transComm.c.swo    +0   -0
source/libs/transport/src/transCli.c          +7   -3
source/libs/transport/src/transComm.c         +6   -1
tests/script/tsim/valgrind/checkError3.sim    +1   -1
tests/system-test/1-insert/alter_table.py     +6   -6
tests/system-test/1-insert/delete_data.py     +125 -36
tests/system-test/2-query/max_partition.py    +27  -27
tests/system-test/2-query/twa.py              +12  -12
tests/system-test/7-tmq/tmqAutoCreateTbl.py   +25  -13
tests/system-test/7-tmq/tmqDnodeRestart.py    +58  -54
tests/system-test/fulltest.sh                 +2   -2
source/client/src/clientSml.c

@@ -2210,7 +2210,7 @@ static int32_t smlParseTelnetLine(SSmlHandle *info, void *data) {
         (SSmlSTableMeta **)taosHashGet(info->superTables, (*oneTable)->sTableName, (*oneTable)->sTableNameLen);
     if (tableMeta) {  // update meta
       ret = smlUpdateMeta((*tableMeta)->colHash, (*tableMeta)->cols, cols, &info->msgBuf);
-      if (!hasTable && ret) {
+      if (!hasTable && ret == TSDB_CODE_SUCCESS) {
         ret = smlUpdateMeta((*tableMeta)->tagHash, (*tableMeta)->tags, (*oneTable)->tags, &info->msgBuf);
       }
       if (ret != TSDB_CODE_SUCCESS) {
source/dnode/mnode/impl/src/mndSync.c

@@ -44,6 +44,10 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
   SSyncMgmt *pMgmt = &pMnode->syncMgmt;
   SSdbRaw   *pRaw = pMsg->pCont;

+  // delete msg handle
+  SRpcMsg rpcMsg = {0};
+  syncGetAndDelRespRpc(pMnode->syncMgmt.sync, cbMeta.seqNum, &rpcMsg.info);
+
   int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw);
   pMgmt->errCode = cbMeta.code;
   mDebug("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64
source/dnode/vnode/src/tsdb/tsdbRead.c

@@ -2807,7 +2807,6 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
   if (pCond->suid != 0) {
     (*ppReader)->pSchema = metaGetTbTSchema((*ppReader)->pTsdb->pVnode->pMeta, (*ppReader)->suid, -1);
-    // ASSERT((*ppReader)->pSchema);
   } else if (taosArrayGetSize(pTableList) > 0) {
     STableKeyInfo* pKey = taosArrayGet(pTableList, 0);
     (*ppReader)->pSchema = metaGetTbTSchema((*ppReader)->pTsdb->pVnode->pMeta, pKey->uid, -1);
source/libs/function/src/builtinsimpl.c

@@ -165,6 +165,7 @@ typedef struct SElapsedInfo {
 typedef struct STwaInfo {
   double      dOutput;
+  bool        isNull;
   SPoint1     p;
   STimeWindow win;
 } STwaInfo;

@@ -5181,8 +5182,9 @@ bool twaFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo) {
   }

   STwaInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
-  pInfo->p.key = INT64_MIN;
-  pInfo->win   = TSWINDOW_INITIALIZER;
+  pInfo->isNull = false;
+  pInfo->p.key  = INT64_MIN;
+  pInfo->win    = TSWINDOW_INITIALIZER;
   return true;
 }

@@ -5208,27 +5210,47 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
   SPoint1* last = &pInfo->p;
   int32_t  numOfElems = 0;

+  if (IS_NULL_TYPE(pInputCol->info.type)) {
+    pInfo->isNull = true;
+    goto _twa_over;
+  }
+
   int32_t i = pInput->startRowIndex;
   if (pCtx->start.key != INT64_MIN) {
     ASSERT((pCtx->start.key < tsList[i] && pCtx->order == TSDB_ORDER_ASC) ||
            (pCtx->start.key > tsList[i] && pCtx->order == TSDB_ORDER_DESC));

     ASSERT(last->key == INT64_MIN);
-    last->key = tsList[i];
-    GET_TYPED_DATA(last->val, double, pInputCol->info.type, colDataGetData(pInputCol, i));
-    pInfo->dOutput += twa_get_area(pCtx->start, *last);
-    pInfo->win.skey = pCtx->start.key;
-    numOfElems++;
-    i += 1;
+    for (; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
+      if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+        continue;
+      }
+
+      last->key = tsList[i];
+      GET_TYPED_DATA(last->val, double, pInputCol->info.type, colDataGetData(pInputCol, i));
+      pInfo->dOutput += twa_get_area(pCtx->start, *last);
+      pInfo->win.skey = pCtx->start.key;
+      numOfElems++;
+      i += 1;
+      break;
+    }
   } else if (pInfo->p.key == INT64_MIN) {
-    last->key = tsList[i];
-    GET_TYPED_DATA(last->val, double, pInputCol->info.type, colDataGetData(pInputCol, i));
-    pInfo->win.skey = last->key;
-    numOfElems++;
-    i += 1;
+    for (; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
+      if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+        continue;
+      }
+
+      last->key = tsList[i];
+      GET_TYPED_DATA(last->val, double, pInputCol->info.type, colDataGetData(pInputCol, i));
+      pInfo->win.skey = last->key;
+      numOfElems++;
+      i += 1;
+      break;
+    }
   }

   SPoint1 st = {0};

@@ -5241,6 +5263,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
         if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
           continue;
         }
+        numOfElems++;

         INIT_INTP_POINT(st, tsList[i], val[i]);
         pInfo->dOutput += twa_get_area(pInfo->p, st);

@@ -5255,6 +5278,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
         if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
           continue;
         }
+        numOfElems++;

         INIT_INTP_POINT(st, tsList[i], val[i]);
         pInfo->dOutput += twa_get_area(pInfo->p, st);

@@ -5268,6 +5292,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
         if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
           continue;
         }
+        numOfElems++;

         INIT_INTP_POINT(st, tsList[i], val[i]);
         pInfo->dOutput += twa_get_area(pInfo->p, st);

@@ -5281,6 +5306,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
         if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
           continue;
         }
+        numOfElems++;

         INIT_INTP_POINT(st, tsList[i], val[i]);
         pInfo->dOutput += twa_get_area(pInfo->p, st);

@@ -5294,6 +5320,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
         if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
           continue;
         }
+        numOfElems++;

         INIT_INTP_POINT(st, tsList[i], val[i]);
         pInfo->dOutput += twa_get_area(pInfo->p, st);

@@ -5307,6 +5334,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
         if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
           continue;
         }
+        numOfElems++;

         INIT_INTP_POINT(st, tsList[i], val[i]);
         pInfo->dOutput += twa_get_area(pInfo->p, st);

@@ -5320,6 +5348,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
         if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
           continue;
         }
+        numOfElems++;

         INIT_INTP_POINT(st, tsList[i], val[i]);
         pInfo->dOutput += twa_get_area(pInfo->p, st);

@@ -5333,6 +5362,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
         if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
           continue;
         }
+        numOfElems++;

         INIT_INTP_POINT(st, tsList[i], val[i]);
         pInfo->dOutput += twa_get_area(pInfo->p, st);

@@ -5346,6 +5376,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
         if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
           continue;
         }
+        numOfElems++;

         INIT_INTP_POINT(st, tsList[i], val[i]);
         pInfo->dOutput += twa_get_area(pInfo->p, st);

@@ -5359,6 +5390,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
         if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
           continue;
         }
+        numOfElems++;

         INIT_INTP_POINT(st, tsList[i], val[i]);
         pInfo->dOutput += twa_get_area(pInfo->p, st);

@@ -5379,7 +5411,12 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
   pInfo->win.ekey = pInfo->p.key;

-  SET_VAL(pResInfo, numOfElems, 1);
+_twa_over:
+  if (numOfElems == 0) {
+    pInfo->isNull = true;
+  }
+
+  SET_VAL(pResInfo, 1, 1);
   return TSDB_CODE_SUCCESS;
 }

@@ -5400,8 +5437,8 @@ int32_t twaFinalize(struct SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
   SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
   STwaInfo*            pInfo = (STwaInfo*)GET_ROWCELL_INTERBUF(pResInfo);

-  if (pResInfo->numOfRes == 0) {
-    pResInfo->isNullRes = 1;
+  if (pInfo->isNull == true) {
+    pResInfo->numOfRes = 0;
   } else {
     if (pInfo->win.ekey == pInfo->win.skey) {
       pInfo->dOutput = pInfo->p.val;
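The twaFunction hunks above all revolve around the same accumulation: for each pair of consecutive non-NULL (timestamp, value) points, twa_get_area adds a trapezoid area, and the final time-weighted average divides the accumulated area by the covered time range. Below is a minimal, self-contained sketch of that idea; the struct and function names are simplified stand-ins, not the TDengine implementation, which works on SPoint1 and column data blocks.

#include <stdio.h>

/* Simplified stand-in for the (timestamp, value) points used by the TWA code. */
typedef struct { long long key; double val; } Point;

/* Area of the trapezoid spanned by two consecutive points:
 * average of the two values times the time delta between them. */
static double trapezoidArea(Point s, Point e) {
  return (s.val + e.val) * (double)(e.key - s.key) / 2.0;
}

int main(void) {
  Point  pts[] = {{0, 1.0}, {10, 3.0}, {20, 3.0}};
  double area = 0.0;
  for (int i = 1; i < 3; ++i) {
    area += trapezoidArea(pts[i - 1], pts[i]);
  }
  /* time-weighted average = accumulated area / covered time range */
  printf("twa = %f\n", area / (double)(pts[2].key - pts[0].key)); /* prints 2.500000 */
  return 0;
}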
source/libs/sync/src/syncElection.c

@@ -71,6 +71,8 @@ int32_t syncNodeRequestVotePeersSnapshot(SSyncNode* pSyncNode) {
 }

 int32_t syncNodeElect(SSyncNode* pSyncNode) {
+  syncNodeEventLog(pSyncNode, "begin election");
+
   int32_t ret = 0;
   if (pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER) {
     syncNodeFollower2Candidate(pSyncNode);

@@ -120,12 +122,15 @@ int32_t syncNodeRequestVote(SSyncNode* pSyncNode, const SRaftId* destRaftId, con
   int32_t ret = 0;

   do {
-    char     host[128];
+    char     host[64];
     uint16_t port;
     syncUtilU642Addr(destRaftId->addr, host, sizeof(host), &port);
-    sDebug("vgId:%d, send sync-request-vote to %s:%d, {term:%" PRIu64 ", last-index:%" PRId64 ", last-term:%" PRIu64 "}",
-           pSyncNode->vgId, host, port, pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm);
+
+    char logBuf[256];
+    snprintf(logBuf, sizeof(logBuf),
+             "send sync-request-vote to %s:%d {term:%" PRIu64 ", lindex:%" PRId64 ", lterm:%" PRIu64 "}",
+             host, port, pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm);
+    syncNodeEventLog(pSyncNode, logBuf);
   } while (0);

   SRpcMsg rpcMsg;
source/libs/sync/src/syncMain.c

@@ -999,7 +999,18 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
   // init TLA+ log vars
   pSyncNode->pLogStore = logStoreCreate(pSyncNode);
   ASSERT(pSyncNode->pLogStore != NULL);
-  pSyncNode->commitIndex = SYNC_INDEX_INVALID;
+
+  SyncIndex commitIndex = SYNC_INDEX_INVALID;
+  if (pSyncNode->pFsm != NULL && pSyncNode->pFsm->FpGetSnapshotInfo != NULL) {
+    SSnapshot snapshot = {0};
+    int32_t   code = pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot);
+    ASSERT(code == 0);
+    if (snapshot.lastApplyIndex > commitIndex) {
+      commitIndex = snapshot.lastApplyIndex;
+      syncNodeEventLog(pSyncNode, "reset commit index by snapshot");
+    }
+  }
+  pSyncNode->commitIndex = commitIndex;

   // timer ms init
   pSyncNode->pingBaseLine = PING_TIMER_MS;

@@ -2061,21 +2072,21 @@ void syncNodeFollower2Candidate(SSyncNode* pSyncNode) {
   ASSERT(pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER);
   pSyncNode->state = TAOS_SYNC_STATE_CANDIDATE;

-  syncNodeLog2("==state change syncNodeFollower2Candidate==", pSyncNode);
+  syncNodeEventLog(pSyncNode, "follower to candidate");
 }

 void syncNodeLeader2Follower(SSyncNode* pSyncNode) {
   ASSERT(pSyncNode->state == TAOS_SYNC_STATE_LEADER);
   syncNodeBecomeFollower(pSyncNode, "leader to follower");

-  syncNodeLog2("==state change syncNodeLeader2Follower==", pSyncNode);
+  syncNodeEventLog(pSyncNode, "leader to follower");
 }

 void syncNodeCandidate2Follower(SSyncNode* pSyncNode) {
   ASSERT(pSyncNode->state == TAOS_SYNC_STATE_CANDIDATE);
   syncNodeBecomeFollower(pSyncNode, "candidate to follower");

-  syncNodeLog2("==state change syncNodeCandidate2Follower==", pSyncNode);
+  syncNodeEventLog(pSyncNode, "candidate to follower");
 }

 // raft vote --------------
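The syncNodeOpen hunk above replaces a fixed SYNC_INDEX_INVALID commit index with one taken from the state machine's snapshot, so a restarted node starts committing from what it has already applied. A minimal sketch of that selection logic follows; the types and function are hypothetical illustrations, not the sync module's real API.

#include <stdio.h>

#define SYNC_INDEX_INVALID (-1LL)

/* Hypothetical snapshot descriptor; only the applied index matters here. */
typedef struct { long long lastApplyIndex; } Snapshot;

/* Start from the invalid sentinel and raise it to the snapshot's applied index if that is larger. */
static long long initialCommitIndex(const Snapshot *snap) {
  long long commitIndex = SYNC_INDEX_INVALID;
  if (snap != NULL && snap->lastApplyIndex > commitIndex) {
    commitIndex = snap->lastApplyIndex; /* "reset commit index by snapshot" */
  }
  return commitIndex;
}

int main(void) {
  Snapshot snap = {42};
  printf("commit index starts at %lld\n", initialCommitIndex(&snap));       /* 42 */
  printf("without a snapshot it stays at %lld\n", initialCommitIndex(NULL)); /* -1 */
  return 0;
}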
source/libs/sync/src/syncRequestVote.c

@@ -45,8 +45,6 @@
 int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg) {
   int32_t ret = 0;

-  syncRequestVoteLog2("==syncNodeOnRequestVoteCb==", pMsg);
-
   // if already drop replica, do not process
   if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
     do {

@@ -55,8 +53,8 @@ int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg) {
       uint16_t port;
       syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
       snprintf(logBuf, sizeof(logBuf),
-               "recv sync-request-vote from %s:%d, term:%" PRIu64 ", lindex:%" PRId64 ", lterm:%" PRIu64
-               ", maybe replica already dropped",
+               "recv sync-request-vote from %s:%d, {term:%" PRIu64 ", lindex:%" PRId64 ", lterm:%" PRIu64
+               "}, maybe replica already dropped",
                host, port, pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm);
       syncNodeEventLog(ths, logBuf);
     } while (0);

@@ -98,8 +96,8 @@ int32_t syncNodeOnRequestVoteCb(SSyncNode* ths, SyncRequestVote* pMsg) {
       uint16_t port;
       syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
       snprintf(logBuf, sizeof(logBuf),
-               "recv sync-request-vote from %s:%d, term:%" PRIu64 ", lindex:%" PRId64 ", lterm:%" PRIu64
-               ", reply-grant:%d",
+               "recv sync-request-vote from %s:%d, {term:%" PRIu64 ", lindex:%" PRId64 ", lterm:%" PRIu64
+               "}, reply-grant:%d",
                host, port, pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, pReply->voteGranted);
       syncNodeEventLog(ths, logBuf);
     } while (0);

@@ -220,8 +218,8 @@ int32_t syncNodeOnRequestVoteSnapshotCb(SSyncNode* ths, SyncRequestVote* pMsg) {
       uint16_t port;
       syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
       snprintf(logBuf, sizeof(logBuf),
-               "recv sync-request-vote from %s:%d, term:%" PRIu64 ", lindex:%" PRId64 ", lterm:%" PRIu64
-               ", maybe replica already dropped",
+               "recv sync-request-vote from %s:%d, {term:%" PRIu64 ", lindex:%" PRId64 ", lterm:%" PRIu64
+               "}, maybe replica already dropped",
                host, port, pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm);
       syncNodeEventLog(ths, logBuf);
     } while (0);

@@ -262,7 +260,7 @@ int32_t syncNodeOnRequestVoteSnapshotCb(SSyncNode* ths, SyncRequestVote* pMsg) {
       syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
       snprintf(logBuf, sizeof(logBuf),
                "recv sync-request-vote from %s:%d, {term:%" PRIu64 ", lindex:%" PRId64 ", lterm:%" PRIu64
-               ", reply-grant:%d}",
+               "}, reply-grant:%d",
                host, port, pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, pReply->voteGranted);
       syncNodeEventLog(ths, logBuf);
     } while (0);
source/libs/sync/src/syncRequestVoteReply.c

@@ -40,22 +40,41 @@
 int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) {
   int32_t ret = 0;

-  // print log
-  char logBuf[128] = {0};
-  snprintf(logBuf, sizeof(logBuf), "==syncNodeOnRequestVoteReplyCb== term:%" PRIu64, ths->pRaftStore->currentTerm);
-  syncRequestVoteReplyLog2(logBuf, pMsg);
+  // trace log
+  do {
+    char     host[64];
+    uint16_t port;
+    syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
+    char logBuf[256];
+    snprintf(logBuf, sizeof(logBuf), "recv sync-request-vote-reply from %s:%d {term:%" PRIu64 ", grant:%d} ", host,
+             port, pMsg->term, pMsg->voteGranted);
+    syncNodeEventLog(ths, logBuf);
+  } while (0);

   // if already drop replica, do not process
   if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
-    sInfo("recv SyncRequestVoteReply, maybe replica already dropped");
-    return ret;
+    char     host[64];
+    uint16_t port;
+    syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
+    char logBuf[256];
+    snprintf(logBuf, sizeof(logBuf),
+             "recv sync-request-vote-reply from %s:%d {term:%" PRIu64 ", grant:%d}, maybe replica dropped", host,
+             port, pMsg->term, pMsg->voteGranted);
+    syncNodeErrorLog(ths, logBuf);
+    return -1;
   }

   // drop stale response
   if (pMsg->term < ths->pRaftStore->currentTerm) {
-    sTrace("recv SyncRequestVoteReply, drop stale response, receive_term:%" PRIu64 " current_term:%" PRIu64,
-           pMsg->term, ths->pRaftStore->currentTerm);
-    return ret;
+    char     host[64];
+    uint16_t port;
+    syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
+    char logBuf[256];
+    snprintf(logBuf, sizeof(logBuf),
+             "recv sync-request-vote-reply from %s:%d {term:%" PRIu64 ", grant:%d}, drop stale response", host,
+             port, pMsg->term, pMsg->voteGranted);
+    syncNodeErrorLog(ths, logBuf);
+    return -1;
   }

   // ASSERT(!(pMsg->term > ths->pRaftStore->currentTerm));

@@ -65,12 +84,14 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg)
   // }

   if (pMsg->term > ths->pRaftStore->currentTerm) {
-    char logBuf[128] = {0};
-    snprintf(logBuf, sizeof(logBuf), "syncNodeOnRequestVoteReplyCb error term, receive:%" PRIu64 " current:%" PRIu64,
-             pMsg->term, ths->pRaftStore->currentTerm);
-    syncNodePrint2(logBuf, ths);
-    sError("%s", logBuf);
-    return ret;
+    char     host[64];
+    uint16_t port;
+    syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
+    char logBuf[256];
+    snprintf(logBuf, sizeof(logBuf),
+             "recv sync-request-vote-reply from %s:%d {term:%" PRIu64 ", grant:%d}, error term", host, port,
+             pMsg->term, pMsg->voteGranted);
+    syncNodeErrorLog(ths, logBuf);
+    return -1;
   }

   ASSERT(pMsg->term == ths->pRaftStore->currentTerm);

@@ -99,7 +120,7 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg)
     }
   }

-  return ret;
+  return 0;
 }

 #if 0

@@ -164,22 +185,41 @@ int32_t syncNodeOnRequestVoteReplyCb(SSyncNode* ths, SyncRequestVoteReply* pMsg)
 int32_t syncNodeOnRequestVoteReplySnapshotCb(SSyncNode* ths, SyncRequestVoteReply* pMsg) {
   int32_t ret = 0;

-  // print log
-  char logBuf[128] = {0};
-  snprintf(logBuf, sizeof(logBuf), "recv SyncRequestVoteReply, term:%" PRIu64, ths->pRaftStore->currentTerm);
-  syncRequestVoteReplyLog2(logBuf, pMsg);
+  // trace log
+  do {
+    char     host[64];
+    uint16_t port;
+    syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
+    char logBuf[256];
+    snprintf(logBuf, sizeof(logBuf), "recv sync-request-vote-reply from %s:%d {term:%" PRIu64 ", grant:%d} ", host,
+             port, pMsg->term, pMsg->voteGranted);
+    syncNodeEventLog(ths, logBuf);
+  } while (0);

   // if already drop replica, do not process
   if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
-    sInfo("recv SyncRequestVoteReply, maybe replica already dropped");
-    return ret;
+    char     host[64];
+    uint16_t port;
+    syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
+    char logBuf[256];
+    snprintf(logBuf, sizeof(logBuf),
+             "recv sync-request-vote-reply from %s:%d {term:%" PRIu64 ", grant:%d}, maybe replica dropped", host,
+             port, pMsg->term, pMsg->voteGranted);
+    syncNodeErrorLog(ths, logBuf);
+    return -1;
   }

   // drop stale response
   if (pMsg->term < ths->pRaftStore->currentTerm) {
-    sTrace("recv SyncRequestVoteReply, drop stale response, receive_term:%" PRIu64 " current_term:%" PRIu64,
-           pMsg->term, ths->pRaftStore->currentTerm);
-    return ret;
+    char     host[64];
+    uint16_t port;
+    syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
+    char logBuf[256];
+    snprintf(logBuf, sizeof(logBuf),
+             "recv sync-request-vote-reply from %s:%d {term:%" PRIu64 ", grant:%d}, drop stale response", host,
+             port, pMsg->term, pMsg->voteGranted);
+    syncNodeErrorLog(ths, logBuf);
+    return -1;
   }

   // ASSERT(!(pMsg->term > ths->pRaftStore->currentTerm));

@@ -189,13 +229,14 @@ int32_t syncNodeOnRequestVoteReplySnapshotCb(SSyncNode* ths, SyncRequestVoteRepl
   // }

   if (pMsg->term > ths->pRaftStore->currentTerm) {
-    char logBuf[128] = {0};
-    snprintf(logBuf, sizeof(logBuf), "recv SyncRequestVoteReply, error term, receive_term:%" PRIu64 " current_term:%" PRIu64,
-             pMsg->term, ths->pRaftStore->currentTerm);
-    syncNodePrint2(logBuf, ths);
-    sError("%s", logBuf);
-    return ret;
+    char     host[64];
+    uint16_t port;
+    syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
+    char logBuf[256];
+    snprintf(logBuf, sizeof(logBuf),
+             "recv sync-request-vote-reply from %s:%d {term:%" PRIu64 ", grant:%d}, error term", host, port,
+             pMsg->term, pMsg->voteGranted);
+    syncNodeErrorLog(ths, logBuf);
+    return -1;
   }

   ASSERT(pMsg->term == ths->pRaftStore->currentTerm);

@@ -224,5 +265,5 @@ int32_t syncNodeOnRequestVoteReplySnapshotCb(SSyncNode* ths, SyncRequestVoteRepl
     }
   }

-  return ret;
+  return 0;
 }
\ No newline at end of file
source/libs/sync/src/syncSnapshot.c

@@ -573,6 +573,12 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap
       pReceiver->pSyncNode->commitIndex = pReceiver->snapshot.lastApplyIndex;
     }

+    // maybe update term
+    if (pReceiver->snapshot.lastApplyTerm > pReceiver->pSyncNode->pRaftStore->currentTerm) {
+      pReceiver->pSyncNode->pRaftStore->currentTerm = pReceiver->snapshot.lastApplyTerm;
+      raftStorePersist(pReceiver->pSyncNode->pRaftStore);
+    }
+
     // stop writer, apply data
     code = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, true,
                                                            &(pReceiver->snapshot));
source/libs/transport/inc/transComm.h

@@ -392,7 +392,7 @@ typedef struct SDelayQueue {
 } SDelayQueue;

 int  transDQCreate(uv_loop_t* loop, SDelayQueue** queue);
-void transDQDestroy(SDelayQueue* queue);
+void transDQDestroy(SDelayQueue* queue, void (*freeFunc)(void* arg));
 int  transDQSched(SDelayQueue* queue, void (*func)(void* arg), void* arg, uint64_t timeoutMs);

 bool transEpSetIsEqual(SEpSet* a, SEpSet* b);
source/libs/transport/src/.transComm.c.swo
0 → 100644
File added
source/libs/transport/src/transCli.c

@@ -140,7 +140,7 @@ static void destroyUserdata(STransMsg* userdata);
 static int cliRBChoseIdx(STrans* pTransInst);
-static void destroyCmsg(SCliMsg* cmsg);
+static void destroyCmsg(void* cmsg);
 static void transDestroyConnCtx(STransConnCtx* ctx);
 // thread obj
 static SCliThrd* createThrdObj();

@@ -198,6 +198,7 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
     }                                                       \
     destroyCmsg(pMsg);                                      \
     cliReleaseUnfinishedMsg(conn);                          \
+    transQueueClear(&conn->cliMsgs);                        \
     addConnToPool(((SCliThrd*)conn->hostThrd)->pool, conn); \
     return;                                                 \
   }                                                         \

@@ -545,6 +546,7 @@ static void addConnToPool(void* pool, SCliConn* conn) {
   STrans* pTransInst = thrd->pTransInst;
   conn->expireTime = taosGetTimestampMs() + CONN_PERSIST_TIME(pTransInst->idleTime);
   cliReleaseUnfinishedMsg(conn);
+  transQueueClear(&conn->cliMsgs);
   transCtxCleanup(&conn->ctx);
   conn->status = ConnInPool;

@@ -645,6 +647,7 @@ static void cliDestroy(uv_handle_t* handle) {
   conn->stream->data = NULL;
   taosMemoryFree(conn->stream);
   transCtxCleanup(&conn->ctx);
+  cliReleaseUnfinishedMsg(conn);
   transQueueDestroy(&conn->cliMsgs);
   tTrace("%s conn %p destroy successfully", CONN_GET_INST_LABEL(conn), conn);
   transReqQueueClear(&conn->wreqQueue);

@@ -962,7 +965,8 @@ static void destroyUserdata(STransMsg* userdata) {
   transFreeMsg(userdata->pCont);
   userdata->pCont = NULL;
 }

-static void destroyCmsg(SCliMsg* pMsg) {
+static void destroyCmsg(void* arg) {
+  SCliMsg* pMsg = arg;
   if (pMsg == NULL) {
     return;
   }

@@ -1001,7 +1005,7 @@ static void destroyThrdObj(SCliThrd* pThrd) {
   TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SCliMsg, destroyCmsg);
   transDestroyAsyncPool(pThrd->asyncPool);
-  transDQDestroy(pThrd->delayQueue);
+  transDQDestroy(pThrd->delayQueue, destroyCmsg);
   taosMemoryFree(pThrd->loop);
   taosMemoryFree(pThrd);
 }
source/libs/transport/src/transComm.c

@@ -456,7 +456,7 @@ int transDQCreate(uv_loop_t* loop, SDelayQueue** queue) {
   return 0;
 }

-void transDQDestroy(SDelayQueue* queue) {
+void transDQDestroy(SDelayQueue* queue, void (*freeFunc)(void* arg)) {
   taosMemoryFree(queue->timer);

   while (heapSize(queue->heap) > 0) {

@@ -467,6 +467,11 @@ void transDQDestroy(SDelayQueue* queue) {
     heapRemove(queue->heap, minNode);

     SDelayTask* task = container_of(minNode, SDelayTask, node);
+
+    STaskArg* arg = task->arg;
+    freeFunc(arg->param1);
+    taosMemoryFree(arg);
+
     taosMemoryFree(task);
   }

   heapDestroy(queue->heap);
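The transDQDestroy change above (declared in transComm.h and used from transCli.c) threads a caller-supplied free callback through queue teardown so that payloads still sitting in the delay queue are released instead of leaked. Below is a minimal sketch of that destroy-with-callback pattern, using hypothetical names rather than the transport module's actual types.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical pending task holding a payload that the queue does not know how to free. */
typedef struct Task {
  void        *payload;
  struct Task *next;
} Task;

typedef struct { Task *head; } Queue;

/* Tear the queue down, handing every payload back to the caller's free callback. */
static void queueDestroy(Queue *q, void (*freeFunc)(void *arg)) {
  Task *t = q->head;
  while (t != NULL) {
    Task *next = t->next;
    if (freeFunc != NULL) {
      freeFunc(t->payload); /* the caller decides how the payload is released */
    }
    free(t);
    t = next;
  }
  q->head = NULL;
}

static void freePayload(void *arg) { free(arg); }

int main(void) {
  Queue q = {NULL};
  Task *t = malloc(sizeof(Task));
  t->payload = malloc(16);
  t->next = NULL;
  q.head = t;
  queueDestroy(&q, freePayload); /* analogous in shape to transDQDestroy(queue, destroyCmsg) */
  printf("queue destroyed without leaking payloads\n");
  return 0;
}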
tests/script/tsim/valgrind/checkError3.sim

@@ -90,7 +90,7 @@ $null=
 system_content sh/checkValgrind.sh -n dnode1
 print cmd return result ----> [ $system_content ]
-if $system_content > 0 then
+if $system_content > 2 then
   return -1
 endi
tests/system-test/1-insert/alter_table.py

@@ -211,10 +211,10 @@ class TDTestCase:
                 for error in [constant.INT_UN_MIN-1,constant.INT_UN_MAX+1]:
                     tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
             #! bug TD-17106
-            # elif v.lower() == 'bigint unsigned':
-            #     self.tag_check(i,k,tag_unbigint)
-            #     for error in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1]:
-            #         tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
+            elif v.lower() == 'bigint unsigned':
+                self.tag_check(i,k,tag_unbigint)
+                for error in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1]:
+                    tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
             elif v.lower() == 'bool':
                 self.tag_check(i,k,tag_bool)
             elif v.lower() == 'float':

@@ -225,8 +225,8 @@ class TDTestCase:
                 else:
                     tdLog.exit(f'select {k} from {self.stbname}_{i},data check failure')
                 #! bug TD-17106
-                # for error in [constant.FLOAT_MIN*1.1,constant.FLOAT_MAX*1.1]:
-                #     tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
+                for error in [constant.FLOAT_MIN*1.1,constant.FLOAT_MAX*1.1]:
+                    tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
             elif v.lower() == 'double':
                 tdSql.execute(f'alter table {self.stbname}_{i} set tag {k} = {tag_double}')
                 tdSql.query(f'select {k} from {self.stbname}_{i}')
tests/system-test/1-insert/delete_data.py

@@ -25,12 +25,13 @@ from util.sqlset import TDSetSql
 class TDTestCase:
     def init(self, conn, logSql):
         tdLog.debug("start to execute %s" % __file__)
-        tdSql.init(conn.cursor(),logSql)
+        tdSql.init(conn.cursor())
         self.dbname = 'db_test'
         self.setsql = TDSetSql()
         self.stbname = 'stb'
-        self.rowNum = 10
-        self.tbnum = 20
+        self.ntbname = 'ntb'
+        self.rowNum = 5
+        self.tbnum = 2
         self.ts = 1537146000000
         self.binary_str = 'taosdata'
         self.nchar_str = '涛思数据'

@@ -51,6 +52,7 @@ class TDTestCase:
             'col13': f'nchar({self.str_length})',

         }
+
         self.tinyint_val = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX)
         self.smallint_val = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX)
         self.int_val = random.randint(constant.INT_MIN,constant.INT_MAX)

@@ -107,32 +109,50 @@ class TDTestCase:
                 tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['binary']}")''')
             elif 'nchar' in col_type.lower():
                 tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['nchar']}")''')

-    def delete_all_data(self,tbname,col_type,row_num,base_data,dbname):
+    def delete_all_data(self,tbname,col_type,row_num,base_data,dbname,tb_type,tb_num=1):
         tdSql.execute(f'delete from {tbname}')
         tdSql.execute(f'flush database {dbname}')
         tdSql.execute('reset query cache')
         tdSql.query(f'select * from {tbname}')
         tdSql.checkRows(0)
-        self.insert_base_data(col_type,tbname,row_num,base_data)
+        if tb_type == 'ntb' or tb_type == 'ctb':
+            self.insert_base_data(col_type,tbname,row_num,base_data)
+        elif tb_type == 'stb':
+            for i in range(tb_num):
+                self.insert_base_data(col_type,f'{tbname}_{i}',row_num,base_data)
         tdSql.execute(f'flush database {dbname}')
         tdSql.execute('reset query cache')
         tdSql.query(f'select * from {tbname}')
-        tdSql.checkRows(row_num)
-    def delete_one_row(self,tbname,column_type,column_name,base_data,dbname):
+        if tb_type == 'ntb' or tb_type == 'ctb':
+            tdSql.checkRows(row_num)
+        elif tb_type == 'stb':
+            tdSql.checkRows(row_num*tb_num)
+    def delete_one_row(self,tbname,column_type,column_name,base_data,row_num,dbname,tb_type,tb_num=1):
         tdSql.execute(f'delete from {tbname} where ts={self.ts}')
         tdSql.execute(f'flush database {dbname}')
         tdSql.execute('reset query cache')
         tdSql.query(f'select {column_name} from {tbname}')
-        tdSql.checkRows(self.rowNum-1)
+        if tb_type == 'ntb' or tb_type == 'ctb':
+            tdSql.checkRows(row_num-1)
+        elif tb_type == 'stb':
+            tdSql.checkRows((row_num-1)*tb_num)
         tdSql.query(f'select {column_name} from {tbname} where ts={self.ts}')
         tdSql.checkRows(0)
-        if 'binary' in column_type.lower():
-            tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['binary']}")''')
-        elif 'nchar' in column_type.lower():
-            tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['nchar']}")''')
-        else:
-            tdSql.execute(f'insert into {tbname} values({self.ts},{base_data[column_type]})')
+        if tb_type == 'ntb' or tb_type == 'ctb':
+            if 'binary' in column_type.lower():
+                tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['binary']}")''')
+            elif 'nchar' in column_type.lower():
+                tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['nchar']}")''')
+            else:
+                tdSql.execute(f'insert into {tbname} values({self.ts},{base_data[column_type]})')
+        elif tb_type == 'stb':
+            for i in range(tb_num):
+                if 'binary' in column_type.lower():
+                    tdSql.execute(f'''insert into {tbname}_{i} values({self.ts},"{base_data['binary']}")''')
+                elif 'nchar' in column_type.lower():
+                    tdSql.execute(f'''insert into {tbname}_{i} values({self.ts},"{base_data['nchar']}")''')
+                else:
+                    tdSql.execute(f'insert into {tbname}_{i} values({self.ts},{base_data[column_type]})')
         tdSql.query(f'select {column_name} from {tbname} where ts={self.ts}')
         if column_type.lower() == 'float' or column_type.lower() == 'double':
            if abs(tdSql.queryResult[0][0]-base_data[column_type])/base_data[column_type] <= 0.0001:

@@ -144,12 +164,56 @@ class TDTestCase:
         elif 'nchar' in column_type.lower():
             tdSql.checkEqual(tdSql.queryResult[0][0],base_data['nchar'])
         else:
-            tdSql.checkEqual(tdSql.queryResult[0][0],base_data[column_type])
-    def delete_rows(self):
-        pass
+            tdSql.checkEqual(tdSql.queryResult[0][0],base_data[column_type])
+    def delete_rows(self,dbname,tbname,col_name,col_type,base_data,row_num,tb_type,tb_num=1):
+        for i in range(row_num):
+            tdSql.execute(f'delete from {tbname} where ts>{self.ts+i}')
+            tdSql.execute(f'flush database {dbname}')
+            tdSql.execute('reset query cache')
+            tdSql.query(f'select {col_name} from {tbname}')
+            if tb_type == 'ntb' or tb_type == 'ctb':
+                tdSql.checkRows(i+1)
+                self.insert_base_data(col_type,tbname,row_num,base_data)
+            elif tb_type == 'stb':
+                tdSql.checkRows((i+1)*tb_num)
+                for j in range(tb_num):
+                    self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
+        for i in range(row_num):
+            tdSql.execute(f'delete from {tbname} where ts>={self.ts+i}')
+            tdSql.execute(f'flush database {dbname}')
+            tdSql.execute('reset query cache')
+            tdSql.query(f'select {col_name} from {tbname}')
+            if tb_type == 'ntb' or tb_type == 'ctb':
+                tdSql.checkRows(i)
+                self.insert_base_data(col_type,tbname,row_num,base_data)
+            elif tb_type == 'stb':
+                tdSql.checkRows(i*tb_num)
+                for j in range(tb_num):
+                    self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
+        for i in range(row_num):
+            tdSql.execute(f'delete from {tbname} where ts<={self.ts+i}')
+            tdSql.execute(f'flush database {dbname}')
+            tdSql.execute('reset query cache')
+            tdSql.query(f'select {col_name} from {tbname}')
+            if tb_type == 'ntb' or tb_type == 'ctb':
+                tdSql.checkRows(row_num-i-1)
+                self.insert_base_data(col_type,tbname,row_num,base_data)
+            elif tb_type == 'stb':
+                tdSql.checkRows((row_num-i-1)*tb_num)
+                for j in range(tb_num):
+                    self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
+        for i in range(row_num):
+            tdSql.execute(f'delete from {tbname} where ts<{self.ts+i}')
+            tdSql.execute(f'flush database {dbname}')
+            tdSql.execute('reset query cache')
+            tdSql.query(f'select {col_name} from {tbname}')
+            if tb_type == 'ntb' or tb_type == 'ctb':
+                tdSql.checkRows(row_num-i)
+                self.insert_base_data(col_type,tbname,row_num,base_data)
+            elif tb_type == 'stb':
+                tdSql.checkRows((row_num-i)*tb_num)
+                for j in range(tb_num):
+                    self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
     def delete_error(self,tbname,column_name,column_type,base_data):
         for error_list in ['',f'ts = {self.ts} and',f'ts = {self.ts} or']:
             if 'binary' in column_type.lower():

@@ -157,31 +221,56 @@ class TDTestCase:
             elif 'nchar' in column_type.lower():
                 tdSql.error(f'''delete from {tbname} where {error_list} {column_name} ="{base_data['nchar']}"''')
             else:
-                tdSql.error('delete from {tbname} where {error_list} {column_name} = {base_data[column_type]}')
+                tdSql.error(f'delete from {tbname} where {error_list} {column_name} = {base_data[column_type]}')

     def delete_data_ntb(self):
         tdSql.execute(f'create database if not exists {self.dbname}')
         tdSql.execute(f'use {self.dbname}')
         for col_name,col_type in self.column_dict.items():
             tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})')
             self.insert_base_data(col_type,self.ntbname,self.rowNum,self.base_data)
-            self.delete_one_row(self.ntbname,col_type,col_name,self.base_data,self.dbname)
-            self.delete_all_data(self.ntbname,col_type,self.rowNum,self.base_data,self.dbname)
+            self.delete_one_row(self.ntbname,col_type,col_name,self.base_data,self.rowNum,self.dbname,'ntb')
+            self.delete_all_data(self.ntbname,col_type,self.rowNum,self.base_data,self.dbname,'ntb')
             self.delete_error(self.ntbname,col_name,col_type,self.base_data)
-            for i in range(self.rowNum):
-                tdSql.execute(f'delete from {self.ntbname} where ts>{self.ts+i}')
-                tdSql.execute(f'flush database {self.dbname}')
-                tdSql.execute('reset query cache')
-                tdSql.query(f'select {col_name} from {self.ntbname}')
-                tdSql.checkRows(i+1)
-                self.insert_base_data(col_type,self.ntbname,self.rowNum,self.base_data)
+            self.delete_rows(self.dbname,self.ntbname,col_name,col_type,self.base_data,self.rowNum,'ntb')
             for func in ['first','last']:
                 tdSql.query(f'select {func}(*) from {self.ntbname}')
             tdSql.execute(f'drop table {self.ntbname}')
+        tdSql.execute(f'drop database {self.dbname}')

+    def delete_data_ctb(self):
+        tdSql.execute(f'create database if not exists {self.dbname}')
+        tdSql.execute(f'use {self.dbname}')
+        for col_name,col_type in self.column_dict.items():
+            tdSql.execute(f'create table {self.stbname} (ts timestamp,{col_name} {col_type}) tags(t1 int)')
+            for i in range(self.tbnum):
+                tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags(1)')
+                self.insert_base_data(col_type,f'{self.stbname}_{i}',self.rowNum,self.base_data)
+                self.delete_one_row(f'{self.stbname}_{i}',col_type,col_name,self.base_data,self.rowNum,self.dbname,'ctb')
+                self.delete_all_data(f'{self.stbname}_{i}',col_type,self.rowNum,self.base_data,self.dbname,'ctb')
+                self.delete_error(f'{self.stbname}_{i}',col_name,col_type,self.base_data)
+                self.delete_rows(self.dbname,f'{self.stbname}_{i}',col_name,col_type,self.base_data,self.rowNum,'ctb')
+                for func in ['first','last']:
+                    tdSql.query(f'select {func}(*) from {self.stbname}_{i}')
+            tdSql.execute(f'drop table {self.stbname}')
+    def delete_data_stb(self):
+        tdSql.execute(f'create database if not exists {self.dbname}')
+        tdSql.execute(f'use {self.dbname}')
+        for col_name,col_type in self.column_dict.items():
+            tdSql.execute(f'create table {self.stbname} (ts timestamp,{col_name} {col_type}) tags(t1 int)')
+            for i in range(self.tbnum):
+                tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags(1)')
+                self.insert_base_data(col_type,f'{self.stbname}_{i}',self.rowNum,self.base_data)
+            self.delete_error(self.stbname,col_name,col_type,self.base_data)
+            self.delete_one_row(self.stbname,col_type,col_name,self.base_data,self.rowNum,self.dbname,'stb',self.tbnum)
+            self.delete_all_data(self.stbname,col_type,self.rowNum,self.base_data,self.dbname,'stb',self.tbnum)
+            self.delete_rows(self.dbname,self.stbname,col_name,col_type,self.base_data,self.rowNum,'stb',self.tbnum)
+            for func in ['first','last']:
+                tdSql.query(f'select {func}(*) from {self.stbname}')
+            tdSql.execute(f'drop table {self.stbname}')
+        tdSql.execute(f'drop database {self.dbname}')
     def run(self):
         self.delete_data_ntb()
+        self.delete_data_ctb()
+        self.delete_data_stb()

     def stop(self):
         tdSql.close()
         tdLog.success("%s successfully executed" % __file__)
tests/system-test/2-query/max_partition.py

@@ -11,13 +11,13 @@ class TDTestCase:
         self.row_nums = 10
         self.tb_nums = 10
         self.ts = 1537146000000

     def prepare_datas(self ,stb_name , tb_nums , row_nums ):
         tdSql.execute(" use db ")
         tdSql.execute(f" create stable {stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\
             uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\
             , t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ")

         for i in range(tb_nums):
             tbname = f"sub_{stb_name}_{i}"
             ts = self.ts + i*10000

@@ -30,7 +30,7 @@ class TDTestCase:
         for null in range(5):
             ts = self.ts + row_nums*1000 + null*1000
             tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )")

     def basic_query(self):
         tdSql.query("select count(*) from stb")
         tdSql.checkData(0,0,(self.row_nums + 5 )*self.tb_nums)

@@ -44,7 +44,7 @@ class TDTestCase:
         tdSql.query(" select max(t2) from stb group by c1 order by t1 ")
         tdSql.query(" select max(c1) from stb group by tbname order by tbname ")
         tdSql.checkRows(self.tb_nums)
         # bug need fix
         tdSql.query(" select max(t2) from stb group by t2 order by t2 ")
         tdSql.checkRows(self.tb_nums)
         tdSql.query(" select max(c1) from stb group by c1 order by c1 ")

@@ -62,8 +62,8 @@ class TDTestCase:
         # bug need fix
         # tdSql.query(" select tbname , max(c1) from sub_stb_1 where c1 is null group by c1 order by c1 desc ")
         # tdSql.checkRows(1)
         # tdSql.checkData(0,0,"sub_stb_1")

         tdSql.query("select max(c1) ,c2 ,t2,tbname from stb group by abs(c1) order by abs(c1)")
         tdSql.checkRows(self.row_nums+1)

@@ -80,7 +80,7 @@ class TDTestCase:
         tdSql.checkRows(2)
         tdSql.query(" select max(c1) from stb where abs(c1+t2)=1 partition by tbname ")
         tdSql.checkRows(2)

         tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname ")
         tdSql.checkRows(self.tb_nums)
         tdSql.checkData(0,1,self.row_nums-1)

@@ -89,7 +89,7 @@ class TDTestCase:
         tdSql.query("select tbname , max(t2) from stb partition by t1 order by t1")
         tdSql.query("select tbname , max(t2) from stb partition by t2 order by t2")

         # # bug need fix
         tdSql.query("select t2 , max(t2) from stb partition by t2 order by t2")
         tdSql.checkRows(self.tb_nums)

@@ -97,7 +97,7 @@ class TDTestCase:
         tdSql.checkRows(self.tb_nums)
         tdSql.checkData(0,1,self.row_nums-1)

         tdSql.query("select tbname , max(c1) from stb partition by t2 order by t2")
         tdSql.query("select c2, max(c1) from stb partition by c2 order by c2 desc")

@@ -125,10 +125,10 @@ class TDTestCase:
         tdSql.checkRows(self.tb_nums)
         tdSql.checkData(0,0,self.row_nums)

         # bug need fix
         tdSql.query("select count(c1) , max(t2) ,abs(c1) from stb partition by abs(c1) order by abs(c1)")
         tdSql.checkRows(self.row_nums+1)

         tdSql.query("select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from stb partition by abs(c2) order by abs(c2)")
         tdSql.checkRows(self.row_nums+1)

@@ -148,15 +148,15 @@ class TDTestCase:
         tdSql.query(" select c1 , sample(c1,2) from stb partition by tbname order by tbname ")
         tdSql.checkRows(self.tb_nums*2)

         # interval
         tdSql.query("select max(c1) from stb interval(2s) sliding(1s)")

         # bug need fix
         tdSql.query('select max(c1) from stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)')

         tdSql.query(" select tbname , count(c1) from stb partition by tbname interval(10s) slimit 5 soffset 1 ")
         tdSql.query("select tbname , max(c1) from stb partition by tbname interval(10s)")

@@ -179,12 +179,12 @@ class TDTestCase:
         tdSql.query("select c1 , sample(c1,2) from stb partition by c1 order by c1")
         tdSql.checkRows(21)

         # bug need fix
         # tdSql.checkData(0,1,None)

         tdSql.query("select c1 , twa(c1) from stb partition by c1 order by c1")
         tdSql.checkRows(11)
-        tdSql.checkData(0,1,0.000000000)
+        tdSql.checkData(0,1,None)

         tdSql.query("select c1 , irate(c1) from stb partition by c1 order by c1")
         tdSql.checkRows(11)

@@ -192,7 +192,7 @@ class TDTestCase:
         tdSql.query("select c1 , DERIVATIVE(c1,2,1) from stb partition by c1 order by c1")
         tdSql.checkRows(72)

         # bug need fix
         # tdSql.checkData(0,1,None)

@@ -201,15 +201,15 @@ class TDTestCase:
         # bug need fix
         # tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ")
         # tdSql.checkRows(5)
         # tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 1 ")
         # tdSql.checkRows(5)

         tdSql.query(" select tbname , max(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ")

         tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
         tdSql.query(f'select tbname , max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')

@@ -219,18 +219,18 @@ class TDTestCase:
         self.prepare_datas("stb",self.tb_nums,self.row_nums)
         self.basic_query()

         # # coverage case for taosd crash about bug fix
         tdSql.query(" select sum(c1) from stb where t2+10 >1 ")
         tdSql.query(" select count(c1),count(t1) from stb where -t2<1 ")
         tdSql.query(" select tbname ,max(ceil(c1)) from stb group by tbname ")
         tdSql.query(" select avg(abs(c1)) , tbname from stb group by tbname ")
         tdSql.query(" select t1,c1 from stb where abs(t2+c1)=1 ")

     def stop(self):
         tdSql.close()
         tdLog.success("%s successfully executed" % __file__)

 tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
tests/system-test/2-query/twa.py

@@ -7,7 +7,7 @@ import platform
 import math

 class TDTestCase:
     updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                      "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
                      "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "udfDebugFlag": 143,
                      "maxTablesPerVnode": 2, "minTablesPerVnode": 2, "tableIncStepPerVnode": 2}

@@ -22,7 +22,7 @@ class TDTestCase:
         self.time_step = 1000

     def prepare_datas_of_distribute(self):
         # prepate datas for 20 tables distributed at different vgroups
         tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
         tdSql.execute(" use testdb ")

@@ -32,16 +32,16 @@ class TDTestCase:
             tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
             '''
         )
         for i in range(self.tb_nums):
             tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
             ts = self.ts
             for j in range(self.row_nums):
                 ts += j*self.time_step
                 tdSql.execute(
                     f"insert into ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a )")

         tdSql.execute("insert into ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
         tdSql.execute("insert into ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
         tdSql.execute("insert into ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

@@ -64,7 +64,7 @@ class TDTestCase:
         vgroups = tdSql.queryResult

         vnode_tables = {}
         for vgroup_id in vgroups:
             vnode_tables[vgroup_id[0]] = []

@@ -73,7 +73,7 @@ class TDTestCase:
         table_names = tdSql.queryResult
         tablenames = []
         for table_name in table_names:
             vnode_tables[table_name[6]].append(table_name[0])
         self.vnode_disbutes = vnode_tables

         count = 0

@@ -103,12 +103,12 @@ class TDTestCase:
         tdSql.checkRows(self.tb_nums)
         tdSql.checkData(0,0,1.000000000)

         # union all
         tdSql.query(" select twa(c1) from stb1 partition by tbname union all select twa(c1) from stb1 partition by tbname ")
         tdSql.checkRows(40)
         tdSql.checkData(0,0,1.000000000)

         # join
         tdSql.execute(" create database if not exists db ")
         tdSql.execute(" use db ")

@@ -116,7 +116,7 @@ class TDTestCase:
         tdSql.execute(" create table tb1 using st tags(1) ")
         tdSql.execute(" create table tb2 using st tags(2) ")

         for i in range(10):
             ts = i*10 + self.ts
             tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")

@@ -127,7 +127,7 @@ class TDTestCase:
         tdSql.checkData(0,0,4.500000000)
         tdSql.checkData(0,1,4.500000000)

         # group by
         tdSql.execute(" use testdb ")

         # mixup with other functions

@@ -141,7 +141,7 @@ class TDTestCase:
         self.check_distribute_datas()
         self.twa_support_types()
         self.distribute_twa_query()

     def stop(self):
         tdSql.close()
         tdLog.success("%s successfully executed" % __file__)
tests/system-test/7-tmq/tmqAutoCreateTbl.py

@@ -16,6 +16,7 @@ from tmqCommon import *

 class TDTestCase:
     def __init__(self):
+        self.snapshot = 0
         self.vgroups = 4
         self.ctbNum = 1000
         self.rowsPerTbl = 1000

@@ -44,7 +45,7 @@ class TDTestCase:
                     'pollDelay':   3,
                     'showMsg':     1,
                     'showRow':     1,
-                    'snapshot':    1}
+                    'snapshot':    0}

         paraDict['vgroups'] = self.vgroups
         paraDict['ctbNum'] = self.ctbNum

@@ -84,13 +85,14 @@ class TDTestCase:
                     'ctbStartIdx': 0,
                     'ctbNum':      1000,
                     'rowsPerTbl':  1000,
-                    'batchNum':    400,
+                    'batchNum':    1000,
                     'startTs':     1640966400000,  # 2022-01-01 00:00:00.000
                     'pollDelay':   5,
                     'showMsg':     1,
                     'showRow':     1,
-                    'snapshot':    1}
+                    'snapshot':    0}

+        paraDict['snapshot'] = self.snapshot
         paraDict['vgroups'] = self.vgroups
         paraDict['ctbNum'] = self.ctbNum
         paraDict['rowsPerTbl'] = self.rowsPerTbl

@@ -131,10 +133,10 @@ class TDTestCase:
             totalConsumeRows += resultList[i]

         tdSql.query(queryString)
-        totalRowsInserted = tdSql.getRows()
+        totalRowsFromQuery = tdSql.getRows()

-        if totalConsumeRows != totalRowsInserted:
-            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsInserted))
+        tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery))
+        if totalConsumeRows != totalRowsFromQuery:
             tdLog.exit("tmq consume rows error!")

         tdSql.query("drop topic %s"%topicFromStb1)

@@ -163,6 +165,7 @@ class TDTestCase:
                     'showRow':     1,
                     'snapshot':    0}

+        paraDict['snapshot'] = self.snapshot
         paraDict['vgroups'] = self.vgroups
         paraDict['ctbNum'] = self.ctbNum
         paraDict['rowsPerTbl'] = self.rowsPerTbl

@@ -180,12 +183,13 @@ class TDTestCase:
         #                                       startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])

         tdLog.info("create topics from stb1")
         topicFromStb1 = 'topic_stb1'
-        queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
+        # queryString = "select ts, c1, c2 from %s.%s "%(paraDict['dbName'], paraDict['stbName'])
+        queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
         sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
         tdLog.info("create topic sql: %s"%sqlString)
         tdSql.execute(sqlString)

-        consumerId = 0
+        consumerId = 1
         expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
         topicList = topicFromStb1
         ifcheckdata = 0

@@ -210,10 +214,10 @@ class TDTestCase:
             totalConsumeRows += resultList[i]

         tdSql.query(queryString)
-        totalRowsInserted = tdSql.getRows()
+        totalRowsFromQuery = tdSql.getRows()

-        if totalConsumeRows != totalRowsInserted:
-            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsInserted))
+        tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery))
+        if totalConsumeRows != totalRowsFromQuery:
             tdLog.exit("tmq consume rows error!")

         tdSql.query("drop topic %s"%topicFromStb1)

@@ -222,10 +226,18 @@ class TDTestCase:
     def run(self):
         tdSql.prepare()
         self.prepareTestEnv()
+        tdLog.printNoPrefix("=============================================")
+        tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
         self.tmqCase1()
+        self.tmqCase2()
+
+        self.prepareTestEnv()
+        tdLog.printNoPrefix("====================================================================")
+        tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
+        self.snapshot = 1
+        self.tmqCase1()
-        # self.tmqCase2()    # TD-17267
+        self.tmqCase2()

     def stop(self):
tests/system-test/7-tmq/tmqDnodeRestart.py
浏览文件 @
f52ad00d
...
...
@@ -16,6 +16,7 @@ from tmqCommon import *
class
TDTestCase
:
def
__init__
(
self
):
self
.
snapshot
=
0
self
.
vgroups
=
2
self
.
ctbNum
=
100
self
.
rowsPerTbl
=
10000
...
...
@@ -37,15 +38,16 @@ class TDTestCase:
'tagSchema'
:
[{
'type'
:
'INT'
,
'count'
:
1
},{
'type'
:
'BIGINT'
,
'count'
:
1
},{
'type'
:
'DOUBLE'
,
'count'
:
1
},{
'type'
:
'BINARY'
,
'len'
:
32
,
'count'
:
1
},{
'type'
:
'NCHAR'
,
'len'
:
32
,
'count'
:
1
}],
'ctbPrefix'
:
'ctb'
,
'ctbStartIdx'
:
0
,
'ctbNum'
:
5
00
,
'rowsPerTbl'
:
1000
,
'batchNum'
:
5
00
,
'ctbNum'
:
1
00
,
'rowsPerTbl'
:
1000
0
,
'batchNum'
:
1
00
,
'startTs'
:
1640966400000
,
# 2022-01-01 00:00:00.000
'pollDelay'
:
3
,
'showMsg'
:
1
,
'showRow'
:
1
,
'snapshot'
:
0
}
paraDict
[
'snapshot'
]
=
self
.
snapshot
paraDict
[
'vgroups'
]
=
self
.
vgroups
paraDict
[
'ctbNum'
]
=
self
.
ctbNum
paraDict
[
'rowsPerTbl'
]
=
self
.
rowsPerTbl
...
...
@@ -81,30 +83,31 @@ class TDTestCase:
                     'tagSchema':   [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                     'ctbPrefix':   'ctb',
                     'ctbStartIdx': 0,
-                    'ctbNum':      1000,
-                    'rowsPerTbl':  1000,
-                    'batchNum':    400,
+                    'ctbNum':      100,
+                    'rowsPerTbl':  10000,
+                    'batchNum':    100,
                     'startTs':     1640966400000,  # 2022-01-01 00:00:00.000
                     'pollDelay':   5,
                     'showMsg':     1,
                     'showRow':     1,
-                    'snapshot':    1}
+                    'snapshot':    0}

-        # paraDict['vgroups'] = self.vgroups
-        # paraDict['ctbNum'] = self.ctbNum
-        # paraDict['rowsPerTbl'] = self.rowsPerTbl
+        paraDict['snapshot'] = self.snapshot
+        paraDict['vgroups'] = self.vgroups
+        paraDict['ctbNum'] = self.ctbNum
+        paraDict['rowsPerTbl'] = self.rowsPerTbl

-        tmqCom.initConsumerTable()
-        tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
-        tdLog.info("create stb")
-        tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
-        tdLog.info("create ctb")
-        tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
-                             ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
-        tdLog.info("insert data")
-        tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
-                                               ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
-                                               startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+        # tmqCom.initConsumerTable()
+        # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
+        # tdLog.info("create stb")
+        # tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
+        # tdLog.info("create ctb")
+        # tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
+        #                      ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
+        # tdLog.info("insert data")
+        # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+        #                                        ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+        #                                        startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])

         tdLog.info("create topics from stb1")
         topicFromStb1 = 'topic_stb1'
...
...
@@ -132,7 +135,7 @@ class TDTestCase:
         tdLog.info("================= restart dnode ===========================")
         tdDnodes.stop(1)
         tdDnodes.start(1)
-        time.sleep(5)
+        time.sleep(3)

         tdLog.info("insert process end, and start to check consume result")
         expectRows = 1
...
...
@@ -142,10 +145,10 @@ class TDTestCase:
             totalConsumeRows += resultList[i]

         tdSql.query(queryString)
-        totalRowsInserted = tdSql.getRows()
+        totalRowsFromQury = tdSql.getRows()

-        if totalConsumeRows != totalRowsInserted:
-            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsInserted))
+        tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQury))
+        if totalConsumeRows != totalRowsFromQury:
             tdLog.exit("tmq consume rows error!")

         tdSql.query("drop topic %s" %topicFromStb1)
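The rewritten check above logs the consumed row count against the row count returned by the query unconditionally, and only then fails on a mismatch. A small standalone sketch of that pattern, with the framework's tdLog/tdSql helpers replaced by print/raise for illustration only:

# Sketch only: log actual vs. expected, then assert.
def check_consume_result(result_list, rows_from_query):
    total_consume_rows = sum(result_list)
    # Always print both counts so a failing run leaves the numbers in the log.
    print("act consume rows: %d, expect consume rows: %d"
          % (total_consume_rows, rows_from_query))
    if total_consume_rows != rows_from_query:
        raise RuntimeError("tmq consume rows error!")

if __name__ == '__main__':
    check_consume_result([60, 40], 100)   # passes: 100 consumed, 100 queried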
...
...
@@ -165,30 +168,31 @@ class TDTestCase:
                     'tagSchema':   [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                     'ctbPrefix':   'ctb',
                     'ctbStartIdx': 0,
-                    'ctbNum':      1000,
-                    'rowsPerTbl':  1000,
-                    'batchNum':    1000,
+                    'ctbNum':      100,
+                    'rowsPerTbl':  10000,
+                    'batchNum':    3000,
                     'startTs':     1640966400000,  # 2022-01-01 00:00:00.000
                     'pollDelay':   5,
                     'showMsg':     1,
                     'showRow':     1,
-                    'snapshot':    1}
+                    'snapshot':    0}

-        # paraDict['vgroups'] = self.vgroups
-        # paraDict['ctbNum'] = self.ctbNum
-        # paraDict['rowsPerTbl'] = self.rowsPerTbl
+        paraDict['snapshot'] = self.snapshot
+        paraDict['vgroups'] = self.vgroups
+        paraDict['ctbNum'] = self.ctbNum
+        paraDict['rowsPerTbl'] = self.rowsPerTbl

         tmqCom.initConsumerTable()
-        tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
-        tdLog.info("create stb")
-        tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
-        tdLog.info("create ctb")
-        tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
-                             ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
-        tdLog.info("insert data")
-        tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
-                                               ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
-                                               startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+        # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
+        # tdLog.info("create stb")
+        # tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
+        # tdLog.info("create ctb")
+        # tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
+        #                      ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
+        # tdLog.info("insert data")
+        # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+        #                                        ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+        #                                        startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])

         tdLog.info("create topics from stb1")
         topicFromStb1 = 'topic_stb1'
         queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
...
...
@@ -196,29 +200,29 @@ class TDTestCase:
         tdLog.info("create topic sql: %s"%sqlString)
         tdSql.execute(sqlString)

-        consumerId     = 0
-        expectrowcnt   = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
+        consumerId     = 1
+        expectrowcnt   = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2 + 100000
         topicList      = topicFromStb1
         ifcheckdata    = 0
         ifManualCommit = 0
         keyList        = 'group.id:cgrp1,\
                         enable.auto.commit:true,\
-                        auto.commit.interval.ms:1000,\
+                        auto.commit.interval.ms:3000,\
                         auto.offset.reset:earliest'
         tmqCom.insertConsumerInfo(consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifManualCommit)

         tdLog.info("start consume processor")
         tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'],showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])

         tdLog.info("create some new child table and insert data ")
         tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctb",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])

         tmqCom.getStartCommitNotifyFromTmqsim()
         tdLog.info("================= restart dnode ===========================")
         tdDnodes.stop(1)
         tdDnodes.start(1)
-        time.sleep(5)
+        time.sleep(3)

         tdLog.info("create some new child table and insert data ")
         tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctb",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])

         tdLog.info("insert process end, and start to check consume result")
         expectRows = 1
         resultList = tmqCom.selectConsumeResult(expectRows)
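keyList in the hunk above is a backslash-continued string of comma-separated consumer properties (group.id, enable.auto.commit, auto.commit.interval.ms, auto.offset.reset) that the test framework hands to the consumer. As an illustration of the format only, the following sketch shows how such a string maps to a key/value dict; parse_key_list is a hypothetical helper, not part of tmqCommon:

# Sketch only: turn 'k1:v1,k2:v2,...' into {'k1': 'v1', 'k2': 'v2', ...}.
def parse_key_list(key_list: str) -> dict:
    conf = {}
    for item in key_list.split(','):
        item = item.strip()
        if not item:
            continue
        key, _, value = item.partition(':')   # split on the first ':' only
        conf[key.strip()] = value.strip()
    return conf

if __name__ == '__main__':
    key_list = ('group.id:cgrp1,'
                'enable.auto.commit:true,'
                'auto.commit.interval.ms:3000,'
                'auto.offset.reset:earliest')
    print(parse_key_list(key_list))
    # {'group.id': 'cgrp1', 'enable.auto.commit': 'true',
    #  'auto.commit.interval.ms': '3000', 'auto.offset.reset': 'earliest'}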
...
...
@@ -227,10 +231,10 @@ class TDTestCase:
             totalConsumeRows += resultList[i]

         tdSql.query(queryString)
-        totalRowsInserted = tdSql.getRows()
+        totalRowsFromQuery = tdSql.getRows()

-        if totalConsumeRows != totalRowsInserted:
-            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsInserted))
+        tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery))
+        if totalConsumeRows != totalRowsFromQuery:
             tdLog.exit("tmq consume rows error!")

         tdSql.query("drop topic %s" %topicFromStb1)
...
...
@@ -239,8 +243,8 @@ class TDTestCase:
     def run(self):
         tdSql.prepare()
-        self.tmqCase1()
+        self.prepareTestEnv()
+        # self.tmqCase1()
         self.tmqCase2()

     def stop(self):
...
...
tests/system-test/fulltest.sh View file @ f52ad00d
...
...
@@ -33,7 +33,7 @@ python3 ./test.py -f 1-insert/create_retentions.py
 python3 ./test.py -f 1-insert/table_param_ttl.py
 python3 ./test.py -f 1-insert/update_data.py
 python3 ./test.py -f 1-insert/delete_data.py
 python3 ./test.py -f 2-query/db.py
 python3 ./test.py -f 2-query/between.py
...
...
@@ -184,7 +184,7 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
 python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
 python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
 python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
-python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
+# python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
 #python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
 python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py
 python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot0.py
...
...