taosdata / TDengine
Commit 29649171, authored on July 25, 2022 by jiajingbin
fix: fix bug for TD-17801
Parents: 8833a8bd, 13476682
Showing 43 changed files with 2905 additions and 559 deletions (+2905 −559)
include/common/tglobal.h (+4 −1)
include/util/tlog.h (+0 −1)
source/common/src/tdatablock.c (+1 −1)
source/common/src/tglobal.c (+35 −0)
source/dnode/mnode/impl/src/mndDnode.c (+1 −1)
source/dnode/vnode/src/inc/vnodeInt.h (+1 −0)
source/dnode/vnode/src/tsdb/tsdbUtil.c (+80 −9)
source/dnode/vnode/src/vnd/vnodeSync.c (+143 −46)
source/libs/executor/src/executor.c (+1 −0)
source/libs/executor/src/projectoperator.c (+5 −3)
source/libs/function/src/builtins.c (+1 −1)
source/libs/sync/inc/syncRaftEntry.h (+2 −0)
source/libs/sync/src/syncIO.c (+2 −2)
source/libs/sync/src/syncIndexMgr.c (+1 −1)
source/libs/sync/src/syncRaftCfg.c (+4 −4)
source/libs/sync/src/syncRaftEntry.c (+19 −1)
source/libs/sync/src/syncRaftStore.c (+1 −1)
source/libs/sync/src/syncRespMgr.c (+1 −1)
source/libs/sync/src/syncSnapshot.c (+5 −5)
source/libs/sync/test/syncConfigChangeSnapshotTest.cpp (+1 −1)
source/libs/sync/test/syncEntryCacheTest.cpp (+37 −15)
source/libs/sync/test/syncSnapshotReceiverTest.cpp (+1 −1)
source/libs/sync/test/syncTestTool.cpp (+1 −1)
source/util/src/tconfig.c (+1 −0)
source/util/src/tlog.c (+2 −22)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py (+3 −3)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py (+5 −5)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py (+5 −5)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py (+28 −28)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py (+28 −28)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py (+29 −29)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py (+85 −281)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py (+32 −32)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py (+416 −0)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py (+416 −0)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py (+470 −0)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py (+470 −0)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py (+10 −10)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py (+19 −19)
tests/system-test/6-cluster/vnode/insert_100W_rows.json (+118 −0)
tests/system-test/6-cluster/vnode/insert_10W_rows.json (+118 −0)
tests/system-test/7-tmq/tmqSubscribeStb-r3.py (+302 −0)
tools/taos-tools (+1 −1)
include/common/tglobal.h
@@ -152,7 +152,10 @@ void taosCfgDynamicOptions(const char *option, const char *value);
 void    taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary);
 struct SConfig *taosGetCfg();
-int32_t taosSetCfg(SConfig *pCfg, char *name);
+void    taosSetAllDebugFlag(int32_t flag);
+void    taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal);
+int32_t taosSetCfg(SConfig *pCfg, char *name);

 #ifdef __cplusplus
 }
include/util/tlog.h
@@ -67,7 +67,6 @@ extern int32_t idxDebugFlag;
 int32_t taosInitLog(const char *logName, int32_t maxFiles);
 void    taosCloseLog();
 void    taosResetLog();
-void    taosSetAllDebugFlag(int32_t flag);
 void    taosDumpData(uint8_t *msg, int32_t len);
 void    taosPrintLog(const char *flags, ELogLevel level, int32_t dflag, const char *format, ...)
source/common/src/tdatablock.c
@@ -1878,7 +1878,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks
     msgLen += sizeof(SSubmitBlk);
     int32_t dataLen = 0;
     for (int32_t j = 0; j < rows; ++j) {  // iterate by row
-      tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen));            // set row buf
+      tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen + dataLen));  // set row buf
       bool    isStartKey = false;
       int32_t offset = 0;
       for (int32_t k = 0; k < colNum; ++k) {  // iterate by column
source/common/src/tglobal.c
@@ -1143,6 +1143,10 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
     int32_t monitor = atoi(value);
     uInfo("monitor set from %d to %d", tsEnableMonitor, monitor);
     tsEnableMonitor = monitor;
+    SConfigItem *pItem = cfgGetItem(tsCfg, "monitor");
+    if (pItem != NULL) {
+      pItem->bval = tsEnableMonitor;
+    }
     return;
   }

@@ -1166,8 +1170,39 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
       int32_t flag = atoi(value);
       uInfo("%s set from %d to %d", optName, *optionVars[d], flag);
       *optionVars[d] = flag;
+      taosSetDebugFlag(optionVars[d], optName, flag);
       return;
     }

   uError("failed to cfg dynamic option:%s value:%s", option, value);
 }
+
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) {
+  SConfigItem *pItem = cfgGetItem(tsCfg, flagName);
+  if (pItem != NULL) {
+    pItem->i32 = flagVal;
+  }
+  *pFlagPtr = flagVal;
+}
+
+void taosSetAllDebugFlag(int32_t flag) {
+  if (flag <= 0) return;
+  taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag);
+  taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag);
+  taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag);
+  taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag);
+  taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag);
+  taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag);
+  taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag);
+  taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag);
+  taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag);
+  taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag);
+  taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag);
+  taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag);
+  taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag);
+  taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag);
+  taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag);
+  taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag);
+  uInfo("all debug flag are set to %d", flag);
+}
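The new helper above keeps two things in step: the running flag variable and the matching config item, so a dynamically changed debug level is also visible through the configuration table. A minimal sketch of that idea, using local stand-ins rather than the real globals (taosSetDebugFlagSketch, Item, and the chosen value 135 are illustrative only):

// Usage sketch only: keep the running flag and the cfg item in sync, as the patch does.
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t i32; } Item;          // stand-in for SConfigItem

static int32_t dDebugFlagSketch = 131;          // stand-in for dDebugFlag
static Item    gItem = {131};                   // stand-in for the "dDebugFlag" entry in tsCfg

static void taosSetDebugFlagSketch(int32_t *pFlagPtr, Item *pItem, int32_t flagVal) {
  if (pItem != NULL) pItem->i32 = flagVal;      // what the patch adds: update the cfg item too
  *pFlagPtr = flagVal;                          // always update the running value
}

int main(void) {
  taosSetDebugFlagSketch(&dDebugFlagSketch, &gItem, 135);
  printf("flag=%d item=%d\n", dDebugFlagSketch, gItem.i32);   // both now read 135
  return 0;
}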
source/dnode/mnode/impl/src/mndDnode.c
@@ -874,7 +874,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
 }

 static int32_t mndProcessConfigDnodeRsp(SRpcMsg *pRsp) {
-  mInfo("config rsp from dnode, app:%p", pRsp->info.ahandle);
+  mInfo("config rsp from dnode");
   return 0;
 }
source/dnode/vnode/src/inc/vnodeInt.h
@@ -268,6 +268,7 @@ struct SVnode {
   tsem_t    canCommit;
   int64_t   sync;
   int32_t   blockCount;
+  bool      restored;
   tsem_t    syncSem;
   SQHandle *pQuery;
 };
source/dnode/vnode/src/tsdb/tsdbUtil.c
@@ -1395,10 +1395,26 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
       break;
     case TSDB_DATA_TYPE_BOOL:
       break;
-    case TSDB_DATA_TYPE_TINYINT:
+    case TSDB_DATA_TYPE_TINYINT: {
+      pColAgg->sum += colVal.value.i8;
+      if (pColAgg->min > colVal.value.i8) {
+        pColAgg->min = colVal.value.i8;
+      }
+      if (pColAgg->max < colVal.value.i8) {
+        pColAgg->max = colVal.value.i8;
+      }
+      break;
-    case TSDB_DATA_TYPE_SMALLINT:
+    }
+    case TSDB_DATA_TYPE_SMALLINT: {
+      pColAgg->sum += colVal.value.i16;
+      if (pColAgg->min > colVal.value.i16) {
+        pColAgg->min = colVal.value.i16;
+      }
+      if (pColAgg->max < colVal.value.i16) {
+        pColAgg->max = colVal.value.i16;
+      }
+      break;
+    }
     case TSDB_DATA_TYPE_INT: {
       pColAgg->sum += colVal.value.i32;
       if (pColAgg->min > colVal.value.i32) {
@@ -1419,24 +1435,79 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
       }
       break;
     }
-    case TSDB_DATA_TYPE_FLOAT:
+    case TSDB_DATA_TYPE_FLOAT: {
+      pColAgg->sum += colVal.value.f;
+      if (pColAgg->min > colVal.value.f) {
+        pColAgg->min = colVal.value.f;
+      }
+      if (pColAgg->max < colVal.value.f) {
+        pColAgg->max = colVal.value.f;
+      }
+      break;
-    case TSDB_DATA_TYPE_DOUBLE:
+    }
+    case TSDB_DATA_TYPE_DOUBLE: {
+      pColAgg->sum += colVal.value.d;
+      if (pColAgg->min > colVal.value.d) {
+        pColAgg->min = colVal.value.d;
+      }
+      if (pColAgg->max < colVal.value.d) {
+        pColAgg->max = colVal.value.d;
+      }
+      break;
+    }
     case TSDB_DATA_TYPE_VARCHAR:
       break;
-    case TSDB_DATA_TYPE_TIMESTAMP:
+    case TSDB_DATA_TYPE_TIMESTAMP: {
+      if (pColAgg->min > colVal.value.i64) {
+        pColAgg->min = colVal.value.i64;
+      }
+      if (pColAgg->max < colVal.value.i64) {
+        pColAgg->max = colVal.value.i64;
+      }
+      break;
+    }
     case TSDB_DATA_TYPE_NCHAR:
       break;
-    case TSDB_DATA_TYPE_UTINYINT:
+    case TSDB_DATA_TYPE_UTINYINT: {
+      pColAgg->sum += colVal.value.u8;
+      if (pColAgg->min > colVal.value.u8) {
+        pColAgg->min = colVal.value.u8;
+      }
+      if (pColAgg->max < colVal.value.u8) {
+        pColAgg->max = colVal.value.u8;
+      }
+      break;
-    case TSDB_DATA_TYPE_USMALLINT:
+    }
+    case TSDB_DATA_TYPE_USMALLINT: {
+      pColAgg->sum += colVal.value.u16;
+      if (pColAgg->min > colVal.value.u16) {
+        pColAgg->min = colVal.value.u16;
+      }
+      if (pColAgg->max < colVal.value.u16) {
+        pColAgg->max = colVal.value.u16;
+      }
+      break;
-    case TSDB_DATA_TYPE_UINT:
+    }
+    case TSDB_DATA_TYPE_UINT: {
+      pColAgg->sum += colVal.value.u32;
+      if (pColAgg->min > colVal.value.u32) {
+        pColAgg->min = colVal.value.u32;
+      }
+      if (pColAgg->max < colVal.value.u32) {
+        pColAgg->max = colVal.value.u32;
+      }
+      break;
-    case TSDB_DATA_TYPE_UBIGINT:
+    }
+    case TSDB_DATA_TYPE_UBIGINT: {
+      pColAgg->sum += colVal.value.u64;
+      if (pColAgg->min > colVal.value.u64) {
+        pColAgg->min = colVal.value.u64;
+      }
+      if (pColAgg->max < colVal.value.u64) {
+        pColAgg->max = colVal.value.u64;
+      }
+      break;
+    }
     case TSDB_DATA_TYPE_JSON:
       break;
     case TSDB_DATA_TYPE_VARBINARY:
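The change above gives every numeric type its own braced case block ending in break, instead of bare labels that fall through into the next case. For illustration only, a reduced example of the pattern: each case aggregates with its own field width and stops there (the Agg struct and type codes below are simplified stand-ins, not the real tsdb structures):

// Reduced illustration of the per-case aggregation pattern; not the real tsdb code.
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t sum; } Agg;

static void aggregate(Agg *a, int type, int8_t i8, int32_t i32) {
  switch (type) {
    case 1: {            // "TINYINT": braced block with its own break (the fixed shape)
      a->sum += i8;
      break;
    }
    case 2: {            // "INT"
      a->sum += i32;
      break;
    }
    default:
      break;
  }
}

int main(void) {
  Agg a = {0};
  aggregate(&a, 1, 5, 1000);                 // only the TINYINT branch runs; sum == 5
  printf("%lld\n", (long long)a.sum);
  return 0;
}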
source/dnode/vnode/src/vnd/vnodeSync.c
@@ -16,23 +16,28 @@
 #define _DEFAULT_SOURCE
 #include "vnd.h"

+#define BATCH_DISABLE 1
+
 static inline bool vnodeIsMsgBlock(tmsg_t type) {
   return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_CREATE_TABLE) ||
-         (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) || (type == TDMT_VND_UPDATE_TAG_VAL);
+         (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) || (type == TDMT_VND_UPDATE_TAG_VAL) ||
+         (type == TDMT_VND_ALTER_REPLICA);
 }

 static inline bool vnodeIsMsgWeak(tmsg_t type) { return false; }

 static inline void vnodeWaitBlockMsg(SVnode *pVnode, const SRpcMsg *pMsg) {
   if (vnodeIsMsgBlock(pMsg->msgType)) {
-    vTrace("vgId:%d, msg:%p wait block, type:%s", pVnode->config.vgId, pMsg, TMSG_INFO(pMsg->msgType));
+    const STraceId *trace = &pMsg->info.traceId;
+    vGTrace("vgId:%d, msg:%p wait block, type:%s", pVnode->config.vgId, pMsg, TMSG_INFO(pMsg->msgType));
     tsem_wait(&pVnode->syncSem);
   }
 }

 static inline void vnodePostBlockMsg(SVnode *pVnode, const SRpcMsg *pMsg) {
   if (vnodeIsMsgBlock(pMsg->msgType)) {
-    vTrace("vgId:%d, msg:%p post block, type:%s", pVnode->config.vgId, pMsg, TMSG_INFO(pMsg->msgType));
+    const STraceId *trace = &pMsg->info.traceId;
+    vGTrace("vgId:%d, msg:%p post block, type:%s", pVnode->config.vgId, pMsg, TMSG_INFO(pMsg->msgType));
     tsem_post(&pVnode->syncSem);
   }
 }

@@ -124,60 +129,147 @@ void vnodeRedirectRpcMsg(SVnode *pVnode, SRpcMsg *pMsg) {
   tmsgSendRedirectRsp(&rsp, &newEpSet);
 }

+static void inline vnodeHandleWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
+  SRpcMsg rsp = {.code = pMsg->code, .info = pMsg->info};
+  if (vnodeProcessWriteMsg(pVnode, pMsg, pMsg->info.conn.applyIndex, &rsp) < 0) {
+    rsp.code = terrno;
+    const STraceId *trace = &pMsg->info.traceId;
+    vGError("vgId:%d, msg:%p failed to apply right now since %s", pVnode->config.vgId, pMsg, terrstr());
+  }
+  if (rsp.info.handle != NULL) {
+    tmsgSendRsp(&rsp);
+  }
+}
+
+static void vnodeHandleProposeError(SVnode *pVnode, SRpcMsg *pMsg, int32_t code) {
+  if (code == TSDB_CODE_SYN_NOT_LEADER) {
+    vnodeRedirectRpcMsg(pVnode, pMsg);
+  } else {
+    const STraceId *trace = &pMsg->info.traceId;
+    vGError("vgId:%d, msg:%p failed to propose since %s, code:0x%x", pVnode->config.vgId, pMsg, tstrerror(code), code);
+    SRpcMsg rsp = {.code = code, .info = pMsg->info};
+    if (rsp.info.handle != NULL) {
+      tmsgSendRsp(&rsp);
+    }
+  }
+}
+
+static void vnodeHandleAlterReplicaReq(SVnode *pVnode, SRpcMsg *pMsg) {
+  int32_t code = vnodeProcessAlterReplicaReq(pVnode, pMsg);
+  if (code > 0) {
+    ASSERT(0);
+  } else if (code == 0) {
+    vnodeWaitBlockMsg(pVnode, pMsg);
+  } else {
+    if (terrno != 0) code = terrno;
+    vnodeHandleProposeError(pVnode, pMsg, code);
+  }
+
+  const STraceId *trace = &pMsg->info.traceId;
+  vGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->config.vgId, pMsg, code);
+  rpcFreeCont(pMsg->pCont);
+  taosFreeQitem(pMsg);
+}
+
+static void inline vnodeProposeBatchMsg(SVnode *pVnode, SRpcMsg **pMsgArr, bool *pIsWeakArr, int32_t *arrSize) {
+  if (*arrSize <= 0) return;
+
+#if BATCH_DISABLE
+  int32_t code = syncPropose(pVnode->sync, pMsgArr[0], pIsWeakArr[0]);
+#else
+  int32_t code = syncProposeBatch(pVnode->sync, pMsgArr, pIsWeakArr, *arrSize);
+#endif
+
+  if (code > 0) {
+    for (int32_t i = 0; i < *arrSize; ++i) {
+      vnodeHandleWriteMsg(pVnode, pMsgArr[i]);
+    }
+  } else if (code == 0) {
+    vnodeWaitBlockMsg(pVnode, pMsgArr[*arrSize - 1]);
+  } else {
+    if (terrno != 0) code = terrno;
+    for (int32_t i = 0; i < *arrSize; ++i) {
+      vnodeHandleProposeError(pVnode, pMsgArr[i], code);
+    }
+  }
+
+  for (int32_t i = 0; i < *arrSize; ++i) {
+    SRpcMsg        *pMsg = pMsgArr[i];
+    const STraceId *trace = &pMsg->info.traceId;
+    vGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->config.vgId, pMsg, code);
+    rpcFreeCont(pMsg->pCont);
+    taosFreeQitem(pMsg);
+  }
+
+  *arrSize = 0;
+}
+
 void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
   SVnode  *pVnode = pInfo->ahandle;
   int32_t  vgId = pVnode->config.vgId;
   int32_t  code = 0;
   SRpcMsg *pMsg = NULL;
+  int32_t   arrayPos = 0;
+  SRpcMsg **pMsgArr = taosMemoryCalloc(numOfMsgs, sizeof(SRpcMsg *));
+  bool     *pIsWeakArr = taosMemoryCalloc(numOfMsgs, sizeof(bool));
   vTrace("vgId:%d, get %d msgs from vnode-write queue", vgId, numOfMsgs);

-  for (int32_t m = 0; m < numOfMsgs; m++) {
+  for (int32_t msg = 0; msg < numOfMsgs; msg++) {
     if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
+    bool isWeak = vnodeIsMsgWeak(pMsg->msgType);
+    bool isBlock = vnodeIsMsgBlock(pMsg->msgType);

     const STraceId *trace = &pMsg->info.traceId;
-    vGTrace("vgId:%d, msg:%p get from vnode-write queue handle:%p", vgId, pMsg, pMsg->info.handle);
+    vGTrace("vgId:%d, msg:%p get from vnode-write queue, weak:%d block:%d msg:%d:%d pos:%d, handle:%p", vgId, pMsg,
+            isWeak, isBlock, msg, numOfMsgs, arrayPos, pMsg->info.handle);

+    if (!pVnode->restored) {
+      vGError("vgId:%d, msg:%p failed to process since not leader", vgId, pMsg);
+      terrno = TSDB_CODE_APP_NOT_READY;
+      vnodeHandleProposeError(pVnode, pMsg, TSDB_CODE_APP_NOT_READY);
+      rpcFreeCont(pMsg->pCont);
+      taosFreeQitem(pMsg);
+      continue;
+    }
+
+    if (pMsgArr == NULL || pIsWeakArr == NULL) {
+      vGError("vgId:%d, msg:%p failed to process since out of memory", vgId, pMsg);
+      terrno = TSDB_CODE_OUT_OF_MEMORY;
+      vnodeHandleProposeError(pVnode, pMsg, terrno);
+      rpcFreeCont(pMsg->pCont);
+      taosFreeQitem(pMsg);
+      continue;
+    }
+
     code = vnodePreProcessWriteMsg(pVnode, pMsg);
     if (code != 0) {
-      vError("vgId:%d, msg:%p failed to pre-process since %s", vgId, pMsg, terrstr());
-    } else {
-      if (pMsg->msgType == TDMT_VND_ALTER_REPLICA) {
-        code = vnodeProcessAlterReplicaReq(pVnode, pMsg);
-      } else {
-        code = syncPropose(pVnode->sync, pMsg, vnodeIsMsgWeak(pMsg->msgType));
-        if (code > 0) {
-          SRpcMsg rsp = {.code = pMsg->code, .info = pMsg->info};
-          if (vnodeProcessWriteMsg(pVnode, pMsg, pMsg->info.conn.applyIndex, &rsp) < 0) {
-            rsp.code = terrno;
-            vError("vgId:%d, msg:%p failed to apply right now since %s", vgId, pMsg, terrstr());
-          }
-          if (rsp.info.handle != NULL) {
-            tmsgSendRsp(&rsp);
-          }
-        } else if (code == 0) {
-          vnodeWaitBlockMsg(pVnode, pMsg);
-        } else {
-        }
-      }
-    }
+      vGError("vgId:%d, msg:%p failed to pre-process since %s", vgId, pMsg, terrstr());
+      rpcFreeCont(pMsg->pCont);
+      taosFreeQitem(pMsg);
+      continue;
+    }

-    if (code < 0) {
-      if (terrno == TSDB_CODE_SYN_NOT_LEADER) {
-        vnodeRedirectRpcMsg(pVnode, pMsg);
-      } else {
-        if (terrno != 0) code = terrno;
-        vError("vgId:%d, msg:%p failed to propose since %s, code:0x%x", vgId, pMsg, tstrerror(code), code);
-        SRpcMsg rsp = {.code = code, .info = pMsg->info};
-        if (rsp.info.handle != NULL) {
-          tmsgSendRsp(&rsp);
-        }
-      }
-    }
+    if (pMsg->msgType == TDMT_VND_ALTER_REPLICA) {
+      vnodeHandleAlterReplicaReq(pVnode, pMsg);
+      continue;
+    }

-    vGTrace("vgId:%d, msg:%p is freed, code:0x%x", vgId, pMsg, code);
-    rpcFreeCont(pMsg->pCont);
-    taosFreeQitem(pMsg);
+    if (isBlock || BATCH_DISABLE) {
+      vnodeProposeBatchMsg(pVnode, pMsgArr, pIsWeakArr, &arrayPos);
+    }
+
+    pMsgArr[arrayPos] = pMsg;
+    pIsWeakArr[arrayPos] = isWeak;
+    arrayPos++;
+
+    if (isBlock || msg == numOfMsgs - 1 || BATCH_DISABLE) {
+      vnodeProposeBatchMsg(pVnode, pMsgArr, pIsWeakArr, &arrayPos);
+    }
   }
+
+  taosMemoryFree(pMsgArr);
+  taosMemoryFree(pIsWeakArr);
 }

 void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {

@@ -527,6 +619,12 @@ static void vnodeLeaderTransfer(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsm
   SVnode *pVnode = pFsm->data;
 }

+static void vnodeRestoreFinish(struct SSyncFSM *pFsm) {
+  SVnode *pVnode = pFsm->data;
+  pVnode->restored = true;
+  vDebug("vgId:%d, sync restore finished", pVnode->config.vgId);
+}
+
 static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
   SSyncFSM *pFsm = taosMemoryCalloc(1, sizeof(SSyncFSM));
   pFsm->data = pVnode;

@@ -534,7 +632,7 @@ static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
   pFsm->FpPreCommitCb = vnodeSyncPreCommitMsg;
   pFsm->FpRollBackCb = vnodeSyncRollBackMsg;
   pFsm->FpGetSnapshotInfo = vnodeSyncGetSnapshot;
-  pFsm->FpRestoreFinishCb = NULL;
+  pFsm->FpRestoreFinishCb = vnodeRestoreFinish;
   pFsm->FpLeaderTransferCb = vnodeLeaderTransfer;
   pFsm->FpReConfigCb = vnodeSyncReconfig;
   pFsm->FpSnapshotStartRead = vnodeSnapshotStartRead;

@@ -588,11 +686,10 @@ bool vnodeIsLeader(SVnode *pVnode) {
     return false;
   }

-  // todo
-  // if (!pVnode->restored) {
-  //   terrno = TSDB_CODE_APP_NOT_READY;
-  //   return false;
-  // }
+  if (!pVnode->restored) {
+    terrno = TSDB_CODE_APP_NOT_READY;
+    return false;
+  }

   return true;
 }
\ No newline at end of file
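The rewritten write path accumulates queued messages into an array and flushes them through vnodeProposeBatchMsg, either one at a time (BATCH_DISABLE is set to 1 in this commit, so syncPropose is used) or as a batch via syncProposeBatch. A compressed sketch of the accumulate-and-flush loop, with stand-in types and a print instead of a real propose call (Msg, flush, and the 64-entry buffer are illustrative assumptions, not the real SRpcMsg handling):

// Sketch of the batching pattern only; the real loop lives in vnodeProposeWriteMsg().
#include <stdbool.h>
#include <stdio.h>

#define BATCH_DISABLE 1

typedef struct { int id; bool blocking; } Msg;   // stand-in for SRpcMsg

static void flush(Msg **arr, int *n) {           // stand-in for vnodeProposeBatchMsg
  if (*n <= 0) return;
  printf("propose batch of %d msg(s), first id=%d\n", *n, arr[0]->id);
  *n = 0;
}

static void proposeAll(Msg **queue, int numOfMsgs) {
  Msg *arr[64];                                  // assumes at most 64 queued messages, for brevity
  int  pos = 0;
  for (int i = 0; i < numOfMsgs; ++i) {
    Msg *m = queue[i];
    // A blocking message must not share a batch with earlier ones, so flush first.
    if (m->blocking || BATCH_DISABLE) flush(arr, &pos);
    arr[pos++] = m;
    // Flush after a blocking message, at the end of the queue, or always when batching is off.
    if (m->blocking || i == numOfMsgs - 1 || BATCH_DISABLE) flush(arr, &pos);
  }
}

int main(void) {
  Msg a = {1, false}, b = {2, true}, c = {3, false};
  Msg *q[] = {&a, &b, &c};
  proposeAll(q, 3);
  return 0;
}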
source/libs/executor/src/executor.c
@@ -416,6 +416,7 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) {
   }

   if (isTaskKilled(pTaskInfo)) {
+    atomic_store_64(&pTaskInfo->owner, 0);
     qDebug("%s already killed, abort", GET_TASKID(pTaskInfo));
     return TSDB_CODE_SUCCESS;
   }
source/libs/executor/src/projectoperator.c
@@ -68,9 +68,11 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
   pInfo->mergeDataBlocks = pProjPhyNode->mergeDataBlock;

-  // todo remove it soon
-  // if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
-  //   pInfo->mergeDataBlocks = true;
-  // }
+  if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
+    pInfo->mergeDataBlocks = false;
+  }

   int32_t numOfRows = 4096;
   size_t  keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
source/libs/function/src/builtins.c
@@ -1532,7 +1532,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
   }

   uint8_t resType;
-  if (IS_SIGNED_NUMERIC_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) {
+  if (IS_SIGNED_NUMERIC_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType || TSDB_DATA_TYPE_TIMESTAMP == colType) {
     resType = TSDB_DATA_TYPE_BIGINT;
   } else {
     resType = TSDB_DATA_TYPE_DOUBLE;
source/libs/sync/inc/syncRaftEntry.h
@@ -26,6 +26,7 @@ extern "C" {
 #include "syncInt.h"
 #include "syncMessage.h"
 #include "taosdef.h"
+#include "tref.h"
 #include "tskiplist.h"

 typedef struct SSyncRaftEntry {

@@ -89,6 +90,7 @@ typedef struct SRaftEntryCache {
   SSkipList    *pSkipList;
   int32_t       maxCount;
   int32_t       currentCount;
+  int32_t       refMgr;
   TdThreadMutex mutex;
   SSyncNode    *pSyncNode;
 } SRaftEntryCache;
source/libs/sync/src/syncIO.c
@@ -242,9 +242,9 @@ static int32_t syncIOStopInternal(SSyncIO *io) {
 }

 static void *syncIOConsumerFunc(void *param) {
-  SSyncIO  *io = param;
+  SSyncIO   *io = param;
   STaosQall *qall = taosAllocateQall();
-  SRpcMsg  *pRpcMsg, rpcMsg;
+  SRpcMsg   *pRpcMsg, rpcMsg;
   SQueueInfo qinfo = {0};

   while (1) {
source/libs/sync/src/syncIndexMgr.c
@@ -125,7 +125,7 @@ cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr) {
 char *syncIndexMgr2Str(SSyncIndexMgr *pSyncIndexMgr) {
   cJSON *pJson = syncIndexMgr2Json(pSyncIndexMgr);
-  char *serialized = cJSON_Print(pJson);
+  char  *serialized = cJSON_Print(pJson);
   cJSON_Delete(pJson);
   return serialized;
 }
source/libs/sync/src/syncRaftCfg.c
@@ -101,7 +101,7 @@ cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) {
 char *syncCfg2Str(SSyncCfg *pSyncCfg) {
   cJSON *pJson = syncCfg2Json(pSyncCfg);
-  char *serialized = cJSON_Print(pJson);
+  char  *serialized = cJSON_Print(pJson);
   cJSON_Delete(pJson);
   return serialized;
 }

@@ -109,7 +109,7 @@ char *syncCfg2Str(SSyncCfg *pSyncCfg) {
 char *syncCfg2SimpleStr(SSyncCfg *pSyncCfg) {
   if (pSyncCfg != NULL) {
     int32_t len = 512;
-    char *s = taosMemoryMalloc(len);
+    char   *s = taosMemoryMalloc(len);
     memset(s, 0, len);
     snprintf(s, len, "{r-num:%d, my:%d, ", pSyncCfg->replicaNum, pSyncCfg->myIndex);

@@ -206,7 +206,7 @@ cJSON *raftCfg2Json(SRaftCfg *pRaftCfg) {
 char *raftCfg2Str(SRaftCfg *pRaftCfg) {
   cJSON *pJson = raftCfg2Json(pRaftCfg);
-  char *serialized = cJSON_Print(pJson);
+  char  *serialized = cJSON_Print(pJson);
   cJSON_Delete(pJson);
   return serialized;
 }

@@ -285,7 +285,7 @@ int32_t raftCfgFromJson(const cJSON *pRoot, SRaftCfg *pRaftCfg) {
     (pRaftCfg->configIndexArr)[i] = atoll(pIndex->valuestring);
   }

-  cJSON *pJsonSyncCfg = cJSON_GetObjectItem(pJson, "SSyncCfg");
+  cJSON  *pJsonSyncCfg = cJSON_GetObjectItem(pJson, "SSyncCfg");
   int32_t code = syncCfgFromJson(pJsonSyncCfg, &(pRaftCfg->cfg));
   ASSERT(code == 0);
source/libs/sync/src/syncRaftEntry.c
@@ -23,6 +23,7 @@ SSyncRaftEntry* syncEntryBuild(uint32_t dataLen) {
   memset(pEntry, 0, bytes);
   pEntry->bytes = bytes;
   pEntry->dataLen = dataLen;
+  pEntry->rid = -1;

   return pEntry;
 }

@@ -451,6 +452,11 @@ static char* keyFn(const void* pData) {
 static int cmpFn(const void* p1, const void* p2) { return memcmp(p1, p2, sizeof(SyncIndex)); }

+static void freeRaftEntry(void* param) {
+  SSyncRaftEntry* pEntry = (SSyncRaftEntry*)param;
+  syncEntryDestory(pEntry);
+}
+
 SRaftEntryCache* raftEntryCacheCreate(SSyncNode* pSyncNode, int32_t maxCount) {
   SRaftEntryCache* pCache = taosMemoryMalloc(sizeof(SRaftEntryCache));
   if (pCache == NULL) {

@@ -466,6 +472,7 @@ SRaftEntryCache* raftEntryCacheCreate(SSyncNode* pSyncNode, int32_t maxCount) {
   }

   taosThreadMutexInit(&(pCache->mutex), NULL);
+  pCache->refMgr = taosOpenRef(10, freeRaftEntry);
   pCache->maxCount = maxCount;
   pCache->currentCount = 0;
   pCache->pSyncNode = pSyncNode;

@@ -477,6 +484,10 @@ void raftEntryCacheDestroy(SRaftEntryCache* pCache) {
   if (pCache != NULL) {
     taosThreadMutexLock(&(pCache->mutex));
     tSkipListDestroy(pCache->pSkipList);
+    if (pCache->refMgr != -1) {
+      taosCloseRef(pCache->refMgr);
+      pCache->refMgr = -1;
+    }
     taosThreadMutexUnlock(&(pCache->mutex));
     taosThreadMutexDestroy(&(pCache->mutex));
     taosMemoryFree(pCache);

@@ -498,6 +509,9 @@ int32_t raftEntryCachePutEntry(struct SRaftEntryCache* pCache, SSyncRaftEntry* p
   ASSERT(pSkipListNode != NULL);
   ++(pCache->currentCount);

+  pEntry->rid = taosAddRef(pCache->refMgr, pEntry);
+  ASSERT(pEntry->rid >= 0);
+
   do {
     char eventLog[128];
     snprintf(eventLog, sizeof(eventLog), "raft cache add, type:%s,%d, type2:%s,%d, index:%" PRId64 ", bytes:%d",

@@ -520,6 +534,7 @@ int32_t raftEntryCacheGetEntry(struct SRaftEntryCache* pCache, SyncIndex index,
   if (code == 1) {
     *ppEntry = taosMemoryMalloc(pEntry->bytes);
     memcpy(*ppEntry, pEntry, pEntry->bytes);
+    (*ppEntry)->rid = -1;
   } else {
     *ppEntry = NULL;
   }

@@ -541,6 +556,7 @@ int32_t raftEntryCacheGetEntryP(struct SRaftEntryCache* pCache, SyncIndex index,
     SSkipListNode** ppNode = (SSkipListNode**)taosArrayGet(entryPArray, 0);
     ASSERT(*ppNode != NULL);
     *ppEntry = (SSyncRaftEntry*)SL_GET_NODE_DATA(*ppNode);
+    taosAcquireRef(pCache->refMgr, (*ppEntry)->rid);
     code = 1;
   } else if (arraySize == 0) {

@@ -600,7 +616,9 @@ int32_t raftEntryCacheClear(struct SRaftEntryCache* pCache, int32_t count) {
     taosArrayPush(delNodeArray, &pNode);
     ++returnCnt;
     SSyncRaftEntry* pEntry = (SSyncRaftEntry*)SL_GET_NODE_DATA(pNode);
-    syncEntryDestory(pEntry);
+
+    // syncEntryDestory(pEntry);
+    taosRemoveRef(pCache->refMgr, pEntry->rid);
   }
   tSkipListDestroyIter(pIter);
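With this change, cache entries are registered with a reference manager (taosOpenRef/taosAddRef), readers acquire a reference through taosAcquireRef, and eviction goes through taosRemoveRef instead of destroying the entry directly, so a reader that still holds a reference keeps a valid pointer. A reduced illustration of that reference-counting idea in plain C (Entry and its helpers are illustrative stand-ins, not the TDengine tref API declared in tref.h):

// Illustrative reference-counting sketch; TDengine's real API is taosOpenRef/taosAddRef/
// taosAcquireRef/taosReleaseRef/taosRemoveRef.
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int         refs;      // current reference count
  int         removed;   // owner has asked for removal
  const char *payload;
} Entry;

static Entry *entry_new(const char *s) {
  Entry *e = calloc(1, sizeof(Entry));
  e->refs = 1;                          // the cache itself holds one reference
  e->payload = s;
  return e;
}

static void entry_release(Entry *e) {
  if (--e->refs == 0 && e->removed) {   // freed only after the last reader lets go
    free(e);
  }
}

static Entry *entry_acquire(Entry *e) { e->refs++; return e; }

static void entry_remove(Entry *e) {    // what cache eviction does
  e->removed = 1;
  entry_release(e);
}

int main(void) {
  Entry *e = entry_new("raft log entry");
  Entry *reader = entry_acquire(e);     // a reader grabs it before eviction
  entry_remove(e);                      // cache evicts; the entry survives for the reader
  printf("reader still sees: %s\n", reader->payload);
  entry_release(reader);                // last release frees it
  return 0;
}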
source/libs/sync/src/syncRaftStore.c
@@ -216,7 +216,7 @@ cJSON *raftStore2Json(SRaftStore *pRaftStore) {
 char *raftStore2Str(SRaftStore *pRaftStore) {
   cJSON *pJson = raftStore2Json(pRaftStore);
-  char *serialized = cJSON_Print(pJson);
+  char  *serialized = cJSON_Print(pJson);
   cJSON_Delete(pJson);
   return serialized;
 }
source/libs/sync/src/syncRespMgr.c
@@ -129,7 +129,7 @@ void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl) {
   while (pStub) {
     size_t    len;
-    void *key = taosHashGetKey(pStub, &len);
+    void     *key = taosHashGetKey(pStub, &len);
     uint64_t *pSeqNum = (uint64_t *)key;
     sum++;
source/libs/sync/src/syncSnapshot.c
@@ -374,14 +374,14 @@ cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) {
 char *snapshotSender2Str(SSyncSnapshotSender *pSender) {
   cJSON *pJson = snapshotSender2Json(pSender);
-  char *serialized = cJSON_Print(pJson);
+  char  *serialized = cJSON_Print(pJson);
   cJSON_Delete(pJson);
   return serialized;
 }

 char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event) {
   int32_t len = 256;
-  char *s = taosMemoryMalloc(len);
+  char   *s = taosMemoryMalloc(len);

   SRaftId destId = pSender->pSyncNode->replicasId[pSender->replicaIndex];
   char    host[64];

@@ -653,7 +653,7 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) {
     cJSON_AddStringToObject(pFromId, "addr", u64buf);
     {
       uint64_t u64 = pReceiver->fromId.addr;
-      cJSON *pTmp = pFromId;
+      cJSON   *pTmp = pFromId;
       char     host[128] = {0};
       uint16_t port;
       syncUtilU642Addr(u64, host, sizeof(host), &port);

@@ -686,14 +686,14 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) {
 char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver) {
   cJSON *pJson = snapshotReceiver2Json(pReceiver);
-  char *serialized = cJSON_Print(pJson);
+  char  *serialized = cJSON_Print(pJson);
   cJSON_Delete(pJson);
   return serialized;
 }

 char *snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event) {
   int32_t len = 256;
-  char *s = taosMemoryMalloc(len);
+  char   *s = taosMemoryMalloc(len);

   SRaftId fromId = pReceiver->fromId;
   char    host[128];
source/libs/sync/test/syncConfigChangeSnapshotTest.cpp
@@ -125,7 +125,7 @@ int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void* pParam, void** ppWriter)
   return 0;
 }

 int32_t SnapshotStopWrite(struct SSyncFSM* pFsm, void* pWriter, bool isApply, SSnapshot* pSnapshot) {
   char logBuf[256] = {0};
   snprintf(logBuf, sizeof(logBuf), "==callback== ==SnapshotStopWrite== pFsm:%p, pWriter:%p, isApply:%d", pFsm,
            pWriter, isApply);
source/libs/sync/test/syncEntryCacheTest.cpp
@@ -5,8 +5,8 @@
 #include "syncRaftLog.h"
 #include "syncRaftStore.h"
 #include "syncUtil.h"
+#include "tref.h"
 #include "tskiplist.h"

 void logTest() { sTrace("--- sync log test: trace"); }

@@ -51,7 +51,7 @@ SRaftEntryCache* createCache(int maxCount) {
 }

 void test1() {
-  int32_t code = 0;
+  int32_t          code = 0;
   SRaftEntryCache* pCache = createCache(5);
   for (int i = 0; i < 10; ++i) {
     SSyncRaftEntry* pEntry = createEntry(i);

@@ -68,7 +68,7 @@ void test1() {
 }

 void test2() {
-  int32_t code = 0;
+  int32_t          code = 0;
   SRaftEntryCache* pCache = createCache(5);
   for (int i = 0; i < 10; ++i) {
     SSyncRaftEntry* pEntry = createEntry(i);

@@ -77,7 +77,7 @@ void test2() {
   }
   raftEntryCacheLog2((char*)"==test1 write 5 entries==", pCache);

-  SyncIndex index = 2;
+  SyncIndex       index = 2;
   SSyncRaftEntry* pEntry = NULL;
   code = raftEntryCacheGetEntryP(pCache, index, &pEntry);

@@ -107,7 +107,7 @@ void test2() {
 }

 void test3() {
-  int32_t code = 0;
+  int32_t          code = 0;
   SRaftEntryCache* pCache = createCache(20);
   for (int i = 0; i <= 4; ++i) {
     SSyncRaftEntry* pEntry = createEntry(i);

@@ -122,8 +122,6 @@ void test3() {
   raftEntryCacheLog2((char*)"==test3 write 10 entries==", pCache);
 }

-
-
 static void freeObj(void* param) {
   SSyncRaftEntry* pEntry = (SSyncRaftEntry*)param;
   syncEntryLog2((char*)"freeObj: ", pEntry);

@@ -138,19 +136,41 @@ void test4() {
   int64_t rid = taosAddRef(testRefId, pEntry);
   sTrace("rid: %ld", rid);

   do {
     SSyncRaftEntry* pAcquireEntry = (SSyncRaftEntry*)taosAcquireRef(testRefId, rid);
     syncEntryLog2((char*)"acquire: ", pAcquireEntry);

+    taosAcquireRef(testRefId, rid);
+    taosAcquireRef(testRefId, rid);
+    taosAcquireRef(testRefId, rid);

-    taosReleaseRef(testRefId, rid);
-    //taosReleaseRef(testRefId, rid);
+    // taosReleaseRef(testRefId, rid);
+    // taosReleaseRef(testRefId, rid);
   } while (0);

   taosRemoveRef(testRefId, rid);
+
+  for (int i = 0; i < 10; ++i) {
+    sTrace("taosReleaseRef, %d", i);
+    taosReleaseRef(testRefId, rid);
+  }
+}
+
+void test5() {
+  int32_t testRefId = taosOpenRef(5, freeObj);
+  for (int i = 0; i < 100; i++) {
+    SSyncRaftEntry* pEntry = createEntry(i);
+    ASSERT(pEntry != NULL);
+
+    int64_t rid = taosAddRef(testRefId, pEntry);
+    sTrace("rid: %ld", rid);
+  }
+
+  for (int64_t rid = 2; rid < 101; rid++) {
+    SSyncRaftEntry* pAcquireEntry = (SSyncRaftEntry*)taosAcquireRef(testRefId, rid);
+    syncEntryLog2((char*)"taosAcquireRef: ", pAcquireEntry);
+  }
 }

 int main(int argc, char** argv) {

@@ -158,11 +178,13 @@ int main(int argc, char** argv) {
   tsAsyncLog = 0;
   sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE + DEBUG_DEBUG;

-  test1();
-  test2();
-  test3();
-  //test4();
+  /*
+  test1();
+  test2();
+  test3();
+  */
+  test4();
+  // test5();

   return 0;
 }
source/libs/sync/test/syncSnapshotReceiverTest.cpp
@@ -30,7 +30,7 @@ int32_t SnapshotStopRead(struct SSyncFSM* pFsm, void* pReader) { return 0; }
 int32_t SnapshotDoRead(struct SSyncFSM* pFsm, void* pReader, void** ppBuf, int32_t* len) { return 0; }
 int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void* pParam, void** ppWriter) { return 0; }
 int32_t SnapshotStopWrite(struct SSyncFSM* pFsm, void* pWriter, bool isApply, SSnapshot* pSnapshot) { return 0; }
 int32_t SnapshotDoWrite(struct SSyncFSM* pFsm, void* pWriter, void* pBuf, int32_t len) { return 0; }

 SSyncSnapshotReceiver* createReceiver() {
source/libs/sync/test/syncTestTool.cpp
@@ -126,7 +126,7 @@ int32_t SnapshotStartWrite(struct SSyncFSM* pFsm, void* pParam, void** ppWriter)
   return 0;
 }

 int32_t SnapshotStopWrite(struct SSyncFSM* pFsm, void* pWriter, bool isApply, SSnapshot* pSnapshot) {
   if (isApply) {
     gSnapshotLastApplyIndex = gFinishLastApplyIndex;
     gSnapshotLastApplyTerm = gFinishLastApplyTerm;
source/util/src/tconfig.c
@@ -335,6 +335,7 @@ int32_t cfgSetItem(SConfig *pCfg, const char *name, const char *value, ECfgSrcTy
 }

 SConfigItem *cfgGetItem(SConfig *pCfg, const char *name) {
+  if (pCfg == NULL) return NULL;
   int32_t size = taosArrayGetSize(pCfg->array);
   for (int32_t i = 0; i < size; ++i) {
     SConfigItem *pItem = taosArrayGet(pCfg->array, i);
source/util/src/tlog.c
@@ -17,6 +17,7 @@
 #include "tlog.h"
 #include "os.h"
 #include "tutil.h"
+#include "tconfig.h"

 #define LOG_MAX_LINE_SIZE        (1024)
 #define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3)

@@ -62,6 +63,7 @@ typedef struct {
   TdThreadMutex logMutex;
 } SLogObj;

+extern SConfig *tsCfg;
 static int8_t  tsLogInited = 0;
 static SLogObj tsLogObj = {.fileNum = 1};
 static int64_t tsAsyncLogLostLines = 0;

@@ -741,25 +743,3 @@ cmp_end:
   return ret;
 }

-void taosSetAllDebugFlag(int32_t flag) {
-  if (flag <= 0) return;
-
-  uDebugFlag = flag;
-  rpcDebugFlag = flag;
-  jniDebugFlag = flag;
-  qDebugFlag = flag;
-  cDebugFlag = flag;
-  dDebugFlag = flag;
-  vDebugFlag = flag;
-  mDebugFlag = flag;
-  wDebugFlag = flag;
-  sDebugFlag = flag;
-  tsdbDebugFlag = flag;
-  tqDebugFlag = flag;
-  fsDebugFlag = flag;
-  udfDebugFlag = flag;
-  smaDebugFlag = flag;
-  idxDebugFlag = flag;
-  uInfo("all debug flag are set to %d", flag);
-}
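taosSetAllDebugFlag has moved out of tlog.c into tglobal.c, where it can also write the values back into the tsCfg items (hence the extern SConfig *tsCfg and the tconfig.h include above). The NULL guard added to cfgGetItem in tconfig.c matters for the same reason: debug flags can be set before the config object exists. A small, self-contained illustration of that guard with simplified stand-in types (Config, Item, and the sketch functions are illustrative, not the real API):

// Simplified illustration of the cfgGetItem() NULL guard: setting a flag before the
// config object exists must not crash; it just skips the cfg-item update.
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t i32; } Item;
typedef struct { Item item; } Config;

static Config *gCfg = NULL;   // stands in for tsCfg, which may not be initialised yet

static Item *cfgGetItemSketch(Config *cfg, const char *name) {
  (void)name;
  if (cfg == NULL) return NULL;    // the guard added in tconfig.c
  return &cfg->item;
}

static void setDebugFlagSketch(int32_t *pFlag, const char *name, int32_t val) {
  Item *it = cfgGetItemSketch(gCfg, name);
  if (it != NULL) it->i32 = val;   // keep the cfg item in sync when it exists
  *pFlag = val;                    // always update the running value
}

int main(void) {
  int32_t dDebugFlag = 131;
  setDebugFlagSketch(&dDebugFlag, "dDebugFlag", 143);   // safe even though gCfg is NULL
  printf("dDebugFlag=%d\n", dDebugFlag);
  return 0;
}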
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py
@@ -65,14 +65,14 @@ class TDTestCase:
                 is_leader = True

         if count == 1 and is_leader:
-            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
         else:
             tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")

         for k, v in self.dnode_list.items():
             if k == mnode_name:
                 if v[3] == 0:
-                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
+                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
                 else:
                     tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name, v[3]))
             else:

@@ -115,7 +115,7 @@ class TDTestCase:
         for k, v in vgroups_infos.items():
             if len(v) == 1 and v[0] == "leader":
-                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
             else:
                 tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py
@@ -71,14 +71,14 @@ class TDTestCase:
                 is_leader = True

         if count == 1 and is_leader:
-            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
         else:
             tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")

         for k, v in self.dnode_list.items():
             if k == mnode_name:
                 if v[3] == 0:
-                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
+                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
                 else:
                     tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name, v[3]))
             else:

@@ -121,7 +121,7 @@ class TDTestCase:
         for k, v in vgroups_infos.items():
             if len(v) == 1 and v[0] == "leader":
-                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
             else:
                 tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))

@@ -129,7 +129,7 @@ class TDTestCase:
         drop_db_sql = "drop database if exists {}".format(dbname)
         create_db_sql = "create database {} replica {} vgroups {}".format(dbname, replica_num, vgroup_nums)

-        tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
         tdSql.execute(drop_db_sql)
         tdSql.execute(create_db_sql)
         tdSql.execute("use {}".format(dbname))

@@ -155,7 +155,7 @@ class TDTestCase:
                 ts = self.ts + 1000*row_num
                 tdSql.execute(f"insert into {sub_tbname} values ({ts} ,{row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")

-        tdLog.info(" ==== create database {} and insert rows execute end =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname))

     def check_insert_status(self, dbname, tb_nums, row_nums):
         tdSql.execute("use {}".format(dbname))
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py
@@ -71,14 +71,14 @@ class TDTestCase:
                 is_leader = True

         if count == 1 and is_leader:
-            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
         else:
             tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")

         for k, v in self.dnode_list.items():
             if k == mnode_name:
                 if v[3] == 0:
-                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
+                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
                 else:
                     tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name, v[3]))
             else:

@@ -121,7 +121,7 @@ class TDTestCase:
         for k, v in vgroups_infos.items():
             if len(v) == 1 and v[0] == "leader":
-                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
             else:
                 tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))

@@ -129,7 +129,7 @@ class TDTestCase:
         drop_db_sql = "drop database if exists {}".format(dbname)
         create_db_sql = "create database {} replica {} vgroups {}".format(dbname, replica_num, vgroup_nums)

-        tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
         tdSql.execute(drop_db_sql)
         tdSql.execute(create_db_sql)
         tdSql.execute("use {}".format(dbname))

@@ -155,7 +155,7 @@ class TDTestCase:
                 ts = self.ts + 1000*row_num
                 tdSql.execute(f"insert into {sub_tbname} values ({ts} ,{row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")

-        tdLog.info(" ==== create database {} and insert rows execute end =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname))

     def check_insert_status(self, dbname, tb_nums, row_nums):
         tdSql.execute("use {}".format(dbname))
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py
@@ -80,14 +80,14 @@ class TDTestCase:
                 is_leader = True

         if count == 1 and is_leader:
-            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
         else:
             tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")

         for k, v in self.dnode_list.items():
             if k == mnode_name:
                 if v[3] == 0:
-                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
+                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
                 else:
                     tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name, v[3]))
             else:

@@ -130,7 +130,7 @@ class TDTestCase:
         for k, v in vgroups_infos.items():
             if len(v) == 1 and v[0] == "leader":
-                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
             else:
                 tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))

@@ -138,7 +138,7 @@ class TDTestCase:
         drop_db_sql = "drop database if exists {}".format(dbname)
         create_db_sql = "create database {} replica {} vgroups {}".format(dbname, replica_num, vgroup_nums)

-        tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
         tdSql.execute(drop_db_sql)
         tdSql.execute(create_db_sql)
         tdSql.execute("use {}".format(dbname))

@@ -161,7 +161,7 @@ class TDTestCase:
                 ts = self.ts + self.ts_step*row_num
                 tdSql.execute(f"insert into {sub_tbname} values ({ts} ,{row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")

-        tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
+        tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))

     def append_rows_of_exists_tables(self, dbname, stablename, tbname, append_nums):

@@ -170,7 +170,7 @@ class TDTestCase:
         for row_num in range(append_nums):
             tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
             # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-        tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname, stablename))
+        tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname, stablename))
         os.system("taos -s 'select count(*) from {}.{}';".format(dbname, stablename))

     def check_insert_rows(self, dbname, stablename, tb_nums, row_nums, append_rows):

@@ -197,7 +197,7 @@ class TDTestCase:
             time.sleep(0.1)
             tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
             status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname, stablename), 0, 0, tb_nums*row_nums+append_rows)
-            tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count, dbname))
+            tdLog.debug(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count, dbname))
             count += 1

@@ -218,7 +218,7 @@ class TDTestCase:
             time.sleep(0.1)
             tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
             status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname, stablename), tb_nums)
-            tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count, dbname))
+            tdLog.debug(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count, dbname))
             count += 1

     def _get_stop_dnode_id(self, dbname):
         tdSql.query("show {}.vgroups".format(dbname))

@@ -255,8 +255,8 @@ class TDTestCase:
         while status != "offline":
             time.sleep(0.1)
             status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id))

     def wait_start_dnode_OK(self):

@@ -277,8 +277,8 @@ class TDTestCase:
         while status != "ready":
             time.sleep(0.1)
             status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has restart , id is {} ====".format(self.stop_dnode_id))

     def _parse_datetime(self, timestr):
         try:

@@ -342,9 +342,9 @@ class TDTestCase:
         elif isinstance(data, str):
             tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                        (sql, row, col, tdSql.queryResult[row][col], data))
-        elif isinstance(data, datetime.date):
-            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
-                       (sql, row, col, tdSql.queryResult[row][col], data))
+        # elif isinstance(data, datetime.date):
+        #     tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
+        #                (sql, row, col, tdSql.queryResult[row][col], data))
         elif isinstance(data, float):
             tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                        (sql, row, col, tdSql.queryResult[row][col], data))

@@ -389,15 +389,15 @@ class TDTestCase:
         # append rows of stablename when dnode stop

         tbname = "sub_{}_{}".format(stablename, 0)
-        tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+        tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
         self.append_rows_of_exists_tables(db_name, stablename, tbname, 100)
-        tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+        tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
         self.check_insert_rows(db_name, stablename, tb_nums=10, row_nums=10, append_rows=100)

         # create new stables
-        tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb1', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb1', tb_nums=10, row_nums=10, append_rows=0)

         # begin start dnode

@@ -409,9 +409,9 @@ class TDTestCase:
             tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))

         # create new stables again
-        tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb2', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb2', tb_nums=10, row_nums=10, append_rows=0)

     def unsync_run_case(self):

@@ -447,7 +447,7 @@ class TDTestCase:
         self.create_database(dbname=db_name, replica_num=self.replica, vgroup_nums=1)
         self.create_stable_insert_datas(dbname=db_name, stablename=stablename, tb_nums=10, row_nums=10)
-        tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
+        tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))

         # create sync threading and start it
         self.current_thread = _create_threading(db_name)

@@ -457,21 +457,21 @@ class TDTestCase:
         self.check_insert_rows(db_name, stablename, tb_nums=10, row_nums=10, append_rows=0)

         tbname = "sub_{}_{}".format(stablename, 0)
-        tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+        tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
         self.append_rows_of_exists_tables(db_name, stablename, tbname, 100)
-        tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+        tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
         self.check_insert_rows(db_name, stablename, tb_nums=10, row_nums=10, append_rows=100)

         # create new stables
-        tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb1', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb1', tb_nums=10, row_nums=10, append_rows=0)

         # create new stables again
-        tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb2', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb2', tb_nums=10, row_nums=10, append_rows=0)

         self.current_thread.join()
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py
浏览文件 @
29649171
...
...
@@ -80,14 +80,14 @@ class TDTestCase:
                 is_leader = True
         if count == 1 and is_leader:
-            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
         else:
             tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
         for k, v in self.dnode_list.items():
             if k == mnode_name:
                 if v[3] == 0:
-                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
+                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
                 else:
                     tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name, v[3]))
             else:
...
@@ -130,7 +130,7 @@ class TDTestCase:
         for k, v in vgroups_infos.items():
             if len(v) == 1 and v[0] == "leader":
-                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
             else:
                 tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
...
@@ -138,7 +138,7 @@ class TDTestCase:
         drop_db_sql = "drop database if exists {}".format(dbname)
         create_db_sql = "create database {} replica {} vgroups {}".format(dbname, replica_num, vgroup_nums)
-        tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
         tdSql.execute(drop_db_sql)
         tdSql.execute(create_db_sql)
         tdSql.execute("use {}".format(dbname))
...
@@ -161,7 +161,7 @@ class TDTestCase:
                 ts = self.ts + self.ts_step * row_num
                 tdSql.execute(f"insert into {sub_tbname} values ({ts} ,{row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-        tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
+        tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
     def append_rows_of_exists_tables(self, dbname, stablename, tbname, append_nums):
...
@@ -170,7 +170,7 @@ class TDTestCase:
         for row_num in range(append_nums):
             tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
             # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-        tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname, stablename))
+        tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname, stablename))
         os.system("taos -s 'select count(*) from {}.{}';".format(dbname, stablename))
     def check_insert_rows(self, dbname, stablename, tb_nums, row_nums, append_rows):
...
@@ -197,7 +197,7 @@ class TDTestCase:
                 time.sleep(0.1)
                 tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
                 status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname, stablename), 0, 0, tb_nums*row_nums+append_rows)
-            tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count, dbname))
+            tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count, dbname))
             count += 1
...
@@ -218,7 +218,7 @@ class TDTestCase:
                 time.sleep(0.1)
                 tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
                 status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname, stablename), tb_nums)
-            tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count, dbname))
+            tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count, dbname))
             count += 1
     def _get_stop_dnode_id(self, dbname):
...
@@ -256,8 +256,8 @@ class TDTestCase:
         while status != "offline":
             time.sleep(0.1)
             status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
     def wait_start_dnode_OK(self):
...
@@ -278,8 +278,8 @@ class TDTestCase:
         while status != "ready":
             time.sleep(0.1)
             status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
     def _parse_datetime(self, timestr):
         try:
...
@@ -343,9 +343,9 @@ class TDTestCase:
         elif isinstance(data, str):
             tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
-        elif isinstance(data, datetime.date):
-            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
+        # elif isinstance(data, datetime.date):
+        #     tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
+        #         (sql, row, col, tdSql.queryResult[row][col], data))
         elif isinstance(data, float):
             tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
...
@@ -390,15 +390,15 @@ class TDTestCase:
         # append rows of stablename when dnode stop
         tbname = "sub_{}_{}".format(stablename, 0)
-        tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+        tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
         self.append_rows_of_exists_tables(db_name, stablename, tbname, 100)
-        tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+        tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
         self.check_insert_rows(db_name, stablename, tb_nums=10, row_nums=10, append_rows=100)
         # create new stables
-        tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb1', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb1', tb_nums=10, row_nums=10, append_rows=0)
         # begin start dnode
...
@@ -410,9 +410,9 @@ class TDTestCase:
             tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
         # create new stables again
-        tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb2', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb2', tb_nums=10, row_nums=10, append_rows=0)
     def unsync_run_case(self):
...
@@ -448,7 +448,7 @@ class TDTestCase:
         self.create_database(dbname=db_name, replica_num=self.replica, vgroup_nums=1)
         self.create_stable_insert_datas(dbname=db_name, stablename=stablename, tb_nums=10, row_nums=10)
-        tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
+        tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
         # create sync threading and start it
         self.current_thread = _create_threading(db_name)
...
@@ -458,21 +458,21 @@ class TDTestCase:
         self.check_insert_rows(db_name, stablename, tb_nums=10, row_nums=10, append_rows=0)
         tbname = "sub_{}_{}".format(stablename, 0)
-        tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+        tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
         self.append_rows_of_exists_tables(db_name, stablename, tbname, 100)
-        tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+        tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
         self.check_insert_rows(db_name, stablename, tb_nums=10, row_nums=10, append_rows=100)
         # create new stables
-        tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb1', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb1', tb_nums=10, row_nums=10, append_rows=0)
         # create new stables again
-        tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb2', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb2', tb_nums=10, row_nums=10, append_rows=0)
         self.current_thread.join()
...
...
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py
...
...
@@ -80,14 +80,14 @@ class TDTestCase:
                 is_leader = True
         if count == 1 and is_leader:
-            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
         else:
             tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
         for k, v in self.dnode_list.items():
             if k == mnode_name:
                 if v[3] == 0:
-                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
+                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
                 else:
                     tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name, v[3]))
             else:
...
@@ -130,7 +130,7 @@ class TDTestCase:
         for k, v in vgroups_infos.items():
             if len(v) == 1 and v[0] == "leader":
-                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
             else:
                 tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
...
@@ -138,7 +138,7 @@ class TDTestCase:
         drop_db_sql = "drop database if exists {}".format(dbname)
         create_db_sql = "create database {} replica {} vgroups {}".format(dbname, replica_num, vgroup_nums)
-        tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
         tdSql.execute(drop_db_sql)
         tdSql.execute(create_db_sql)
         tdSql.execute("use {}".format(dbname))
...
@@ -161,7 +161,7 @@ class TDTestCase:
                 ts = self.ts + self.ts_step * row_num
                 tdSql.execute(f"insert into {sub_tbname} values ({ts} ,{row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-        tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
+        tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
     def append_rows_of_exists_tables(self, dbname, stablename, tbname, append_nums):
...
@@ -170,7 +170,7 @@ class TDTestCase:
         for row_num in range(append_nums):
             tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
             # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-        tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname, stablename))
+        tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname, stablename))
         os.system("taos -s 'select count(*) from {}.{}';".format(dbname, stablename))
     def check_insert_rows(self, dbname, stablename, tb_nums, row_nums, append_rows):
...
@@ -197,7 +197,7 @@ class TDTestCase:
                 time.sleep(0.1)
                 tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
                 status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname, stablename), 0, 0, tb_nums*row_nums+append_rows)
-            tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count, dbname))
+            tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count, dbname))
             count += 1
...
@@ -218,7 +218,7 @@ class TDTestCase:
                 time.sleep(0.1)
                 tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
                 status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname, stablename), tb_nums)
-            tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count, dbname))
+            tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count, dbname))
             count += 1
     def _get_stop_dnode_id(self, dbname):
...
@@ -256,8 +256,8 @@ class TDTestCase:
         while status != "offline":
             time.sleep(0.1)
             status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
     def wait_start_dnode_OK(self):
...
@@ -278,8 +278,8 @@ class TDTestCase:
         while status != "ready":
             time.sleep(0.1)
             status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
     def _parse_datetime(self, timestr):
         try:
...
@@ -343,9 +343,9 @@ class TDTestCase:
         elif isinstance(data, str):
             tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
-        elif isinstance(data, datetime.date):
-            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
+        # elif isinstance(data, datetime.date):
+        #     tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
+        #         (sql, row, col, tdSql.queryResult[row][col], data))
         elif isinstance(data, float):
             tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
...
@@ -390,15 +390,15 @@ class TDTestCase:
         # append rows of stablename when dnode stop
         tbname = "sub_{}_{}".format(stablename, 0)
-        tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+        tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
         self.append_rows_of_exists_tables(db_name, stablename, tbname, 100)
-        tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+        tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
         self.check_insert_rows(db_name, stablename, tb_nums=10, row_nums=10, append_rows=100)
         # create new stables
-        tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb1', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb1', tb_nums=10, row_nums=10, append_rows=0)
         # begin start dnode
...
@@ -410,9 +410,9 @@ class TDTestCase:
             tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
         # create new stables again
-        tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb2', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb2', tb_nums=10, row_nums=10, append_rows=0)
     def unsync_run_case(self):
...
@@ -453,7 +453,7 @@ class TDTestCase:
         self.create_database(dbname=db_name, replica_num=self.replica, vgroup_nums=1)
         self.create_stable_insert_datas(dbname=db_name, stablename=stablename, tb_nums=10, row_nums=10)
-        tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
+        tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
         # create sync threading and start it
         self.current_thread = _create_threading(db_name)
...
@@ -463,21 +463,21 @@ class TDTestCase:
         self.check_insert_rows(db_name, stablename, tb_nums=10, row_nums=10, append_rows=0)
         tbname = "sub_{}_{}".format(stablename, 0)
-        tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+        tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
         self.append_rows_of_exists_tables(db_name, stablename, tbname, 100)
-        tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+        tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
         self.check_insert_rows(db_name, stablename, tb_nums=10, row_nums=10, append_rows=100)
         # create new stables
-        tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb1', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb1', tb_nums=10, row_nums=10, append_rows=0)
         # create new stables again
-        tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb2', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb2', tb_nums=10, row_nums=10, append_rows=0)
         self.current_thread.join()
...
@@ -493,7 +493,7 @@ class TDTestCase:
             else:
                 continue
         if port:
-            tdLog.info(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
+            tdLog.notice(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
             psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' % (port)
             processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
...
...
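The force-stop path in these cases resolves the dnode's listening port to a PID and then sends SIGKILL. A condensed sketch of that flow, mirroring force_stop_dnode() in the diff above (the port value is illustrative; 6030 is only the default taosd serverPort):

    # Distilled from force_stop_dnode(); function name and port are illustrative.
    import os, subprocess

    def force_kill_by_port(port: str) -> None:
        # find the PID listening on the given port, then kill -9 it, as the test does
        ps_cmd = ('netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"'
                  "|awk '{print $2}'|cut -d/ -f1|head -n1") % port
        pid = subprocess.check_output(ps_cmd, shell=True).decode("utf-8").strip()
        if pid:
            os.system("kill -9 {}".format(pid))

    # force_kill_by_port("6030")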
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py
(diff collapsed, not shown)
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py
...
...
@@ -114,9 +114,9 @@ class TDTestCase:
         elif isinstance(data, str):
             tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
-        elif isinstance(data, datetime.date):
-            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
+        # elif isinstance(data, datetime.date):
+        #     tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
+        #         (sql, row, col, tdSql.queryResult[row][col], data))
         elif isinstance(data, float):
             tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
...
@@ -163,14 +163,14 @@ class TDTestCase:
                 is_leader = True
         if count == 1 and is_leader:
-            tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
         else:
             tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
         for k, v in self.dnode_list.items():
             if k == mnode_name:
                 if v[3] == 0:
-                    tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
+                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
                 else:
                     tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name, v[3]))
             else:
...
@@ -213,7 +213,7 @@ class TDTestCase:
         for k, v in vgroups_infos.items():
             if len(v) == 1 and v[0] == "leader":
-                tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+                tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
             else:
                 tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
...
@@ -221,7 +221,7 @@ class TDTestCase:
         drop_db_sql = "drop database if exists {}".format(dbname)
         create_db_sql = "create database {} replica {} vgroups {}".format(dbname, replica_num, vgroup_nums)
-        tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
         tdSql.execute(drop_db_sql)
         tdSql.execute(create_db_sql)
         tdSql.execute("use {}".format(dbname))
...
@@ -244,7 +244,7 @@ class TDTestCase:
                 ts = self.ts + self.ts_step * row_num
                 tdSql.execute(f"insert into {sub_tbname} values ({ts} ,{row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-        tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
+        tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
     def append_rows_of_exists_tables(self, dbname, stablename, tbname, append_nums):
...
@@ -253,7 +253,7 @@ class TDTestCase:
         for row_num in range(append_nums):
             tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
             # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-        tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname, stablename))
+        tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname, stablename))
         os.system("taos -s 'select count(*) from {}.{}';".format(dbname, stablename))
     def check_insert_rows(self, dbname, stablename, tb_nums, row_nums, append_rows):
...
@@ -280,7 +280,7 @@ class TDTestCase:
                 time.sleep(0.1)
                 tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
                 status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname, stablename), 0, 0, tb_nums*row_nums+append_rows)
-            tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count, dbname))
+            tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {} ====".format(count, dbname))
             count += 1
...
@@ -301,7 +301,7 @@ class TDTestCase:
                 time.sleep(0.1)
                 tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
                 status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname, stablename), tb_nums)
-            tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count, dbname))
+            tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count, dbname))
             count += 1
     def _get_stop_dnode_id(self, dbname):
...
@@ -340,8 +340,8 @@ class TDTestCase:
         while status != "offline":
             time.sleep(0.1)
             status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
     def wait_start_dnode_OK(self):
...
@@ -362,8 +362,8 @@ class TDTestCase:
         while status != "ready":
             time.sleep(0.1)
             status = _get_status()
-            # tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-        tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
+            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+        tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
     def get_leader_infos(self, dbname):
...
@@ -390,10 +390,10 @@ class TDTestCase:
                 if role == self.stop_dnode_id:
                     if vgroup_info[ind+1] == "offline" and "leader" in vgroup_info:
-                        tdLog.info(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1]))
+                        tdLog.notice(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1]))
                         check_status = True
                     elif vgroup_info[ind+1] != "offline":
-                        tdLog.info(" === dnode {} should be offline ".format(self.stop_dnode_id))
+                        tdLog.notice(" === dnode {} should be offline ".format(self.stop_dnode_id))
                     else:
                         continue
                 break
...
@@ -410,7 +410,7 @@ class TDTestCase:
             else:
                 continue
         if port:
-            tdLog.info(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
+            tdLog.notice(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
             psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' % (port)
             processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
...
@@ -457,18 +457,18 @@ class TDTestCase:
         if revote_status:
             tbname = "sub_{}_{}".format(stablename, 0)
-            tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+            tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
             self.append_rows_of_exists_tables(db_name, stablename, tbname, 100)
-            tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+            tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
             self.check_insert_rows(db_name, stablename, tb_nums=10, row_nums=10, append_rows=100)
             # create new stables
-            tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+            tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
             self.create_stable_insert_datas(dbname=db_name, stablename='new_stb1', tb_nums=10, row_nums=10)
-            tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+            tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
             self.check_insert_rows(db_name, 'new_stb1', tb_nums=10, row_nums=10, append_rows=0)
         else:
-            tdLog.info("===== leader of database {} is not ok , append rows fail =====".format(db_name))
+            tdLog.notice("===== leader of database {} is not ok , append rows fail =====".format(db_name))
         # begin start dnode
         start = time.time()
...
@@ -480,9 +480,9 @@ class TDTestCase:
             tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
         # create new stables again
-        tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb2', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb2', tb_nums=10, row_nums=10, append_rows=0)
     def unsync_run_case(self):
...
@@ -509,21 +509,21 @@ class TDTestCase:
         revote_status = self.check_revote_leader_success(db_name, before_leader_infos, after_leader_infos)
         tbname = "sub_{}_{}".format(stablename, 0)
-        tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+        tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
         self.append_rows_of_exists_tables(db_name, stablename, tbname, 100)
-        tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
+        tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname, self.stop_dnode_id))
         self.check_insert_rows(db_name, stablename, tb_nums=10, row_nums=10, append_rows=100)
         # create new stables
-        tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb1', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb1', tb_nums=10, row_nums=10, append_rows=0)
         # create new stables again
-        tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.create_stable_insert_datas(dbname=db_name, stablename='new_stb2', tb_nums=10, row_nums=10)
-        tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
+        tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2', self.stop_dnode_id))
         self.check_insert_rows(db_name, 'new_stb2', tb_nums=10, row_nums=10, append_rows=0)
...
@@ -551,7 +551,7 @@ class TDTestCase:
         self.create_database(dbname=db_name, replica_num=self.replica, vgroup_nums=1)
         self.create_stable_insert_datas(dbname=db_name, stablename=stablename, tb_nums=10, row_nums=10)
-        tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
+        tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
         # create sync threading and start it
         self.current_thread = _create_threading(db_name)
...
...
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py
0 → 100644
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *

import datetime
import inspect
import time
import socket
import subprocess
import threading
sys.path.append(os.path.dirname(__file__))

class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to excute {__file__}")
        tdSql.init(conn.cursor())
        self.host = socket.gethostname()
        self.mnode_list = {}
        self.dnode_list = {}
        self.ts = 1483200000000
        self.ts_step = 1000
        self.db_name = 'testdb'
        self.replica = 3
        self.vgroups = 1
        self.tb_nums = 10
        self.row_nums = 100
        self.stop_dnode_id = None
        self.loop_restart_times = 5
        self.thread_list = []
        self.max_restart_time = 10
        self.try_check_times = 10
        self.query_times = 100

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def check_setup_cluster_status(self):
        tdSql.query("show mnodes")
        for mnode in tdSql.queryResult:
            name = mnode[1]
            info = mnode
            self.mnode_list[name] = info
        tdSql.query("show dnodes")
        for dnode in tdSql.queryResult:
            name = dnode[1]
            info = dnode
            self.dnode_list[name] = info
        count = 0
        is_leader = False
        mnode_name = ''
        for k, v in self.mnode_list.items():
            count += 1
            # only for 1 mnode
            mnode_name = k
            if v[2] == 'leader':
                is_leader = True
        if count == 1 and is_leader:
            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
        for k, v in self.dnode_list.items():
            if k == mnode_name:
                if v[3] == 0:
                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
                else:
                    tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name, v[3]))
            else:
                continue

    def create_database(self, dbname, replica_num, vgroup_nums):
        drop_db_sql = "drop database if exists {}".format(dbname)
        create_db_sql = "create database {} replica {} vgroups {}".format(dbname, replica_num, vgroup_nums)
        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
        tdSql.execute(drop_db_sql)
        tdSql.execute(create_db_sql)
        tdSql.execute("use {}".format(dbname))

    def create_stable_insert_datas(self, dbname, stablename, tb_nums, row_nums):
        tdSql.execute("use {}".format(dbname))
        tdSql.execute(
            '''create table {}
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
            tags (t1 int)
            '''.format(stablename)
        )
        for i in range(tb_nums):
            sub_tbname = "sub_{}_{}".format(stablename, i)
            tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename, i))
            # insert datas about new database
            for row_num in range(row_nums):
                ts = self.ts + self.ts_step * row_num
                tdSql.execute(f"insert into {sub_tbname} values ({ts} ,{row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
        tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))

    def append_rows_of_exists_tables(self, dbname, stablename, tbname, append_nums):
        tdSql.execute("use {}".format(dbname))
        for row_num in range(append_nums):
            tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
            # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
        tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname, stablename))
        os.system("taos -s 'select count(*) from {}.{}';".format(dbname, stablename))

    def check_insert_rows(self, dbname, stablename, tb_nums, row_nums, append_rows):
        tdSql.execute("use {}".format(dbname))
        tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
        while not tdSql.queryResult:
            time.sleep(0.1)
            tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
        status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname, stablename), 0, 0, tb_nums*row_nums+append_rows)
        count = 0
        while not status_OK:
            if count > self.try_check_times:
                os.system("taos -s ' show {}.vgroups; '".format(dbname))
                tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count, self.try_check_times, dbname))
                break
            time.sleep(0.1)
            tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
            while not tdSql.queryResult:
                time.sleep(0.1)
                tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
            status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname, stablename), 0, 0, tb_nums*row_nums+append_rows)
            tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count, dbname))
            count += 1
        tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
        while not tdSql.queryResult:
            time.sleep(0.1)
            tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
        status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname, stablename), tb_nums)
        count = 0
        while not status_OK:
            if count > self.try_check_times:
                os.system("taos -s ' show {}.vgroups;'".format(dbname))
                tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count, self.try_check_times, dbname))
                break
            time.sleep(0.1)
            tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
            while not tdSql.queryResult:
                time.sleep(0.1)
                tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
            status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname, stablename), tb_nums)
            tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count, dbname))
            count += 1

    def _get_stop_dnode_id(self, dbname):
        tdSql.query("show {}.vgroups".format(dbname))
        vgroup_infos = tdSql.queryResult
        for vgroup_info in vgroup_infos:
            leader_infos = vgroup_info[3:-4]
            # print(vgroup_info)
            for ind, role in enumerate(leader_infos):
                if role == 'follower':
                    # print(ind,leader_infos)
                    self.stop_dnode_id = leader_infos[ind-1]
                    break
        return self.stop_dnode_id

    def wait_stop_dnode_OK(self):
        def _get_status():
            newTdSql = tdCom.newTdSql()
            status = ""
            newTdSql.query("show dnodes")
            dnode_infos = newTdSql.queryResult
            for dnode_info in dnode_infos:
                id = dnode_info[0]
                dnode_status = dnode_info[4]
                if id == self.stop_dnode_id:
                    status = dnode_status
                    break
            return status
        status = _get_status()
        while status != "offline":
            time.sleep(0.1)
            status = _get_status()
            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
        tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))

    def wait_start_dnode_OK(self):
        def _get_status():
            newTdSql = tdCom.newTdSql()
            status = ""
            newTdSql.query("show dnodes")
            dnode_infos = newTdSql.queryResult
            for dnode_info in dnode_infos:
                id = dnode_info[0]
                dnode_status = dnode_info[4]
                if id == self.stop_dnode_id:
                    status = dnode_status
                    break
            return status
        status = _get_status()
        while status != "ready":
            time.sleep(0.1)
            status = _get_status()
            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
        tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))

    def _parse_datetime(self, timestr):
        try:
            return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
        except ValueError:
            pass
        try:
            return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
        except ValueError:
            pass

    def mycheckRowCol(self, sql, row, col):
        caller = inspect.getframeinfo(inspect.stack()[2][0])
        if row < 0:
            args = (caller.filename, caller.lineno, sql, row)
            tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
        if col < 0:
            args = (caller.filename, caller.lineno, sql, row)
            tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
        if row > tdSql.queryRows:
            args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
            tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
        if col > tdSql.queryCols:
            args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
            tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)

    def mycheckData(self, sql, row, col, data):
        check_status = True
        self.mycheckRowCol(sql, row, col)
        if tdSql.queryResult[row][col] != data:
            if tdSql.cursor.istype(col, "TIMESTAMP"):
                # suppose user want to check nanosecond timestamp if a longer data passed
                if (len(data) >= 28):
                    if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
                        tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
                else:
                    if tdSql.queryResult[row][col] == self._parse_datetime(data):
                        tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
                return
            if str(tdSql.queryResult[row][col]) == str(data):
                tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
                return
            elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001:
                tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" % (sql, row, col, tdSql.queryResult[row][col], data))
                return
            else:
                caller = inspect.getframeinfo(inspect.stack()[1][0])
                args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data)
                tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                check_status = False
        if data is None:
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, str):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
        # elif isinstance(data, datetime.date):
        #     tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
        #         (sql, row, col, tdSql.queryResult[row][col], data))
        elif isinstance(data, float):
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
        else:
            tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" % (sql, row, col, tdSql.queryResult[row][col], data))
        return check_status

    def mycheckRows(self, sql, expectRows):
        check_status = True
        if len(tdSql.queryResult) == expectRows:
            tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows))
            return True
        else:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows)
            tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
            check_status = False
        return check_status

    def force_stop_dnode(self, dnode_id):
        tdSql.query("show dnodes")
        port = None
        for dnode_info in tdSql.queryResult:
            if dnode_id == dnode_info[0]:
                port = dnode_info[1].split(":")[-1]
                break
            else:
                continue
        if port:
            tdLog.notice(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
            psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' % (port)
            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
            ps_kill_taosd = ''' kill -9 {} '''.format(processID)
            # print(ps_kill_taosd)
            os.system(ps_kill_taosd)

    def basic_query_task(self, dbname, stablename):
        sql = "select * from {}.{} ;".format(dbname, stablename)
        count = 0
        while count < self.query_times:
            os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
            count += 1

    def multi_thread_query_task(self, thread_nums, dbname, stablename):
        for i in range(thread_nums):
            task = threading.Thread(target=self.basic_query_task, args=(dbname, stablename))
            self.thread_list.append(task)
        for thread in self.thread_list:
            thread.start()
        return self.thread_list

    def stop_follower_when_query_going(self):
        tdDnodes = cluster.dnodes
        self.create_database(dbname=self.db_name, replica_num=self.replica, vgroup_nums=1)
        self.create_stable_insert_datas(dbname=self.db_name, stablename="stb1", tb_nums=self.tb_nums, row_nums=self.row_nums)
        # let query task start
        self.thread_list = self.multi_thread_query_task(10, self.db_name, 'stb1')
        # force stop follower
        for loop in range(self.loop_restart_times):
            tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop, self.db_name))
            self.stop_dnode_id = self._get_stop_dnode_id(self.db_name)
            tdDnodes[self.stop_dnode_id - 1].stoptaosd()
            self.wait_stop_dnode_OK()
            start = time.time()
            tdDnodes[self.stop_dnode_id - 1].starttaosd()
            self.wait_start_dnode_OK()
            end = time.time()
            time_cost = int(end - start)
            if time_cost > self.max_restart_time:
                tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
        for thread in self.thread_list:
            thread.join()

    def run(self):
        # basic check of cluster
        self.check_setup_cluster_status()
        self.stop_follower_when_query_going()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
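The concurrency pattern in this new case is simple: a pool of reader threads keeps issuing the same query while the main thread repeatedly stops and restarts one follower dnode. A minimal, framework-free sketch of that shape, where run_query and restart_follower stand in for the taos/tdDnodes calls above:

    # Sketch of the query-while-restarting-follower pattern; names are placeholders.
    import threading

    def run_query_loop(run_query, times=100):
        for _ in range(times):
            run_query()          # e.g. os.system("taos -s 'select * from testdb.stb1;'")

    def query_while_restarting(run_query, restart_follower, thread_nums=10, loops=5):
        threads = [threading.Thread(target=run_query_loop, args=(run_query,)) for _ in range(thread_nums)]
        for t in threads:
            t.start()
        for _ in range(loops):
            restart_follower()   # stoptaosd() + wait offline + starttaosd() + wait ready
        for t in threads:
            t.join()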
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py
0 → 100644
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *

import datetime
import inspect
import time
import socket
import subprocess
import threading
sys.path.append(os.path.dirname(__file__))

class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to excute {__file__}")
        tdSql.init(conn.cursor())
        self.host = socket.gethostname()
        self.mnode_list = {}
        self.dnode_list = {}
        self.ts = 1483200000000
        self.ts_step = 1000
        self.db_name = 'testdb'
        self.replica = 3
        self.vgroups = 1
        self.tb_nums = 10
        self.row_nums = 100
        self.stop_dnode_id = None
        self.loop_restart_times = 5
        self.thread_list = []
        self.max_restart_time = 10
        self.try_check_times = 10
        self.query_times = 100

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def check_setup_cluster_status(self):
        tdSql.query("show mnodes")
        for mnode in tdSql.queryResult:
            name = mnode[1]
            info = mnode
            self.mnode_list[name] = info
        tdSql.query("show dnodes")
        for dnode in tdSql.queryResult:
            name = dnode[1]
            info = dnode
            self.dnode_list[name] = info
        count = 0
        is_leader = False
        mnode_name = ''
        for k, v in self.mnode_list.items():
            count += 1
            # only for 1 mnode
            mnode_name = k
            if v[2] == 'leader':
                is_leader = True
        if count == 1 and is_leader:
            tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
        else:
            tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
        for k, v in self.dnode_list.items():
            if k == mnode_name:
                if v[3] == 0:
                    tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name, v[3]))
                else:
                    tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name, v[3]))
            else:
                continue

    def create_database(self, dbname, replica_num, vgroup_nums):
        drop_db_sql = "drop database if exists {}".format(dbname)
        create_db_sql = "create database {} replica {} vgroups {}".format(dbname, replica_num, vgroup_nums)
        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
        tdSql.execute(drop_db_sql)
        tdSql.execute(create_db_sql)
        tdSql.execute("use {}".format(dbname))

    def create_stable_insert_datas(self, dbname, stablename, tb_nums, row_nums):
        tdSql.execute("use {}".format(dbname))
        tdSql.execute(
            '''create table {}
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
            tags (t1 int)
            '''.format(stablename)
        )
        for i in range(tb_nums):
            sub_tbname = "sub_{}_{}".format(stablename, i)
            tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename, i))
            # insert datas about new database
            for row_num in range(row_nums):
                ts = self.ts + self.ts_step * row_num
                tdSql.execute(f"insert into {sub_tbname} values ({ts} ,{row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
        tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))

    def append_rows_of_exists_tables(self, dbname, stablename, tbname, append_nums):
        tdSql.execute("use {}".format(dbname))
        for row_num in range(append_nums):
            tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
            # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
        tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname, stablename))
        os.system("taos -s 'select count(*) from {}.{}';".format(dbname, stablename))

    def check_insert_rows(self, dbname, stablename, tb_nums, row_nums, append_rows):
        tdSql.execute("use {}".format(dbname))
        tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
        while not tdSql.queryResult:
            time.sleep(0.1)
            tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
        status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname, stablename), 0, 0, tb_nums*row_nums+append_rows)
        count = 0
        while not status_OK:
            if count > self.try_check_times:
                os.system("taos -s ' show {}.vgroups; '".format(dbname))
                tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count, self.try_check_times, dbname))
                break
            time.sleep(0.1)
            tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
            while not tdSql.queryResult:
                time.sleep(0.1)
                tdSql.query("select count(*) from {}.{}".format(dbname, stablename))
            status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname, stablename), 0, 0, tb_nums*row_nums+append_rows)
            tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count, dbname))
            count += 1
        tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
        while not tdSql.queryResult:
            time.sleep(0.1)
            tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
        status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname, stablename), tb_nums)
        count = 0
        while not status_OK:
            if count > self.try_check_times:
                os.system("taos -s ' show {}.vgroups;'".format(dbname))
                tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count, self.try_check_times, dbname))
                break
            time.sleep(0.1)
            tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
            while not tdSql.queryResult:
                time.sleep(0.1)
                tdSql.query("select distinct tbname from {}.{}".format(dbname, stablename))
            status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname, stablename), tb_nums)
            tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count, dbname))
            count += 1

    def _get_stop_dnode_id(self, dbname):
        tdSql.query("show {}.vgroups".format(dbname))
        vgroup_infos = tdSql.queryResult
        for vgroup_info in vgroup_infos:
            leader_infos = vgroup_info[3:-4]
            # print(vgroup_info)
            for ind, role in enumerate(leader_infos):
                if role == 'follower':
                    # print(ind,leader_infos)
                    self.stop_dnode_id = leader_infos[ind-1]
                    break
        return self.stop_dnode_id

    def wait_stop_dnode_OK(self):
        def _get_status():
            newTdSql = tdCom.newTdSql()
            status = ""
            newTdSql.query("show dnodes")
            dnode_infos = newTdSql.queryResult
            for dnode_info in dnode_infos:
                id = dnode_info[0]
                dnode_status = dnode_info[4]
                if id == self.stop_dnode_id:
                    status = dnode_status
                    break
            return status
        status = _get_status()
        while status != "offline":
            time.sleep(0.1)
            status = _get_status()
            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
        tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))

    def wait_start_dnode_OK(self):
        def _get_status():
            newTdSql = tdCom.newTdSql()
            status = ""
            newTdSql.query("show dnodes")
            dnode_infos = newTdSql.queryResult
            for dnode_info in dnode_infos:
                id = dnode_info[0]
                dnode_status = dnode_info[4]
                if id == self.stop_dnode_id:
                    status = dnode_status
                    break
            return status
        status = _get_status()
        while status != "ready":
            time.sleep(0.1)
            status = _get_status()
            # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
        tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))

    def _parse_datetime(self, timestr):
        try:
            return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
        except ValueError:
            pass
        try:
            return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
        except ValueError:
            pass

    def mycheckRowCol(self, sql, row, col):
        caller = inspect.getframeinfo(inspect.stack()[2][0])
        if row < 0:
            args = (caller.filename, caller.lineno, sql, row)
            tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
        if col < 0:
            args = (caller.filename, caller.lineno, sql, row)
            tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
        if row > tdSql.queryRows:
            args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
            tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
        if col > tdSql.queryCols:
            args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
            tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)

    def mycheckData(self, sql, row, col, data):
        check_status = True
        self.mycheckRowCol(sql, row, col)
        if tdSql.queryResult[row][col] != data:
            if tdSql.cursor.istype(col, "TIMESTAMP"):
                # suppose user want to check nanosecond timestamp if a longer data passed
                if (len(data) >= 28):
                    if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
                        tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
                else:
                    if tdSql.queryResult[row][col] == self._parse_datetime(data):
                        tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (sql, row, col, tdSql.queryResult[row][col], data))
                return
            if str(tdSql.queryResult[row][col]) == str(data):
                tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
return
elif
isinstance
(
data
,
float
)
and
abs
(
tdSql
.
queryResult
[
row
][
col
]
-
data
)
<=
0.000001
:
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%f == expect:%f"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
return
else
:
caller
=
inspect
.
getframeinfo
(
inspect
.
stack
()[
1
][
0
])
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
)
tdLog
.
info
(
"%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s"
%
args
)
check_status
=
False
if
data
is
None
:
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
elif
isinstance
(
data
,
str
):
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
# elif isinstance(data, datetime.date):
# tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
# (sql, row, col, tdSql.queryResult[row][col], data))
elif
isinstance
(
data
,
float
):
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
else
:
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%d"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
return
check_status
def
mycheckRows
(
self
,
sql
,
expectRows
):
check_status
=
True
if
len
(
tdSql
.
queryResult
)
==
expectRows
:
tdLog
.
info
(
"sql:%s, queryRows:%d == expect:%d"
%
(
sql
,
len
(
tdSql
.
queryResult
),
expectRows
))
return
True
else
:
caller
=
inspect
.
getframeinfo
(
inspect
.
stack
()[
1
][
0
])
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
len
(
tdSql
.
queryResult
),
expectRows
)
tdLog
.
info
(
"%s(%d) failed: sql:%s, queryRows:%d != expect:%d"
%
args
)
check_status
=
False
return
check_status
def
force_stop_dnode
(
self
,
dnode_id
):
tdSql
.
query
(
"show dnodes"
)
port
=
None
for
dnode_info
in
tdSql
.
queryResult
:
if
dnode_id
==
dnode_info
[
0
]:
port
=
dnode_info
[
1
].
split
(
":"
)[
-
1
]
break
else
:
continue
if
port
:
tdLog
.
notice
(
" ==== dnode {} will be force stop by kill -9 ===="
.
format
(
dnode_id
))
psCmd
=
'''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1'''
%
(
port
)
processID
=
subprocess
.
check_output
(
psCmd
,
shell
=
True
).
decode
(
"utf-8"
)
ps_kill_taosd
=
''' kill -9 {} '''
.
format
(
processID
)
# print(ps_kill_taosd)
os
.
system
(
ps_kill_taosd
)
def
basic_query_task
(
self
,
dbname
,
stablename
):
sql
=
"select * from {}.{} ;"
.
format
(
dbname
,
stablename
)
count
=
0
while
count
<
self
.
query_times
:
os
.
system
(
''' taos -s '{}' >>/dev/null '''
.
format
(
sql
))
count
+=
1
def
multi_thread_query_task
(
self
,
thread_nums
,
dbname
,
stablename
):
for
i
in
range
(
thread_nums
):
task
=
threading
.
Thread
(
target
=
self
.
basic_query_task
,
args
=
(
dbname
,
stablename
))
self
.
thread_list
.
append
(
task
)
for
thread
in
self
.
thread_list
:
thread
.
start
()
return
self
.
thread_list
def
stop_follower_when_query_going
(
self
):
tdDnodes
=
cluster
.
dnodes
self
.
create_database
(
dbname
=
self
.
db_name
,
replica_num
=
self
.
replica
,
vgroup_nums
=
1
)
self
.
create_stable_insert_datas
(
dbname
=
self
.
db_name
,
stablename
=
"stb1"
,
tb_nums
=
self
.
tb_nums
,
row_nums
=
self
.
row_nums
)
# let query task start
self
.
thread_list
=
self
.
multi_thread_query_task
(
10
,
self
.
db_name
,
'stb1'
)
# force stop follower
for
loop
in
range
(
self
.
loop_restart_times
):
tdLog
.
debug
(
" ==== this is {}_th restart follower of database {} ==== "
.
format
(
loop
,
self
.
db_name
))
self
.
stop_dnode_id
=
self
.
_get_stop_dnode_id
(
self
.
db_name
)
self
.
force_stop_dnode
(
self
.
stop_dnode_id
)
self
.
wait_stop_dnode_OK
()
start
=
time
.
time
()
tdDnodes
[
self
.
stop_dnode_id
-
1
].
starttaosd
()
self
.
wait_start_dnode_OK
()
end
=
time
.
time
()
time_cost
=
int
(
end
-
start
)
if
time_cost
>
self
.
max_restart_time
:
tdLog
.
exit
(
" ==== restart dnode {} cost too much time , please check ===="
.
format
(
self
.
stop_dnode_id
))
for
thread
in
self
.
thread_list
:
thread
.
join
()
def
run
(
self
):
# basic check of cluster
self
.
check_setup_cluster_status
()
self
.
stop_follower_when_query_going
()
def
stop
(
self
):
tdSql
.
close
()
tdLog
.
success
(
f
"
{
__file__
}
successfully executed"
)
tdCases
.
addLinux
(
__file__
,
TDTestCase
())
tdCases
.
addWindows
(
__file__
,
TDTestCase
())
\ No newline at end of file
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py
0 → 100644
浏览文件 @
29649171
# author : wenzhouwww
from
ssl
import
ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import
taos
import
sys
import
time
import
os
from
util.log
import
*
from
util.sql
import
*
from
util.cases
import
*
from
util.dnodes
import
TDDnodes
from
util.dnodes
import
TDDnode
from
util.cluster
import
*
import
datetime
import
inspect
import
time
import
socket
import
subprocess
import
threading
sys
.
path
.
append
(
os
.
path
.
dirname
(
__file__
))
class
TDTestCase
:
def
init
(
self
,
conn
,
logSql
):
tdLog
.
debug
(
f
"start to excute
{
__file__
}
"
)
tdSql
.
init
(
conn
.
cursor
())
self
.
host
=
socket
.
gethostname
()
self
.
mnode_list
=
{}
self
.
dnode_list
=
{}
self
.
ts
=
1483200000000
self
.
ts_step
=
1000
self
.
db_name
=
'testdb'
self
.
replica
=
3
self
.
vgroups
=
1
self
.
tb_nums
=
10
self
.
row_nums
=
100
self
.
stop_dnode_id
=
None
self
.
loop_restart_times
=
5
self
.
thread_list
=
[]
self
.
max_restart_time
=
10
self
.
try_check_times
=
10
self
.
query_times
=
100
def
getBuildPath
(
self
):
selfPath
=
os
.
path
.
dirname
(
os
.
path
.
realpath
(
__file__
))
if
(
"community"
in
selfPath
):
projPath
=
selfPath
[:
selfPath
.
find
(
"community"
)]
else
:
projPath
=
selfPath
[:
selfPath
.
find
(
"tests"
)]
for
root
,
dirs
,
files
in
os
.
walk
(
projPath
):
if
(
"taosd"
in
files
):
rootRealPath
=
os
.
path
.
dirname
(
os
.
path
.
realpath
(
root
))
if
(
"packaging"
not
in
rootRealPath
):
buildPath
=
root
[:
len
(
root
)
-
len
(
"/build/bin"
)]
break
return
buildPath
def
check_setup_cluster_status
(
self
):
tdSql
.
query
(
"show mnodes"
)
for
mnode
in
tdSql
.
queryResult
:
name
=
mnode
[
1
]
info
=
mnode
self
.
mnode_list
[
name
]
=
info
tdSql
.
query
(
"show dnodes"
)
for
dnode
in
tdSql
.
queryResult
:
name
=
dnode
[
1
]
info
=
dnode
self
.
dnode_list
[
name
]
=
info
count
=
0
is_leader
=
False
mnode_name
=
''
for
k
,
v
in
self
.
mnode_list
.
items
():
count
+=
1
# only for 1 mnode
mnode_name
=
k
if
v
[
2
]
==
'leader'
:
is_leader
=
True
if
count
==
1
and
is_leader
:
tdLog
.
notice
(
"===== depoly cluster success with 1 mnode as leader ====="
)
else
:
tdLog
.
exit
(
"===== depoly cluster fail with 1 mnode as leader ====="
)
for
k
,
v
in
self
.
dnode_list
.
items
():
if
k
==
mnode_name
:
if
v
[
3
]
==
0
:
tdLog
.
notice
(
"===== depoly cluster mnode only success at {} , support_vnodes is {} "
.
format
(
mnode_name
,
v
[
3
]))
else
:
tdLog
.
exit
(
"===== depoly cluster mnode only fail at {} , support_vnodes is {} "
.
format
(
mnode_name
,
v
[
3
]))
else
:
continue
def
create_database
(
self
,
dbname
,
replica_num
,
vgroup_nums
):
drop_db_sql
=
"drop database if exists {}"
.
format
(
dbname
)
create_db_sql
=
"create database {} replica {} vgroups {}"
.
format
(
dbname
,
replica_num
,
vgroup_nums
)
tdLog
.
notice
(
" ==== create database {} and insert rows begin ====="
.
format
(
dbname
))
tdSql
.
execute
(
drop_db_sql
)
tdSql
.
execute
(
create_db_sql
)
tdSql
.
execute
(
"use {}"
.
format
(
dbname
))
def
create_stable_insert_datas
(
self
,
dbname
,
stablename
,
tb_nums
,
row_nums
):
tdSql
.
execute
(
"use {}"
.
format
(
dbname
))
tdSql
.
execute
(
'''create table {}
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
.
format
(
stablename
)
)
for
i
in
range
(
tb_nums
):
sub_tbname
=
"sub_{}_{}"
.
format
(
stablename
,
i
)
tdSql
.
execute
(
"create table {} using {} tags({})"
.
format
(
sub_tbname
,
stablename
,
i
))
# insert datas about new database
for
row_num
in
range
(
row_nums
):
ts
=
self
.
ts
+
self
.
ts_step
*
row_num
tdSql
.
execute
(
f
"insert into
{
sub_tbname
}
values (
{
ts
}
,
{
row_num
}
,
{
row_num
}
, 10 ,1 ,
{
row_num
}
,
{
row_num
}
,true,'bin_
{
row_num
}
','nchar_
{
row_num
}
',now) "
)
tdLog
.
notice
(
" ==== stable {} insert rows execute end ====="
.
format
(
stablename
))
def
append_rows_of_exists_tables
(
self
,
dbname
,
stablename
,
tbname
,
append_nums
):
tdSql
.
execute
(
"use {}"
.
format
(
dbname
))
for
row_num
in
range
(
append_nums
):
tdSql
.
execute
(
f
"insert into
{
tbname
}
values (now,
{
row_num
}
,
{
row_num
}
, 10 ,1 ,
{
row_num
}
,
{
row_num
}
,true,'bin_
{
row_num
}
','nchar_
{
row_num
}
',now) "
)
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog
.
notice
(
" ==== append new rows of table {} belongs to stable {} execute end ====="
.
format
(
tbname
,
stablename
))
os
.
system
(
"taos -s 'select count(*) from {}.{}';"
.
format
(
dbname
,
stablename
))
def
check_insert_rows
(
self
,
dbname
,
stablename
,
tb_nums
,
row_nums
,
append_rows
):
tdSql
.
execute
(
"use {}"
.
format
(
dbname
))
tdSql
.
query
(
"select count(*) from {}.{}"
.
format
(
dbname
,
stablename
))
while
not
tdSql
.
queryResult
:
time
.
sleep
(
0.1
)
tdSql
.
query
(
"select count(*) from {}.{}"
.
format
(
dbname
,
stablename
))
status_OK
=
self
.
mycheckData
(
"select count(*) from {}.{}"
.
format
(
dbname
,
stablename
)
,
0
,
0
,
tb_nums
*
row_nums
+
append_rows
)
count
=
0
while
not
status_OK
:
if
count
>
self
.
try_check_times
:
os
.
system
(
"taos -s ' show {}.vgroups; '"
.
format
(
dbname
))
tdLog
.
exit
(
" ==== check insert rows failed after {} try check {} times of database {}"
.
format
(
count
,
self
.
try_check_times
,
dbname
))
break
time
.
sleep
(
0.1
)
tdSql
.
query
(
"select count(*) from {}.{}"
.
format
(
dbname
,
stablename
))
while
not
tdSql
.
queryResult
:
time
.
sleep
(
0.1
)
tdSql
.
query
(
"select count(*) from {}.{}"
.
format
(
dbname
,
stablename
))
status_OK
=
self
.
mycheckData
(
"select count(*) from {}.{}"
.
format
(
dbname
,
stablename
)
,
0
,
0
,
tb_nums
*
row_nums
+
append_rows
)
tdLog
.
notice
(
" ==== check insert rows first failed , this is {}_th retry check rows of database {}"
.
format
(
count
,
dbname
))
count
+=
1
tdSql
.
query
(
"select distinct tbname from {}.{}"
.
format
(
dbname
,
stablename
))
while
not
tdSql
.
queryResult
:
time
.
sleep
(
0.1
)
tdSql
.
query
(
"select distinct tbname from {}.{}"
.
format
(
dbname
,
stablename
))
status_OK
=
self
.
mycheckRows
(
"select distinct tbname from {}.{}"
.
format
(
dbname
,
stablename
)
,
tb_nums
)
count
=
0
while
not
status_OK
:
if
count
>
self
.
try_check_times
:
os
.
system
(
"taos -s ' show {}.vgroups;'"
.
format
(
dbname
))
tdLog
.
exit
(
" ==== check insert rows failed after {} try check {} times of database {}"
.
format
(
count
,
self
.
try_check_times
,
dbname
))
break
time
.
sleep
(
0.1
)
tdSql
.
query
(
"select distinct tbname from {}.{}"
.
format
(
dbname
,
stablename
))
while
not
tdSql
.
queryResult
:
time
.
sleep
(
0.1
)
tdSql
.
query
(
"select distinct tbname from {}.{}"
.
format
(
dbname
,
stablename
))
status_OK
=
self
.
mycheckRows
(
"select distinct tbname from {}.{}"
.
format
(
dbname
,
stablename
)
,
tb_nums
)
tdLog
.
notice
(
" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}"
.
format
(
count
,
dbname
))
count
+=
1
def
_get_stop_dnode_id
(
self
,
dbname
):
tdSql
.
query
(
"show {}.vgroups"
.
format
(
dbname
))
vgroup_infos
=
tdSql
.
queryResult
for
vgroup_info
in
vgroup_infos
:
leader_infos
=
vgroup_info
[
3
:
-
4
]
# print(vgroup_info)
for
ind
,
role
in
enumerate
(
leader_infos
):
if
role
==
'leader'
:
# print(ind,leader_infos)
self
.
stop_dnode_id
=
leader_infos
[
ind
-
1
]
break
return
self
.
stop_dnode_id
def
wait_stop_dnode_OK
(
self
):
def
_get_status
():
newTdSql
=
tdCom
.
newTdSql
()
status
=
""
newTdSql
.
query
(
"show dnodes"
)
dnode_infos
=
newTdSql
.
queryResult
for
dnode_info
in
dnode_infos
:
id
=
dnode_info
[
0
]
dnode_status
=
dnode_info
[
4
]
if
id
==
self
.
stop_dnode_id
:
status
=
dnode_status
break
return
status
status
=
_get_status
()
while
status
!=
"offline"
:
time
.
sleep
(
0.1
)
status
=
_get_status
()
# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog
.
notice
(
"==== stop_dnode has stopped , id is {}"
.
format
(
self
.
stop_dnode_id
))
def
check_revote_leader_success
(
self
,
dbname
,
before_leader_infos
,
after_leader_infos
):
check_status
=
False
vote_act
=
set
(
set
(
after_leader_infos
)
-
set
(
before_leader_infos
))
if
not
vote_act
:
print
(
"=======before_revote_leader_infos ======
\n
"
,
before_leader_infos
)
print
(
"=======after_revote_leader_infos ======
\n
"
,
after_leader_infos
)
tdLog
.
info
(
" ===maybe revote not occured , there is no dnode offline ===="
)
else
:
for
vgroup_info
in
vote_act
:
for
ind
,
role
in
enumerate
(
vgroup_info
):
if
role
==
self
.
stop_dnode_id
:
if
vgroup_info
[
ind
+
1
]
==
"offline"
and
"leader"
in
vgroup_info
:
tdLog
.
notice
(
" === revote leader ok , leader is {} now ===="
.
format
(
vgroup_info
[
list
(
vgroup_info
).
index
(
"leader"
)
-
1
]))
check_status
=
True
elif
vgroup_info
[
ind
+
1
]
!=
"offline"
:
tdLog
.
notice
(
" === dnode {} should be offline "
.
format
(
self
.
stop_dnode_id
))
else
:
continue
break
return
check_status
def
wait_start_dnode_OK
(
self
):
def
_get_status
():
newTdSql
=
tdCom
.
newTdSql
()
status
=
""
newTdSql
.
query
(
"show dnodes"
)
dnode_infos
=
newTdSql
.
queryResult
for
dnode_info
in
dnode_infos
:
id
=
dnode_info
[
0
]
dnode_status
=
dnode_info
[
4
]
if
id
==
self
.
stop_dnode_id
:
status
=
dnode_status
break
return
status
status
=
_get_status
()
while
status
!=
"ready"
:
time
.
sleep
(
0.1
)
status
=
_get_status
()
# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog
.
notice
(
"==== stop_dnode has restart , id is {}"
.
format
(
self
.
stop_dnode_id
))
def
_parse_datetime
(
self
,
timestr
):
try
:
return
datetime
.
datetime
.
strptime
(
timestr
,
'%Y-%m-%d %H:%M:%S.%f'
)
except
ValueError
:
pass
try
:
return
datetime
.
datetime
.
strptime
(
timestr
,
'%Y-%m-%d %H:%M:%S'
)
except
ValueError
:
pass
def
mycheckRowCol
(
self
,
sql
,
row
,
col
):
caller
=
inspect
.
getframeinfo
(
inspect
.
stack
()[
2
][
0
])
if
row
<
0
:
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
row
)
tdLog
.
exit
(
"%s(%d) failed: sql:%s, row:%d is smaller than zero"
%
args
)
if
col
<
0
:
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
row
)
tdLog
.
exit
(
"%s(%d) failed: sql:%s, col:%d is smaller than zero"
%
args
)
if
row
>
tdSql
.
queryRows
:
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
row
,
tdSql
.
queryRows
)
tdLog
.
exit
(
"%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d"
%
args
)
if
col
>
tdSql
.
queryCols
:
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
col
,
tdSql
.
queryCols
)
tdLog
.
exit
(
"%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d"
%
args
)
def
mycheckData
(
self
,
sql
,
row
,
col
,
data
):
check_status
=
True
self
.
mycheckRowCol
(
sql
,
row
,
col
)
if
tdSql
.
queryResult
[
row
][
col
]
!=
data
:
if
tdSql
.
cursor
.
istype
(
col
,
"TIMESTAMP"
):
# suppose user want to check nanosecond timestamp if a longer data passed
if
(
len
(
data
)
>=
28
):
if
pd
.
to_datetime
(
tdSql
.
queryResult
[
row
][
col
])
==
pd
.
to_datetime
(
data
):
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%d == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
else
:
if
tdSql
.
queryResult
[
row
][
col
]
==
self
.
_parse_datetime
(
data
):
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
return
if
str
(
tdSql
.
queryResult
[
row
][
col
])
==
str
(
data
):
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
return
elif
isinstance
(
data
,
float
)
and
abs
(
tdSql
.
queryResult
[
row
][
col
]
-
data
)
<=
0.000001
:
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%f == expect:%f"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
return
else
:
caller
=
inspect
.
getframeinfo
(
inspect
.
stack
()[
1
][
0
])
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
)
tdLog
.
info
(
"%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s"
%
args
)
check_status
=
False
if
data
is
None
:
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
elif
isinstance
(
data
,
str
):
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
# elif isinstance(data, datetime.date):
# tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
# (sql, row, col, tdSql.queryResult[row][col], data))
elif
isinstance
(
data
,
float
):
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
else
:
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%d"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
return
check_status
def
mycheckRows
(
self
,
sql
,
expectRows
):
check_status
=
True
if
len
(
tdSql
.
queryResult
)
==
expectRows
:
tdLog
.
info
(
"sql:%s, queryRows:%d == expect:%d"
%
(
sql
,
len
(
tdSql
.
queryResult
),
expectRows
))
return
True
else
:
caller
=
inspect
.
getframeinfo
(
inspect
.
stack
()[
1
][
0
])
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
len
(
tdSql
.
queryResult
),
expectRows
)
tdLog
.
info
(
"%s(%d) failed: sql:%s, queryRows:%d != expect:%d"
%
args
)
check_status
=
False
return
check_status
def
get_leader_infos
(
self
,
dbname
):
newTdSql
=
tdCom
.
newTdSql
()
newTdSql
.
query
(
"show {}.vgroups"
.
format
(
dbname
))
vgroup_infos
=
newTdSql
.
queryResult
leader_infos
=
set
()
for
vgroup_info
in
vgroup_infos
:
leader_infos
.
add
(
vgroup_info
[
3
:
-
4
])
return
leader_infos
def
force_stop_dnode
(
self
,
dnode_id
):
tdSql
.
query
(
"show dnodes"
)
port
=
None
for
dnode_info
in
tdSql
.
queryResult
:
if
dnode_id
==
dnode_info
[
0
]:
port
=
dnode_info
[
1
].
split
(
":"
)[
-
1
]
break
else
:
continue
if
port
:
tdLog
.
notice
(
" ==== dnode {} will be force stop by kill -9 ===="
.
format
(
dnode_id
))
psCmd
=
'''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1'''
%
(
port
)
processID
=
subprocess
.
check_output
(
psCmd
,
shell
=
True
).
decode
(
"utf-8"
)
ps_kill_taosd
=
''' kill -9 {} '''
.
format
(
processID
)
# print(ps_kill_taosd)
os
.
system
(
ps_kill_taosd
)
def
basic_query_task
(
self
,
dbname
,
stablename
):
sql
=
"select * from {}.{} ;"
.
format
(
dbname
,
stablename
)
count
=
0
while
count
<
self
.
query_times
:
os
.
system
(
''' taos -s '{}' >>/dev/null '''
.
format
(
sql
))
count
+=
1
def
multi_thread_query_task
(
self
,
thread_nums
,
dbname
,
stablename
):
for
i
in
range
(
thread_nums
):
task
=
threading
.
Thread
(
target
=
self
.
basic_query_task
,
args
=
(
dbname
,
stablename
))
self
.
thread_list
.
append
(
task
)
for
thread
in
self
.
thread_list
:
thread
.
start
()
return
self
.
thread_list
def
stop_follower_when_query_going
(
self
):
tdDnodes
=
cluster
.
dnodes
self
.
create_database
(
dbname
=
self
.
db_name
,
replica_num
=
self
.
replica
,
vgroup_nums
=
1
)
self
.
create_stable_insert_datas
(
dbname
=
self
.
db_name
,
stablename
=
"stb1"
,
tb_nums
=
self
.
tb_nums
,
row_nums
=
self
.
row_nums
)
# let query task start
self
.
thread_list
=
self
.
multi_thread_query_task
(
10
,
self
.
db_name
,
'stb1'
)
# force stop follower
for
loop
in
range
(
self
.
loop_restart_times
):
tdLog
.
debug
(
" ==== this is {}_th restart follower of database {} ==== "
.
format
(
loop
,
self
.
db_name
))
# get leader info before stop
before_leader_infos
=
self
.
get_leader_infos
(
self
.
db_name
)
self
.
stop_dnode_id
=
self
.
_get_stop_dnode_id
(
self
.
db_name
)
tdDnodes
[
self
.
stop_dnode_id
-
1
].
stoptaosd
()
start
=
time
.
time
()
# get leader info after stop
after_leader_infos
=
self
.
get_leader_infos
(
self
.
db_name
)
revote_status
=
self
.
check_revote_leader_success
(
self
.
db_name
,
before_leader_infos
,
after_leader_infos
)
while
not
revote_status
:
after_leader_infos
=
self
.
get_leader_infos
(
self
.
db_name
)
revote_status
=
self
.
check_revote_leader_success
(
self
.
db_name
,
before_leader_infos
,
after_leader_infos
)
end
=
time
.
time
()
time_cost
=
end
-
start
tdLog
.
debug
(
" ==== revote leader of database {} cost time {} ===="
.
format
(
self
.
db_name
,
time_cost
))
self
.
wait_stop_dnode_OK
()
start
=
time
.
time
()
tdDnodes
[
self
.
stop_dnode_id
-
1
].
starttaosd
()
self
.
wait_start_dnode_OK
()
end
=
time
.
time
()
time_cost
=
int
(
end
-
start
)
if
time_cost
>
self
.
max_restart_time
:
tdLog
.
exit
(
" ==== restart dnode {} cost too much time , please check ===="
.
format
(
self
.
stop_dnode_id
))
for
thread
in
self
.
thread_list
:
thread
.
join
()
def
run
(
self
):
# basic check of cluster
self
.
check_setup_cluster_status
()
self
.
stop_follower_when_query_going
()
def
stop
(
self
):
tdSql
.
close
()
tdLog
.
success
(
f
"
{
__file__
}
successfully executed"
)
tdCases
.
addLinux
(
__file__
,
TDTestCase
())
tdCases
.
addWindows
(
__file__
,
TDTestCase
())
\ No newline at end of file
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py
0 → 100644
浏览文件 @
29649171
# author : wenzhouwww
from
ssl
import
ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import
taos
import
sys
import
time
import
os
from
util.log
import
*
from
util.sql
import
*
from
util.cases
import
*
from
util.dnodes
import
TDDnodes
from
util.dnodes
import
TDDnode
from
util.cluster
import
*
import
datetime
import
inspect
import
time
import
socket
import
subprocess
import
threading
sys
.
path
.
append
(
os
.
path
.
dirname
(
__file__
))
class
TDTestCase
:
def
init
(
self
,
conn
,
logSql
):
tdLog
.
debug
(
f
"start to excute
{
__file__
}
"
)
tdSql
.
init
(
conn
.
cursor
())
self
.
host
=
socket
.
gethostname
()
self
.
mnode_list
=
{}
self
.
dnode_list
=
{}
self
.
ts
=
1483200000000
self
.
ts_step
=
1000
self
.
db_name
=
'testdb'
self
.
replica
=
3
self
.
vgroups
=
1
self
.
tb_nums
=
10
self
.
row_nums
=
100
self
.
stop_dnode_id
=
None
self
.
loop_restart_times
=
5
self
.
thread_list
=
[]
self
.
max_restart_time
=
10
self
.
try_check_times
=
10
self
.
query_times
=
100
def
getBuildPath
(
self
):
selfPath
=
os
.
path
.
dirname
(
os
.
path
.
realpath
(
__file__
))
if
(
"community"
in
selfPath
):
projPath
=
selfPath
[:
selfPath
.
find
(
"community"
)]
else
:
projPath
=
selfPath
[:
selfPath
.
find
(
"tests"
)]
for
root
,
dirs
,
files
in
os
.
walk
(
projPath
):
if
(
"taosd"
in
files
):
rootRealPath
=
os
.
path
.
dirname
(
os
.
path
.
realpath
(
root
))
if
(
"packaging"
not
in
rootRealPath
):
buildPath
=
root
[:
len
(
root
)
-
len
(
"/build/bin"
)]
break
return
buildPath
def
check_setup_cluster_status
(
self
):
tdSql
.
query
(
"show mnodes"
)
for
mnode
in
tdSql
.
queryResult
:
name
=
mnode
[
1
]
info
=
mnode
self
.
mnode_list
[
name
]
=
info
tdSql
.
query
(
"show dnodes"
)
for
dnode
in
tdSql
.
queryResult
:
name
=
dnode
[
1
]
info
=
dnode
self
.
dnode_list
[
name
]
=
info
count
=
0
is_leader
=
False
mnode_name
=
''
for
k
,
v
in
self
.
mnode_list
.
items
():
count
+=
1
# only for 1 mnode
mnode_name
=
k
if
v
[
2
]
==
'leader'
:
is_leader
=
True
if
count
==
1
and
is_leader
:
tdLog
.
notice
(
"===== depoly cluster success with 1 mnode as leader ====="
)
else
:
tdLog
.
exit
(
"===== depoly cluster fail with 1 mnode as leader ====="
)
for
k
,
v
in
self
.
dnode_list
.
items
():
if
k
==
mnode_name
:
if
v
[
3
]
==
0
:
tdLog
.
notice
(
"===== depoly cluster mnode only success at {} , support_vnodes is {} "
.
format
(
mnode_name
,
v
[
3
]))
else
:
tdLog
.
exit
(
"===== depoly cluster mnode only fail at {} , support_vnodes is {} "
.
format
(
mnode_name
,
v
[
3
]))
else
:
continue
def
create_database
(
self
,
dbname
,
replica_num
,
vgroup_nums
):
drop_db_sql
=
"drop database if exists {}"
.
format
(
dbname
)
create_db_sql
=
"create database {} replica {} vgroups {}"
.
format
(
dbname
,
replica_num
,
vgroup_nums
)
tdLog
.
notice
(
" ==== create database {} and insert rows begin ====="
.
format
(
dbname
))
tdSql
.
execute
(
drop_db_sql
)
tdSql
.
execute
(
create_db_sql
)
tdSql
.
execute
(
"use {}"
.
format
(
dbname
))
def
create_stable_insert_datas
(
self
,
dbname
,
stablename
,
tb_nums
,
row_nums
):
tdSql
.
execute
(
"use {}"
.
format
(
dbname
))
tdSql
.
execute
(
'''create table {}
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
.
format
(
stablename
)
)
for
i
in
range
(
tb_nums
):
sub_tbname
=
"sub_{}_{}"
.
format
(
stablename
,
i
)
tdSql
.
execute
(
"create table {} using {} tags({})"
.
format
(
sub_tbname
,
stablename
,
i
))
# insert datas about new database
for
row_num
in
range
(
row_nums
):
ts
=
self
.
ts
+
self
.
ts_step
*
row_num
tdSql
.
execute
(
f
"insert into
{
sub_tbname
}
values (
{
ts
}
,
{
row_num
}
,
{
row_num
}
, 10 ,1 ,
{
row_num
}
,
{
row_num
}
,true,'bin_
{
row_num
}
','nchar_
{
row_num
}
',now) "
)
tdLog
.
notice
(
" ==== stable {} insert rows execute end ====="
.
format
(
stablename
))
def
append_rows_of_exists_tables
(
self
,
dbname
,
stablename
,
tbname
,
append_nums
):
tdSql
.
execute
(
"use {}"
.
format
(
dbname
))
for
row_num
in
range
(
append_nums
):
tdSql
.
execute
(
f
"insert into
{
tbname
}
values (now,
{
row_num
}
,
{
row_num
}
, 10 ,1 ,
{
row_num
}
,
{
row_num
}
,true,'bin_
{
row_num
}
','nchar_
{
row_num
}
',now) "
)
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog
.
notice
(
" ==== append new rows of table {} belongs to stable {} execute end ====="
.
format
(
tbname
,
stablename
))
os
.
system
(
"taos -s 'select count(*) from {}.{}';"
.
format
(
dbname
,
stablename
))
def
check_insert_rows
(
self
,
dbname
,
stablename
,
tb_nums
,
row_nums
,
append_rows
):
tdSql
.
execute
(
"use {}"
.
format
(
dbname
))
tdSql
.
query
(
"select count(*) from {}.{}"
.
format
(
dbname
,
stablename
))
while
not
tdSql
.
queryResult
:
time
.
sleep
(
0.1
)
tdSql
.
query
(
"select count(*) from {}.{}"
.
format
(
dbname
,
stablename
))
status_OK
=
self
.
mycheckData
(
"select count(*) from {}.{}"
.
format
(
dbname
,
stablename
)
,
0
,
0
,
tb_nums
*
row_nums
+
append_rows
)
count
=
0
while
not
status_OK
:
if
count
>
self
.
try_check_times
:
os
.
system
(
"taos -s ' show {}.vgroups; '"
.
format
(
dbname
))
tdLog
.
exit
(
" ==== check insert rows failed after {} try check {} times of database {}"
.
format
(
count
,
self
.
try_check_times
,
dbname
))
break
time
.
sleep
(
0.1
)
tdSql
.
query
(
"select count(*) from {}.{}"
.
format
(
dbname
,
stablename
))
while
not
tdSql
.
queryResult
:
time
.
sleep
(
0.1
)
tdSql
.
query
(
"select count(*) from {}.{}"
.
format
(
dbname
,
stablename
))
status_OK
=
self
.
mycheckData
(
"select count(*) from {}.{}"
.
format
(
dbname
,
stablename
)
,
0
,
0
,
tb_nums
*
row_nums
+
append_rows
)
tdLog
.
notice
(
" ==== check insert rows first failed , this is {}_th retry check rows of database {}"
.
format
(
count
,
dbname
))
count
+=
1
tdSql
.
query
(
"select distinct tbname from {}.{}"
.
format
(
dbname
,
stablename
))
while
not
tdSql
.
queryResult
:
time
.
sleep
(
0.1
)
tdSql
.
query
(
"select distinct tbname from {}.{}"
.
format
(
dbname
,
stablename
))
status_OK
=
self
.
mycheckRows
(
"select distinct tbname from {}.{}"
.
format
(
dbname
,
stablename
)
,
tb_nums
)
count
=
0
while
not
status_OK
:
if
count
>
self
.
try_check_times
:
os
.
system
(
"taos -s ' show {}.vgroups;'"
.
format
(
dbname
))
tdLog
.
exit
(
" ==== check insert rows failed after {} try check {} times of database {}"
.
format
(
count
,
self
.
try_check_times
,
dbname
))
break
time
.
sleep
(
0.1
)
tdSql
.
query
(
"select distinct tbname from {}.{}"
.
format
(
dbname
,
stablename
))
while
not
tdSql
.
queryResult
:
time
.
sleep
(
0.1
)
tdSql
.
query
(
"select distinct tbname from {}.{}"
.
format
(
dbname
,
stablename
))
status_OK
=
self
.
mycheckRows
(
"select distinct tbname from {}.{}"
.
format
(
dbname
,
stablename
)
,
tb_nums
)
tdLog
.
notice
(
" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}"
.
format
(
count
,
dbname
))
count
+=
1
def
_get_stop_dnode_id
(
self
,
dbname
):
tdSql
.
query
(
"show {}.vgroups"
.
format
(
dbname
))
vgroup_infos
=
tdSql
.
queryResult
for
vgroup_info
in
vgroup_infos
:
leader_infos
=
vgroup_info
[
3
:
-
4
]
# print(vgroup_info)
for
ind
,
role
in
enumerate
(
leader_infos
):
if
role
==
'leader'
:
# print(ind,leader_infos)
self
.
stop_dnode_id
=
leader_infos
[
ind
-
1
]
break
return
self
.
stop_dnode_id
def
wait_stop_dnode_OK
(
self
):
def
_get_status
():
newTdSql
=
tdCom
.
newTdSql
()
status
=
""
newTdSql
.
query
(
"show dnodes"
)
dnode_infos
=
newTdSql
.
queryResult
for
dnode_info
in
dnode_infos
:
id
=
dnode_info
[
0
]
dnode_status
=
dnode_info
[
4
]
if
id
==
self
.
stop_dnode_id
:
status
=
dnode_status
break
return
status
status
=
_get_status
()
while
status
!=
"offline"
:
time
.
sleep
(
0.1
)
status
=
_get_status
()
# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog
.
notice
(
"==== stop_dnode has stopped , id is {}"
.
format
(
self
.
stop_dnode_id
))
def
check_revote_leader_success
(
self
,
dbname
,
before_leader_infos
,
after_leader_infos
):
check_status
=
False
vote_act
=
set
(
set
(
after_leader_infos
)
-
set
(
before_leader_infos
))
if
not
vote_act
:
print
(
"=======before_revote_leader_infos ======
\n
"
,
before_leader_infos
)
print
(
"=======after_revote_leader_infos ======
\n
"
,
after_leader_infos
)
tdLog
.
info
(
" ===maybe revote not occured , there is no dnode offline ===="
)
else
:
for
vgroup_info
in
vote_act
:
for
ind
,
role
in
enumerate
(
vgroup_info
):
if
role
==
self
.
stop_dnode_id
:
if
vgroup_info
[
ind
+
1
]
==
"offline"
and
"leader"
in
vgroup_info
:
tdLog
.
notice
(
" === revote leader ok , leader is {} now ===="
.
format
(
vgroup_info
[
list
(
vgroup_info
).
index
(
"leader"
)
-
1
]))
check_status
=
True
elif
vgroup_info
[
ind
+
1
]
!=
"offline"
:
tdLog
.
notice
(
" === dnode {} should be offline "
.
format
(
self
.
stop_dnode_id
))
else
:
continue
break
return
check_status
def
wait_start_dnode_OK
(
self
):
def
_get_status
():
newTdSql
=
tdCom
.
newTdSql
()
status
=
""
newTdSql
.
query
(
"show dnodes"
)
dnode_infos
=
newTdSql
.
queryResult
for
dnode_info
in
dnode_infos
:
id
=
dnode_info
[
0
]
dnode_status
=
dnode_info
[
4
]
if
id
==
self
.
stop_dnode_id
:
status
=
dnode_status
break
return
status
status
=
_get_status
()
while
status
!=
"ready"
:
time
.
sleep
(
0.1
)
status
=
_get_status
()
# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog
.
notice
(
"==== stop_dnode has restart , id is {}"
.
format
(
self
.
stop_dnode_id
))
def
_parse_datetime
(
self
,
timestr
):
try
:
return
datetime
.
datetime
.
strptime
(
timestr
,
'%Y-%m-%d %H:%M:%S.%f'
)
except
ValueError
:
pass
try
:
return
datetime
.
datetime
.
strptime
(
timestr
,
'%Y-%m-%d %H:%M:%S'
)
except
ValueError
:
pass
def
mycheckRowCol
(
self
,
sql
,
row
,
col
):
caller
=
inspect
.
getframeinfo
(
inspect
.
stack
()[
2
][
0
])
if
row
<
0
:
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
row
)
tdLog
.
exit
(
"%s(%d) failed: sql:%s, row:%d is smaller than zero"
%
args
)
if
col
<
0
:
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
row
)
tdLog
.
exit
(
"%s(%d) failed: sql:%s, col:%d is smaller than zero"
%
args
)
if
row
>
tdSql
.
queryRows
:
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
row
,
tdSql
.
queryRows
)
tdLog
.
exit
(
"%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d"
%
args
)
if
col
>
tdSql
.
queryCols
:
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
col
,
tdSql
.
queryCols
)
tdLog
.
exit
(
"%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d"
%
args
)
def
mycheckData
(
self
,
sql
,
row
,
col
,
data
):
check_status
=
True
self
.
mycheckRowCol
(
sql
,
row
,
col
)
if
tdSql
.
queryResult
[
row
][
col
]
!=
data
:
if
tdSql
.
cursor
.
istype
(
col
,
"TIMESTAMP"
):
# suppose user want to check nanosecond timestamp if a longer data passed
if
(
len
(
data
)
>=
28
):
if
pd
.
to_datetime
(
tdSql
.
queryResult
[
row
][
col
])
==
pd
.
to_datetime
(
data
):
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%d == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
else
:
if
tdSql
.
queryResult
[
row
][
col
]
==
self
.
_parse_datetime
(
data
):
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
return
if
str
(
tdSql
.
queryResult
[
row
][
col
])
==
str
(
data
):
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
return
elif
isinstance
(
data
,
float
)
and
abs
(
tdSql
.
queryResult
[
row
][
col
]
-
data
)
<=
0.000001
:
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%f == expect:%f"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
return
else
:
caller
=
inspect
.
getframeinfo
(
inspect
.
stack
()[
1
][
0
])
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
)
tdLog
.
info
(
"%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s"
%
args
)
check_status
=
False
if
data
is
None
:
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
elif
isinstance
(
data
,
str
):
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
# elif isinstance(data, datetime.date):
# tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
# (sql, row, col, tdSql.queryResult[row][col], data))
elif
isinstance
(
data
,
float
):
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%s"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
else
:
tdLog
.
info
(
"sql:%s, row:%d col:%d data:%s == expect:%d"
%
(
sql
,
row
,
col
,
tdSql
.
queryResult
[
row
][
col
],
data
))
return
check_status
def
mycheckRows
(
self
,
sql
,
expectRows
):
check_status
=
True
if
len
(
tdSql
.
queryResult
)
==
expectRows
:
tdLog
.
info
(
"sql:%s, queryRows:%d == expect:%d"
%
(
sql
,
len
(
tdSql
.
queryResult
),
expectRows
))
return
True
else
:
caller
=
inspect
.
getframeinfo
(
inspect
.
stack
()[
1
][
0
])
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
len
(
tdSql
.
queryResult
),
expectRows
)
tdLog
.
info
(
"%s(%d) failed: sql:%s, queryRows:%d != expect:%d"
%
args
)
check_status
=
False
return
check_status
def
get_leader_infos
(
self
,
dbname
):
newTdSql
=
tdCom
.
newTdSql
()
newTdSql
.
query
(
"show {}.vgroups"
.
format
(
dbname
))
vgroup_infos
=
newTdSql
.
queryResult
leader_infos
=
set
()
for
vgroup_info
in
vgroup_infos
:
leader_infos
.
add
(
vgroup_info
[
3
:
-
4
])
return
leader_infos
def
force_stop_dnode
(
self
,
dnode_id
):
tdSql
.
query
(
"show dnodes"
)
port
=
None
for
dnode_info
in
tdSql
.
queryResult
:
if
dnode_id
==
dnode_info
[
0
]:
port
=
dnode_info
[
1
].
split
(
":"
)[
-
1
]
break
else
:
continue
if
port
:
tdLog
.
notice
(
" ==== dnode {} will be force stop by kill -9 ===="
.
format
(
dnode_id
))
psCmd
=
'''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1'''
%
(
port
)
processID
=
subprocess
.
check_output
(
psCmd
,
shell
=
True
).
decode
(
"utf-8"
)
ps_kill_taosd
=
''' kill -9 {} '''
.
format
(
processID
)
# print(ps_kill_taosd)
os
.
system
(
ps_kill_taosd
)
def
basic_query_task
(
self
,
dbname
,
stablename
):
sql
=
"select * from {}.{} ;"
.
format
(
dbname
,
stablename
)
count
=
0
while
count
<
self
.
query_times
:
os
.
system
(
''' taos -s '{}' >>/dev/null '''
.
format
(
sql
))
count
+=
1
def
multi_thread_query_task
(
self
,
thread_nums
,
dbname
,
stablename
):
for
i
in
range
(
thread_nums
):
task
=
threading
.
Thread
(
target
=
self
.
basic_query_task
,
args
=
(
dbname
,
stablename
))
self
.
thread_list
.
append
(
task
)
for
thread
in
self
.
thread_list
:
thread
.
start
()
return
self
.
thread_list
def
stop_follower_when_query_going
(
self
):
tdDnodes
=
cluster
.
dnodes
self
.
create_database
(
dbname
=
self
.
db_name
,
replica_num
=
self
.
replica
,
vgroup_nums
=
1
)
self
.
create_stable_insert_datas
(
dbname
=
self
.
db_name
,
stablename
=
"stb1"
,
tb_nums
=
self
.
tb_nums
,
row_nums
=
self
.
row_nums
)
# let query task start
self
.
thread_list
=
self
.
multi_thread_query_task
(
10
,
self
.
db_name
,
'stb1'
)
# force stop follower
for
loop
in
range
(
self
.
loop_restart_times
):
tdLog
.
debug
(
" ==== this is {}_th restart follower of database {} ==== "
.
format
(
loop
,
self
.
db_name
))
# get leader info before stop
before_leader_infos
=
self
.
get_leader_infos
(
self
.
db_name
)
self
.
stop_dnode_id
=
self
.
_get_stop_dnode_id
(
self
.
db_name
)
self
.
force_stop_dnode
(
self
.
stop_dnode_id
)
start
=
time
.
time
()
# get leader info after stop
after_leader_infos
=
self
.
get_leader_infos
(
self
.
db_name
)
revote_status
=
self
.
check_revote_leader_success
(
self
.
db_name
,
before_leader_infos
,
after_leader_infos
)
while
not
revote_status
:
after_leader_infos
=
self
.
get_leader_infos
(
self
.
db_name
)
revote_status
=
self
.
check_revote_leader_success
(
self
.
db_name
,
before_leader_infos
,
after_leader_infos
)
end
=
time
.
time
()
time_cost
=
end
-
start
tdLog
.
debug
(
" ==== revote leader of database {} cost time {} ===="
.
format
(
self
.
db_name
,
time_cost
))
self
.
wait_stop_dnode_OK
()
start
=
time
.
time
()
tdDnodes
[
self
.
stop_dnode_id
-
1
].
starttaosd
()
self
.
wait_start_dnode_OK
()
end
=
time
.
time
()
time_cost
=
int
(
end
-
start
)
if
time_cost
>
self
.
max_restart_time
:
tdLog
.
exit
(
" ==== restart dnode {} cost too much time , please check ===="
.
format
(
self
.
stop_dnode_id
))
for
thread
in
self
.
thread_list
:
thread
.
join
()
def
run
(
self
):
# basic check of cluster
self
.
check_setup_cluster_status
()
self
.
stop_follower_when_query_going
()
def
stop
(
self
):
tdSql
.
close
()
tdLog
.
success
(
f
"
{
__file__
}
successfully executed"
)
tdCases
.
addLinux
(
__file__
,
TDTestCase
())
tdCases
.
addWindows
(
__file__
,
TDTestCase
())
\ No newline at end of file
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py
浏览文件 @
29649171
...
...
@@ -71,14 +71,14 @@ class TDTestCase:
is_leader
=
True
if
count
==
1
and
is_leader
:
tdLog
.
info
(
"===== depoly cluster success with 1 mnode as leader ====="
)
tdLog
.
notice
(
"===== depoly cluster success with 1 mnode as leader ====="
)
else
:
tdLog
.
exit
(
"===== depoly cluster fail with 1 mnode as leader ====="
)
for
k
,
v
in
self
.
dnode_list
.
items
():
if
k
==
mnode_name
:
if
v
[
3
]
==
0
:
tdLog
.
info
(
"===== depoly cluster mnode only success at {} , support_vnodes is {} "
.
format
(
mnode_name
,
v
[
3
]))
tdLog
.
notice
(
"===== depoly cluster mnode only success at {} , support_vnodes is {} "
.
format
(
mnode_name
,
v
[
3
]))
else
:
tdLog
.
exit
(
"===== depoly cluster mnode only fail at {} , support_vnodes is {} "
.
format
(
mnode_name
,
v
[
3
]))
else
:
...
...
@@ -121,7 +121,7 @@ class TDTestCase:
for
k
,
v
in
vgroups_infos
.
items
():
if
len
(
v
)
==
1
and
v
[
0
]
==
"leader"
:
tdLog
.
info
(
" === create database replica only 1 role leader check success of vgroup_id {} ======"
.
format
(
k
))
tdLog
.
notice
(
" === create database replica only 1 role leader check success of vgroup_id {} ======"
.
format
(
k
))
else
:
tdLog
.
exit
(
" === create database replica only 1 role leader check fail of vgroup_id {} ======"
.
format
(
k
))
...
...
@@ -152,10 +152,10 @@ class TDTestCase:
time
.
sleep
(
0.1
)
status
=
self
.
check_vgroups_init_done
(
dbname
)
# tdLog.
info
("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
# tdLog.
notice
("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end
=
time
.
time
()
cost_time
=
end
-
start
tdLog
.
info
(
" ==== database %s vote the leaders success , cost time is %.3f second ====="
%
(
dbname
,
cost_time
)
)
tdLog
.
notice
(
" ==== database %s vote the leaders success , cost time is %.3f second ====="
%
(
dbname
,
cost_time
)
)
# os.system("taos -s 'show {}.vgroups;'".format(dbname))
if
cost_time
>=
self
.
max_vote_time_cost
:
tdLog
.
exit
(
" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="
%
(
dbname
,
cost_time
)
)
...
...
@@ -165,28 +165,28 @@ class TDTestCase:
def
test_init_vgroups_time_costs
(
self
):
tdLog
.
info
(
" ====start check time cost about vgroups vote leaders ==== "
)
tdLog
.
info
(
" ==== current max time cost is set value : {} ======="
.
format
(
self
.
max_vote_time_cost
))
tdLog
.
notice
(
" ====start check time cost about vgroups vote leaders ==== "
)
tdLog
.
notice
(
" ==== current max time cost is set value : {} ======="
.
format
(
self
.
max_vote_time_cost
))
# create database replica 3 vgroups 1
db1
=
'db_1'
create_db_replica_3_vgroups_1
=
"create database {} replica 3 vgroups 1"
.
format
(
db1
)
tdLog
.
info
(
'=======database {} replica 3 vgroups 1 ======'
.
format
(
db1
))
tdLog
.
notice
(
'=======database {} replica 3 vgroups 1 ======'
.
format
(
db1
))
tdSql
.
execute
(
create_db_replica_3_vgroups_1
)
self
.
vote_leader_time_costs
(
db1
)
# create database replica 3 vgroups 10
db2
=
'db_2'
create_db_replica_3_vgroups_10
=
"create database {} replica 3 vgroups 10"
.
format
(
db2
)
tdLog
.
info
(
'=======database {} replica 3 vgroups 10 ======'
.
format
(
db2
))
tdLog
.
notice
(
'=======database {} replica 3 vgroups 10 ======'
.
format
(
db2
))
tdSql
.
execute
(
create_db_replica_3_vgroups_10
)
self
.
vote_leader_time_costs
(
db2
)
# create database replica 3 vgroups 100
db3
=
'db_3'
create_db_replica_3_vgroups_100
=
"create database {} replica 3 vgroups 100"
.
format
(
db3
)
tdLog
.
info
(
'=======database {} replica 3 vgroups 100 ======'
.
format
(
db3
))
tdLog
.
notice
(
'=======database {} replica 3 vgroups 100 ======'
.
format
(
db3
))
tdSql
.
execute
(
create_db_replica_3_vgroups_100
)
self
.
vote_leader_time_costs
(
db3
)
...
...
tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py
浏览文件 @
29649171
...
...
@@ -74,14 +74,14 @@ class TDTestCase:
is_leader
=
True
if
count
==
1
and
is_leader
:
tdLog
.
info
(
"===== depoly cluster success with 1 mnode as leader ====="
)
tdLog
.
notice
(
"===== depoly cluster success with 1 mnode as leader ====="
)
else
:
tdLog
.
exit
(
"===== depoly cluster fail with 1 mnode as leader ====="
)
for
k
,
v
in
self
.
dnode_list
.
items
():
if
k
==
mnode_name
:
if
v
[
3
]
==
0
:
tdLog
.
info
(
"===== depoly cluster mnode only success at {} , support_vnodes is {} "
.
format
(
mnode_name
,
v
[
3
]))
tdLog
.
notice
(
"===== depoly cluster mnode only success at {} , support_vnodes is {} "
.
format
(
mnode_name
,
v
[
3
]))
else
:
tdLog
.
exit
(
"===== depoly cluster mnode only fail at {} , support_vnodes is {} "
.
format
(
mnode_name
,
v
[
3
]))
else
:
...
...
@@ -124,7 +124,7 @@ class TDTestCase:
for
k
,
v
in
vgroups_infos
.
items
():
if
len
(
v
)
==
1
and
v
[
0
]
==
"leader"
:
tdLog
.
info
(
" === create database replica only 1 role leader check success of vgroup_id {} ======"
.
format
(
k
))
tdLog
.
notice
(
" === create database replica only 1 role leader check success of vgroup_id {} ======"
.
format
(
k
))
else
:
tdLog
.
exit
(
" === create database replica only 1 role leader check fail of vgroup_id {} ======"
.
format
(
k
))
...
...
@@ -148,7 +148,7 @@ class TDTestCase:
if
ind
%
2
==
0
:
if
role
==
stop_dnode_id
and
vgroups_leader_follower
[
ind
+
1
]
==
"offline"
:
tdLog
.
info
(
"====== dnode {} has offline , endpoint is {}"
.
format
(
stop_dnode_id
,
self
.
stop_dnode
))
tdLog
.
notice
(
"====== dnode {} has offline , endpoint is {}"
.
format
(
stop_dnode_id
,
self
.
stop_dnode
))
elif
role
==
stop_dnode_id
:
tdLog
.
exit
(
"====== dnode {} has not offline , endpoint is {}"
.
format
(
stop_dnode_id
,
self
.
stop_dnode
))
else
:
...
...
@@ -180,8 +180,8 @@ class TDTestCase:
while
status
!=
"offline"
:
time
.
sleep
(
0.1
)
status
=
_get_status
()
# tdLog.
info
("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog
.
info
(
"==== stop_dnode has stopped , endpoint is {}"
.
format
(
self
.
stop_dnode
))
# tdLog.
notice
("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog
.
notice
(
"==== stop_dnode has stopped , endpoint is {}"
.
format
(
self
.
stop_dnode
))
def
wait_start_dnode_OK
(
self
):
...
...
@@ -202,15 +202,15 @@ class TDTestCase:
while
status
!=
"ready"
:
time
.
sleep
(
0.1
)
status
=
_get_status
()
# tdLog.
info
("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog
.
info
(
"==== stop_dnode has restart , endpoint is {}"
.
format
(
self
.
stop_dnode
))
# tdLog.
notice
("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog
.
notice
(
"==== stop_dnode has restart , endpoint is {}"
.
format
(
self
.
stop_dnode
))
def
random_stop_One_dnode
(
self
):
self
.
stop_dnode
=
self
.
_get_stop_dnode
()
stop_dnode_id
=
self
.
dnode_list
[
self
.
stop_dnode
][
0
]
tdLog
.
info
(
" ==== dnode {} will offline ,endpoints is {} ===="
.
format
(
stop_dnode_id
,
self
.
stop_dnode
))
tdLog
.
notice
(
" ==== dnode {} will offline ,endpoints is {} ===="
.
format
(
stop_dnode_id
,
self
.
stop_dnode
))
tdDnodes
=
cluster
.
dnodes
tdDnodes
[
stop_dnode_id
-
1
].
stoptaosd
()
self
.
wait_stop_dnode_OK
()
...
...
@@ -250,10 +250,10 @@ class TDTestCase:
time
.
sleep
(
0.1
)
status
=
self
.
check_vgroups_init_done
(
dbname
)
# tdLog.
info
("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
# tdLog.
notice
("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end
=
time
.
time
()
cost_time
=
end
-
start
tdLog
.
info
(
" ==== database %s vote the leaders success , cost time is %.3f second ====="
%
(
dbname
,
cost_time
)
)
tdLog
.
notice
(
" ==== database %s vote the leaders success , cost time is %.3f second ====="
%
(
dbname
,
cost_time
)
)
# os.system("taos -s 'show {}.vgroups;'".format(dbname))
if
cost_time
>=
self
.
max_vote_time_cost
:
tdLog
.
exit
(
" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="
%
(
dbname
,
cost_time
)
)
...
...
@@ -269,10 +269,10 @@ class TDTestCase:
time
.
sleep
(
0.1
)
status
=
self
.
check_vgroups_revote_leader
(
dbname
)
# tdLog.
info
("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
# tdLog.
notice
("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end
=
time
.
time
()
cost_time
=
end
-
start
tdLog
.
info
(
" ==== database %s revote the leaders success , cost time is %.3f second ====="
%
(
dbname
,
cost_time
)
)
tdLog
.
notice
(
" ==== database %s revote the leaders success , cost time is %.3f second ====="
%
(
dbname
,
cost_time
)
)
# os.system("taos -s 'show {}.vgroups;'".format(dbname))
if
cost_time
>=
self
.
max_vote_time_cost
:
tdLog
.
exit
(
" ==== database %s revote the leaders cost too large time , cost time is %.3f second ===="
%
(
dbname
,
cost_time
)
)
...
...
@@ -306,7 +306,7 @@ class TDTestCase:
if
role
==
self
.
dnode_list
[
self
.
stop_dnode
][
0
]:
if
vgroup_info
[
ind
+
1
]
==
"offline"
and
"leader"
in
vgroup_info
:
tdLog
.
info
(
" === revote leader ok , leader is {} now ===="
.
format
(
list
(
vgroup_info
).
index
(
"leader"
)
-
1
))
tdLog
.
notice
(
" === revote leader ok , leader is {} now ===="
.
format
(
list
(
vgroup_info
).
index
(
"leader"
)
-
1
))
elif
vgroup_info
[
ind
+
1
]
!=
"offline"
:
tdLog
.
exit
(
" === dnode {} should be offline "
.
format
(
self
.
stop_dnode
))
else
:
...
...
@@ -319,14 +319,14 @@ class TDTestCase:
self
.
Restart_stop_dnode
()
def
test_init_vgroups_time_costs
(
self
):
tdLog
.
info
(
" ====start check time cost about vgroups vote leaders ==== "
)
tdLog
.
info
(
" ==== current max time cost is set value : {} ======="
.
format
(
self
.
max_vote_time_cost
))
tdLog
.
notice
(
" ====start check time cost about vgroups vote leaders ==== "
)
tdLog
.
notice
(
" ==== current max time cost is set value : {} ======="
.
format
(
self
.
max_vote_time_cost
))
# create database replica 3 vgroups 1
db1
=
'db_1'
create_db_replica_3_vgroups_1
=
"create database {} replica 3 vgroups 1"
.
format
(
db1
)
tdLog
.
info
(
'=======database {} replica 3 vgroups 1 ======'
.
format
(
db1
))
tdLog
.
notice
(
'=======database {} replica 3 vgroups 1 ======'
.
format
(
db1
))
tdSql
.
execute
(
create_db_replica_3_vgroups_1
)
self
.
vote_leader_time_costs
(
db1
)
self
.
exec_revote_action
(
db1
)
...
...
@@ -334,7 +334,7 @@ class TDTestCase:
# create database replica 3 vgroups 10
db2
=
'db_2'
create_db_replica_3_vgroups_10
=
"create database {} replica 3 vgroups 10"
.
format
(
db2
)
tdLog
.
info
(
'=======database {} replica 3 vgroups 10 ======'
.
format
(
db2
))
tdLog
.
notice
(
'=======database {} replica 3 vgroups 10 ======'
.
format
(
db2
))
tdSql
.
execute
(
create_db_replica_3_vgroups_10
)
self
.
vote_leader_time_costs
(
db2
)
self
.
exec_revote_action
(
db2
)
...
...
@@ -342,7 +342,7 @@ class TDTestCase:
# create database replica 3 vgroups 100
db3
=
'db_3'
create_db_replica_3_vgroups_100
=
"create database {} replica 3 vgroups 100"
.
format
(
db3
)
tdLog
.
info
(
'=======database {} replica 3 vgroups 100 ======'
.
format
(
db3
))
tdLog
.
notice
(
'=======database {} replica 3 vgroups 100 ======'
.
format
(
db3
))
tdSql
.
execute
(
create_db_replica_3_vgroups_100
)
self
.
vote_leader_time_costs
(
db3
)
self
.
exec_revote_action
(
db3
)
...
...
tests/system-test/6-cluster/vnode/insert_100W_rows.json
0 → 100644
浏览文件 @
29649171
{
"filetype"
:
"insert"
,
"cfgdir"
:
"/etc/taos/"
,
"host"
:
"localhost"
,
"port"
:
6030
,
"user"
:
"root"
,
"password"
:
"taosdata"
,
"thread_count"
:
10
,
"create_table_thread_count"
:
10
,
"confirm_parameter_prompt"
:
"no"
,
"insert_interval"
:
0
,
"interlace_rows"
:
1000
,
"num_of_records_per_req"
:
1000
,
"databases"
:
[
{
"dbinfo"
:
{
"name"
:
"db_2"
,
"drop"
:
"no"
,
"vgroups"
:
1
,
"replica"
:
3
},
"super_tables"
:
[
{
"name"
:
"stb1"
,
"childtable_count"
:
10
,
"childtable_prefix"
:
"sub_"
,
"auto_create_table"
:
"yes"
,
"batch_create_tbl_num"
:
5000
,
"data_source"
:
"rand"
,
"insert_mode"
:
"taosc"
,
"insert_rows"
:
100000
,
"interlace_rows"
:
0
,
"insert_interval"
:
0
,
"max_sql_len"
:
1000000
,
"disorder_ratio"
:
0
,
"disorder_range"
:
1000
,
"timestamp_step"
:
10
,
"start_timestamp"
:
"2015-05-01 00:00:00.000"
,
"sample_format"
:
"csv"
,
"use_sample_ts"
:
"no"
,
"tags_file"
:
""
,
"columns"
:
[
{
"type"
:
"INT"
,
"count"
:
1
},
{
"type"
:
"TINYINT"
,
"count"
:
1
},
{
"type"
:
"SMALLINT"
,
"count"
:
1
},
{
"type"
:
"BIGINT"
,
"count"
:
1
},
{
"type"
:
"UINT"
,
"count"
:
1
},
{
"type"
:
"UTINYINT"
,
"count"
:
1
},
{
"type"
:
"USMALLINT"
,
"count"
:
1
},
{
"type"
:
"UBIGINT"
,
"count"
:
1
},
{
"type"
:
"DOUBLE"
,
"count"
:
1
},
{
"type"
:
"FLOAT"
,
"count"
:
1
},
{
"type"
:
"BINARY"
,
"len"
:
40
,
"count"
:
1
},
{
"type"
:
"VARCHAR"
,
"len"
:
200
,
"count"
:
1
},
{
"type"
:
"nchar"
,
"len"
:
200
,
"count"
:
1
}
],
"tags"
:
[
{
"type"
:
"INT"
,
"count"
:
1
},
{
"type"
:
"BINARY"
,
"len"
:
100
,
"count"
:
1
},
{
"type"
:
"BOOL"
,
"count"
:
1
}
]
}
]
}
]
}
\ No newline at end of file
tests/system-test/6-cluster/vnode/insert_10W_rows.json
0 → 100644
浏览文件 @
29649171
{
    "filetype": "insert",
    "cfgdir": "/etc/taos/",
    "host": "localhost",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "thread_count": 1,
    "create_table_thread_count": 1,
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "interlace_rows": 1000,
    "num_of_records_per_req": 1000,
    "databases": [
        {
            "dbinfo": {
                "name": "db_1",
                "drop": "no",
                "vgroups": 1,
                "replica": 3
            },
            "super_tables": [
                {
                    "name": "stb1",
                    "childtable_count": 10,
                    "childtable_prefix": "sub_",
                    "auto_create_table": "yes",
                    "batch_create_tbl_num": 5000,
                    "data_source": "rand",
                    "insert_mode": "taosc",
                    "insert_rows": 10000,
                    "interlace_rows": 0,
                    "insert_interval": 0,
                    "max_sql_len": 1000000,
                    "disorder_ratio": 0,
                    "disorder_range": 1000,
                    "timestamp_step": 10,
                    "start_timestamp": "2015-05-01 00:00:00.000",
                    "sample_format": "csv",
                    "use_sample_ts": "no",
                    "tags_file": "",
                    "columns": [
                        {"type": "INT", "count": 1},
                        {"type": "TINYINT", "count": 1},
                        {"type": "SMALLINT", "count": 1},
                        {"type": "BIGINT", "count": 1},
                        {"type": "UINT", "count": 1},
                        {"type": "UTINYINT", "count": 1},
                        {"type": "USMALLINT", "count": 1},
                        {"type": "UBIGINT", "count": 1},
                        {"type": "DOUBLE", "count": 1},
                        {"type": "FLOAT", "count": 1},
                        {"type": "BINARY", "len": 40, "count": 1},
                        {"type": "VARCHAR", "len": 200, "count": 1},
                        {"type": "nchar", "len": 200, "count": 1}
                    ],
                    "tags": [
                        {"type": "INT", "count": 1},
                        {"type": "BINARY", "len": 100, "count": 1},
                        {"type": "BOOL", "count": 1}
                    ]
                }
            ]
        }
    ]
}
\ No newline at end of file
tests/system-test/7-tmq/tmqSubscribeStb-r3.py
0 → 100644
from distutils.log import error
import taos
import sys
import time
import socket
import os
import threading
import subprocess
import platform

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
from util.common import *

sys.path.append("./7-tmq")
from tmqCommon import *

class TDTestCase:
    def __init__(self):
        self.snapshot = 0
        self.replica = 3
        self.vgroups = 4
        self.ctbNum = 1000
        self.rowsPerTbl = 100

    def init(self, conn, logSql):
        tdLog.debug(f"start to excute {__file__}")
        tdSql.init(conn.cursor())
        #tdSql.init(conn.cursor(), logSql)  # output sql.txt file

    def checkFileContent(self, consumerId, queryString):
        buildPath = tdCom.getBuildPath()
        cfgPath = tdCom.getClientCfgPath()
        dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId)
        cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
        tdLog.info(cmdStr)
        os.system(cmdStr)

        consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
        tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))

        consumeFile = open(consumeRowsFile, mode='r')
        queryFile = open(dstFile, mode='r')

        # skip first line for it is schema
        queryFile.readline()

        while True:
            dst = queryFile.readline()
            src = consumeFile.readline()

            if dst:
                if dst != src:
                    tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
            else:
                break
        return

    def prepareTestEnv(self):
        tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
        paraDict = {'dbName':     'dbt',
                    'dropFlag':   1,
                    'event':      '',
                    'vgroups':    4,
                    'stbName':    'stb',
                    'colPrefix':  'c',
                    'tagPrefix':  't',
                    'colSchema':  [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema':  [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix':  'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum':     100,
                    'rowsPerTbl': 1000,
                    'batchNum':   100,
                    'startTs':    1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay':  3,
                    'showMsg':    1,
                    'showRow':    1,
                    'snapshot':   0}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        tmqCom.initConsumerTable()
        tdCom.create_database(tdSql, paraDict["dbName"], paraDict["dropFlag"], vgroups=paraDict["vgroups"], replica=self.replica)
        tdLog.info("create stb")
        tmqCom.create_stable(tdSql, dbName=paraDict["dbName"], stbName=paraDict["stbName"])
        tdLog.info("create ctb")
        tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"], stbName=paraDict["stbName"], ctbPrefix=paraDict['ctbPrefix'],
                             ctbNum=paraDict["ctbNum"], ctbStartIdx=paraDict['ctbStartIdx'])
        tdLog.info("insert data")
        # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
        #                                        ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
        #                                        startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
        # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
        #                                       ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
        #                                       startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
        tmqCom.asyncInsertDataByInterlace(paraDict)

        tdLog.info("wait some data inserted")
        exitFlag = 1
        while exitFlag:
            queryString = "select count(*) from %s.%s"%(paraDict["dbName"], paraDict["stbName"])
            tdSql.query(queryString)
            if tdSql.getRows() > 0:
                rowsInserted = tdSql.getData(0, 0)
                if (rowsInserted > ((self.ctbNum * self.rowsPerTbl) / 5)):
                    exitFlag = 0
            time.sleep(0.1)

        tdLog.info("inserted rows: %d"%tdSql.getData(0, 0))
        # tdDnodes=cluster.dnodes
        tdLog.info("================= restart dnode 2===========================")
        cluster.dnodes[1].stoptaosd()
        cluster.dnodes[1].starttaosd()
        tdLog.info("================= restart dnode 3===========================")
        cluster.dnodes[2].stoptaosd()
        cluster.dnodes[2].starttaosd()
        tdLog.info("================= restart dnode 4===========================")
        cluster.dnodes[3].stoptaosd()
        cluster.dnodes[3].starttaosd()
        tdLog.info("================= restart dnode 5===========================")
        cluster.dnodes[4].stoptaosd()
        cluster.dnodes[4].starttaosd()

        # tdLog.info("restart taosd to ensure that the data falls into the disk")
        # tdSql.query("flush database %s"%(paraDict['dbName']))
        return

    def tmqCase1(self):
        tdLog.printNoPrefix("======== test case 1: ")

        # create and start thread
        paraDict = {'dbName':     'dbt',
                    'dropFlag':   1,
                    'event':      '',
                    'vgroups':    4,
                    'stbName':    'stb',
                    'colPrefix':  'c',
                    'tagPrefix':  't',
                    'colSchema':  [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema':  [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix':  'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum':     100,
                    'rowsPerTbl': 1000,
                    'batchNum':   100,
                    'startTs':    1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay':  15,
                    'showMsg':    1,
                    'showRow':    1,
                    'snapshot':   0}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        tdLog.info("create topics from stb1")
        topicFromStb1 = 'topic_stb1'
        queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha' "%(paraDict['dbName'], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)

        consumerId = 0
        expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
        topicList = topicFromStb1
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                    enable.auto.commit:false,\
                    auto.commit.interval.ms:6000,\
                    auto.offset.reset:earliest'
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifManualCommit)

        tdLog.info("start consume processor")
        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'], dbName=paraDict["dbName"], showMsg=paraDict['showMsg'], showRow=paraDict['showRow'], snapshot=paraDict['snapshot'])

        tdLog.info("start to check consume result")
        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        tdSql.query(queryString)
        totalRowsFromQuery = tdSql.getRows()

        tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
        if totalConsumeRows != totalRowsFromQuery:
            tdLog.exit("tmq consume rows error!")

        # tmqCom.checkFileContent(consumerId, queryString)

        tmqCom.waitSubscriptionExit(tdSql, topicFromStb1)
        tdSql.query("drop topic %s"%topicFromStb1)

        tdLog.printNoPrefix("======== test case 1 end ...... ")

    def tmqCase2(self):
        tdLog.printNoPrefix("======== test case 2: ")

        # create and start thread
        paraDict = {'dbName':     'dbt',
                    'dropFlag':   1,
                    'event':      '',
                    'vgroups':    4,
                    'stbName':    'stb',
                    'colPrefix':  'c',
                    'tagPrefix':  't',
                    'colSchema':  [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema':  [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix':  'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum':     100,
                    'rowsPerTbl': 1000,
                    'batchNum':   100,
                    'startTs':    1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay':  15,
                    'showMsg':    1,
                    'showRow':    1,
                    'snapshot':   1}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        tdLog.info("create topics from stb1")
        topicFromStb1 = 'topic_stb1'
        queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha' "%(paraDict['dbName'], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)

        consumerId = 0
        expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
        topicList = topicFromStb1
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                    enable.auto.commit:false,\
                    auto.commit.interval.ms:6000,\
                    auto.offset.reset:earliest'
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifManualCommit)

        tdLog.info("start consume processor")
        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'], dbName=paraDict["dbName"], showMsg=paraDict['showMsg'], showRow=paraDict['showRow'], snapshot=paraDict['snapshot'])

        tdLog.info("================= restart dnode 2===========================")
        cluster.dnodes[1].stoptaosd()
        cluster.dnodes[1].starttaosd()
        tdLog.info("================= restart dnode 3===========================")
        cluster.dnodes[2].stoptaosd()
        cluster.dnodes[2].starttaosd()
        tdLog.info("================= restart dnode 4===========================")
        cluster.dnodes[3].stoptaosd()
        cluster.dnodes[3].starttaosd()
        tdLog.info("================= restart dnode 5===========================")
        cluster.dnodes[4].stoptaosd()
        cluster.dnodes[4].starttaosd()

        tdLog.info("start to check consume result")
        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        tdSql.query(queryString)
        totalRowsFromQuery = tdSql.getRows()

        tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
        if totalConsumeRows != totalRowsFromQuery:
            tdLog.exit("tmq consume rows error!")

        # tmqCom.checkFileContent(consumerId, queryString)

        tmqCom.waitSubscriptionExit(tdSql, topicFromStb1)
        tdSql.query("drop topic %s"%topicFromStb1)

        tdLog.printNoPrefix("======== test case 2 end ...... ")

    def run(self):
        self.prepareTestEnv()
        self.tmqCase1()
        self.tmqCase2()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
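A brief usage note (an illustrative assumption, not part of this commit): cases under tests/system-test are normally driven by the framework's test.py runner, and this case needs at least five dnodes because it restarts dnodes 2 through 5. A minimal sketch of such an invocation, with the -f/-N flags and working directory assumed rather than taken from the diff:

import subprocess

# Hypothetical launch of the system-test runner for this case;
# flag names and paths are assumptions for illustration only.
subprocess.run(
    ["python3", "./test.py", "-f", "7-tmq/tmqSubscribeStb-r3.py", "-N", "5"],
    cwd="tests/system-test",
    check=True,
)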
taos-tools @ 817cb6ac
Compare 0b8a3373 ... 817cb6ac
Subproject commit 0b8a3373bb7548f8106d13e7d3b0a988d3c4d48a
Subproject commit 817cb6ac431ed8ae4c843872cdfc8c201c1e1894