TDengine (fork of taosdata / TDengine)

Commit 867bc9cc — Merge branch 'develop' into jdbcfixes
Authored July 10, 2020 by Ping Xiao
Parents: c4cac5c6, a572369e

Showing 39 changed files with 878 additions and 668 deletions (+878 −668)
Changed files:

src/client/inc/tscUtil.h  +10 −0
src/client/src/tscAsync.c  +7 −1
src/client/src/tscFunctionImpl.c  +2 −3
src/client/src/tscParseInsert.c  +5 −5
src/client/src/tscUtil.c  +2 −12
src/common/src/tglobal.c  +1 −1
src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java  +3 −5
src/dnode/inc/dnodeMgmt.h  +2 −0
src/dnode/src/dnodeMain.c  +1 −0
src/dnode/src/dnodeMgmt.c  +11 −4
src/mnode/src/mnodeMain.c  +1 −1
src/mnode/src/mnodeSdb.c  +13 −8
src/mnode/src/mnodeShow.c  +3 −2
src/mnode/src/mnodeVgroup.c  +6 −5
src/plugins/http/inc/httpInt.h  +1 −1
src/plugins/http/src/httpContext.c  +17 −16
src/plugins/http/src/httpHandle.c  +3 −3
src/plugins/http/src/httpServer.c  +4 −4
src/query/inc/qExecutor.h  +1 −0
src/query/inc/qUtil.h  +3 −1
src/query/inc/tsqlfunction.h  +1 −1
src/query/src/qExecutor.c  +124 −91
src/query/src/qUtil.c  +16 −13
src/rpc/src/rpcMain.c  +2 −1
src/wal/src/walMain.c  +4 −3
tests/pytest/crash_gen.py  +52 −3
tests/pytest/fulltest.sh  +1 −1
tests/pytest/import_merge/importCSV.py  +94 −0
tests/pytest/regressiontest.sh  +1 −1
tests/pytest/testCompress.py  +136 −0
tests/pytest/testNoCompress.py  +137 −0
tests/pytest/util/dnodes.py  +41 −19
tests/pytest/util/sql.py  +6 −3
tests/script/general/parser/fill_us.sim  +3 −3
tests/script/general/parser/testSuite.sim  +37 −33
tests/script/jenkins/basic.txt  +0 −3
tests/script/sh/deploy.sh  +0 −1
tests/script/unique/cluster/client1_0.sim  +23 −4
tests/script/unique/cluster/cluster_main.sim  +104 −416
src/client/inc/tscUtil.h  (+10 −0)
@@ -87,6 +87,16 @@ typedef struct SVgroupTableInfo {
   SArray* itemList;  // SArray<STableIdInfo>
 } SVgroupTableInfo;
 
+static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex) {
+  assert(pCmd != NULL && subClauseIndex >= 0 && subClauseIndex < TSDB_MAX_UNION_CLAUSE);
+
+  if (pCmd->pQueryInfo == NULL || subClauseIndex >= pCmd->numOfClause) {
+    return NULL;
+  }
+
+  return pCmd->pQueryInfo[subClauseIndex];
+}
+
 int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, const char* name,
                            STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
 void tscDestroyDataBlock(STableDataBlocks* pDataBlock);
...
src/client/src/tscAsync.c  (+7 −1)
@@ -477,7 +477,13 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
     tscDebug("%p redo parse sql string to build submit block", pSql);
 
     pCmd->parseFinished = false;
-    if ((code = tsParseSql(pSql, true)) == TSDB_CODE_SUCCESS) {
+    code = tsParseSql(pSql, true);
+
+    if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
+      return;
+    }
+
+    if (code == TSDB_CODE_SUCCESS) {
       /*
        * Discard previous built submit blocks, and then parse the sql string again and build up all submit blocks,
        * and send the required submit block according to index value in supporter to server.
...
src/client/src/tscFunctionImpl.c  (+2 −3)
@@ -340,13 +340,12 @@ bool stableQueryFunctChanged(int32_t funcId) {
  */
 void resetResultInfo(SResultInfo *pResInfo) { pResInfo->initialized = false; }
 
-void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable) {
+void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable, char *buf) {
   assert(pResInfo->interResultBuf == NULL);
 
   pResInfo->bufLen = size;
   pResInfo->superTableQ = superTable;
-  pResInfo->interResultBuf = calloc(1, (size_t)size);
+  pResInfo->interResultBuf = buf;
 }
 
 // set the query flag to denote that query is completed
...
src/client/src/tscParseInsert.c  (+5 −5)
@@ -1310,6 +1310,11 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
     tscDebug("%p resume to parse sql: %s", pSql, pCmd->curSql);
   }
 
+  ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
+  if (TSDB_CODE_SUCCESS != ret) {
+    return ret;
+  }
+
   if (tscIsInsertData(pSql->sqlstr)) {
     /*
      * Set the fp before parse the sql string, in case of getTableMeta failed, in which
...
@@ -1326,11 +1331,6 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
     ret = tsParseInsertSql(pSql);
   } else {
-    ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
-    if (TSDB_CODE_SUCCESS != ret) {
-      return ret;
-    }
-
     SSqlInfo SQLInfo = qSQLParse(pSql->sqlstr);
     ret = tscToSQLCmd(pSql, &SQLInfo);
     SQLInfoDestroy(&SQLInfo);
...
src/client/src/tscUtil.c  (+2 −12)
@@ -1464,16 +1464,6 @@ STableMetaInfo* tscGetMetaInfo(SQueryInfo* pQueryInfo, int32_t tableIndex) {
   return pQueryInfo->pTableMetaInfo[tableIndex];
 }
 
-SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex) {
-  assert(pCmd != NULL && subClauseIndex >= 0 && subClauseIndex < TSDB_MAX_UNION_CLAUSE);
-
-  if (pCmd->pQueryInfo == NULL || subClauseIndex >= pCmd->numOfClause) {
-    return NULL;
-  }
-
-  return pCmd->pQueryInfo[subClauseIndex];
-}
-
 int32_t tscGetQueryInfoDetailSafely(SSqlCmd* pCmd, int32_t subClauseIndex, SQueryInfo** pQueryInfo) {
   int32_t ret = TSDB_CODE_SUCCESS;
...
@@ -2097,7 +2087,7 @@ void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()) {
 }
 
 void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) {
-  SFieldSupInfo* pInfo = taosArrayGet(pFieldInfo->pSupportInfo, columnIndex);  //tscFieldInfoGetSupp(pFieldInfo, columnIndex);
+  SFieldSupInfo* pInfo = taosArrayGet(pFieldInfo->pSupportInfo, columnIndex);
   assert(pInfo->pSqlExpr != NULL);
 
   int32_t type = pInfo->pSqlExpr->resType;
...
@@ -2112,7 +2102,7 @@ void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t column
     if (isNull(pData, type)) {
       pRes->tsrow[columnIndex] = NULL;
     } else {
-      pRes->tsrow[columnIndex] = pData + VARSTR_HEADER_SIZE;
+      pRes->tsrow[columnIndex] = ((tstr*)pData)->data;
     }
 
     if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor
...
src/common/src/tglobal.c  (+1 −1)
@@ -129,7 +129,7 @@ int32_t tsMnodeEqualVnodeNum = 4;
 int32_t tsEnableHttpModule = 1;
 int32_t tsRestRowLimit = 10240;
 uint16_t tsHttpPort = 6020;  // only tcp, range tcp[6020]
-int32_t tsHttpCacheSessions = 100;
+int32_t tsHttpCacheSessions = 1000;
 int32_t tsHttpSessionExpire = 36000;
 int32_t tsHttpMaxThreads = 2;
 int32_t tsHttpEnableCompress = 0;
...
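This default interacts with the connection cap in httpServer.c further down in this commit, which rejects new connections once the total descriptor count exceeds tsHttpCacheSessions * 100. A minimal sketch of that arithmetic is shown below; it is not TDengine code, only the two constants and the *100 factor are taken from this diff.

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int32_t tsHttpCacheSessions = 1000;           /* new default (was 100) */
  int32_t ceiling = tsHttpCacheSessions * 100;  /* accept loop rejects above this */
  printf("connections rejected once totalFds > %d (previously %d)\n", ceiling, 100 * 100);
  return 0;
}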
src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TDNodes.java  (+3 −5)
@@ -16,11 +16,9 @@ public class TDNodes {
   public void setPath(String path) {
     try {
-      String psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'";
-      Process ps = Runtime.getRuntime().exec(psCmd);
-      ps.waitFor();
-      String killCmd = "kill -9 " + ps.pid();
-      Runtime.getRuntime().exec(killCmd).waitFor();
+      String killCmd = "pkill -kill -x taosd";
+      String[] killCmds = {"sh", "-c", killCmd};
+      Runtime.getRuntime().exec(killCmds).waitFor();
 
       String binPath = System.getProperty("user.dir");
       binPath += "/../../../debug";
...
src/dnode/inc/dnodeMgmt.h  (+2 −0)
@@ -22,6 +22,8 @@ extern "C" {
 int32_t dnodeInitMgmt();
 void    dnodeCleanupMgmt();
+int32_t dnodeInitMgmtTimer();
+void    dnodeCleanupMgmtTimer();
 void    dnodeDispatchToMgmtQueue(SRpcMsg *rpcMsg);
 
 void*   dnodeGetVnode(int32_t vgId);
...
src/dnode/src/dnodeMain.c  (+1 −0)
@@ -57,6 +57,7 @@ static const SDnodeComponent tsDnodeComponents[] = {
   {"server",   dnodeInitServer,    dnodeCleanupServer},
   {"mgmt",     dnodeInitMgmt,      dnodeCleanupMgmt},
   {"modules",  dnodeInitModules,   dnodeCleanupModules},
+  {"mgmt-tmr", dnodeInitMgmtTimer, dnodeCleanupMgmtTimer},
   {"shell",    dnodeInitShell,     dnodeCleanupShell}
 };
...
src/dnode/src/dnodeMgmt.c  (+11 −4)
@@ -147,6 +147,12 @@ int32_t dnodeInitMgmt() {
     return -1;
   }
 
+  dInfo("dnode mgmt is initialized");
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t dnodeInitMgmtTimer() {
   tsDnodeTmr = taosTmrInit(100, 200, 60000, "DND-DM");
   if (tsDnodeTmr == NULL) {
     dError("failed to init dnode timer");
...
@@ -155,13 +161,11 @@ int32_t dnodeInitMgmt() {
   }
 
   taosTmrReset(dnodeSendStatusMsg, 500, NULL, tsDnodeTmr, &tsStatusTimer);
 
-  dInfo("dnode mgmt is initialized");
+  dInfo("dnode mgmt timer is initialized");
   return TSDB_CODE_SUCCESS;
 }
 
-void dnodeCleanupMgmt() {
+void dnodeCleanupMgmtTimer() {
   if (tsStatusTimer != NULL) {
     taosTmrStopA(&tsStatusTimer);
     tsStatusTimer = NULL;
...
@@ -171,7 +175,10 @@ void dnodeCleanupMgmt() {
     taosTmrCleanUp(tsDnodeTmr);
     tsDnodeTmr = NULL;
   }
+}
+
+void dnodeCleanupMgmt() {
+  dnodeCleanupMgmtTimer();
   dnodeCloseVnodes();
 
   if (tsMgmtQset) taosQsetThreadResume(tsMgmtQset);
...
src/mnode/src/mnodeMain.c  (+1 −1)
@@ -41,7 +41,7 @@ typedef struct {
   void (*cleanup)();
 } SMnodeComponent;
 
-void *tsMnodeTmr;
+void *tsMnodeTmr = NULL;
 static bool tsMgmtIsRunning = false;
 
 static const SMnodeComponent tsMnodeComponents[] = {
...
src/mnode/src/mnodeSdb.c  (+13 −8)
@@ -471,8 +471,8 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
     atomic_add_fetch_32(&pTable->autoIndex, 1);
   }
 
-  sdbDebug("table:%s, insert record:%s to hash, rowSize:%d numOfRows:%" PRId64 " ver:%" PRIu64 ", msg:%p", pTable->tableName,
-           sdbGetKeyStrFromObj(pTable, pOper->pObj), pOper->rowSize, pTable->numOfRows, sdbGetVersion(), pOper->pMsg);
+  sdbDebug("table:%s, insert record:%s to hash, rowSize:%d numOfRows:%" PRId64 ", msg:%p", pTable->tableName,
+           sdbGetKeyStrFromObj(pTable, pOper->pObj), pOper->rowSize, pTable->numOfRows, pOper->pMsg);
 
   (*pTable->insertFp)(pOper);
   return TSDB_CODE_SUCCESS;
...
@@ -490,8 +490,8 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) {
   taosHashRemove(pTable->iHandle, key, keySize);
   atomic_sub_fetch_32(&pTable->numOfRows, 1);
 
-  sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 " ver:%" PRIu64 ", msg:%p", pTable->tableName,
-           sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion(), pOper->pMsg);
+  sdbDebug("table:%s, delete record:%s from hash, numOfRows:%" PRId64 ", msg:%p", pTable->tableName,
+           sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, pOper->pMsg);
 
   int8_t *updateEnd = pOper->pObj + pTable->refCountPos - 1;
   *updateEnd = 1;
...
@@ -501,8 +501,8 @@ static int32_t sdbDeleteHash(SSdbTable *pTable, SSdbOper *pOper) {
 }
 
 static int32_t sdbUpdateHash(SSdbTable *pTable, SSdbOper *pOper) {
-  sdbDebug("table:%s, update record:%s in hash, numOfRows:%" PRId64 " ver:%" PRIu64 ", msg:%p", pTable->tableName,
-           sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion(), pOper->pMsg);
+  sdbDebug("table:%s, update record:%s in hash, numOfRows:%" PRId64 ", msg:%p", pTable->tableName,
+           sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, pOper->pMsg);
 
   (*pTable->updateFp)(pOper);
   return TSDB_CODE_SUCCESS;
...
@@ -967,7 +967,11 @@ static void *sdbWorkerFp(void *param) {
       }
 
       int32_t code = sdbWrite(pOper, pHead, type);
-      if (pOper && code <= 0) pOper->retCode = code;
+      if (code > 0) code = 0;
+      if (pOper)
+        pOper->retCode = code;
+      else
+        pHead->len = code;  // hackway
     }
 
     walFsync(tsSdbObj.wal);
...
@@ -982,7 +986,8 @@ static void *sdbWorkerFp(void *param) {
         sdbDecRef(pOper->table, pOper->pObj);
         sdbConfirmForward(NULL, pOper, pOper->retCode);
       } else if (type == TAOS_QTYPE_FWD) {
-        syncConfirmForward(tsSdbObj.sync, pHead->version, TSDB_CODE_SUCCESS);
+        pHead = (SWalHead *)item;
+        syncConfirmForward(tsSdbObj.sync, pHead->version, pHead->len);
         taosFreeQitem(item);
       } else {
         taosFreeQitem(item);
...
src/mnode/src/mnodeShow.c  (+3 −2)
@@ -310,7 +310,8 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
     if (pDb->status != TSDB_DB_STATUS_READY) {
       mError("db:%s, status:%d, in dropping", pDb->name, pDb->status);
-      return TSDB_CODE_MND_DB_IN_DROPPING;
+      code = TSDB_CODE_MND_DB_IN_DROPPING;
+      goto connect_over;
     }
     mnodeDecDbRef(pDb);
   }
...
@@ -355,7 +356,7 @@ static int32_t mnodeProcessUseMsg(SMnodeMsg *pMsg) {
   int32_t code = TSDB_CODE_SUCCESS;
   if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pUseDbMsg->db);
 
   if (pMsg->pDb == NULL) {
-    code = TSDB_CODE_MND_INVALID_DB;
+    return TSDB_CODE_MND_INVALID_DB;
   }
 
   if (pMsg->pDb->status != TSDB_DB_STATUS_READY) {
...
src/mnode/src/mnodeVgroup.c  (+6 −5)
@@ -372,7 +372,6 @@ static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) {
            pVgroup->vnodeGid[i].dnodeId);
   }
 
-  mnodeIncVgroupRef(pVgroup);
   pMsg->expected = pVgroup->numOfVnodes;
   mnodeSendCreateVgroupMsg(pVgroup, pMsg);
...
@@ -393,6 +392,9 @@ int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) {
     return TSDB_CODE_MND_NO_ENOUGH_DNODES;
   }
 
+  pMsg->pVgroup = pVgroup;
+  mnodeIncVgroupRef(pVgroup);
+
   SSdbOper oper = {
     .type = SDB_OPER_GLOBAL,
     .table = tsVgroupSdb,
...
@@ -402,8 +404,6 @@ int32_t mnodeCreateVgroup(SMnodeMsg *pMsg, SDbObj *pDb) {
     .cb = mnodeCreateVgroupCb
   };
 
-  pMsg->pVgroup = pVgroup;
-
   int32_t code = sdbInsertRow(&oper);
   if (code != TSDB_CODE_SUCCESS) {
     pMsg->pVgroup = NULL;
...
@@ -814,19 +814,20 @@ static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) {
     mDebug("dnode:%s, vgId:%d, invalid dnode", taosIpStr(pCfg->dnodeId), pCfg->vgId);
     return TSDB_CODE_MND_VGROUP_NOT_EXIST;
   }
-  mnodeDecDnodeRef(pDnode);
 
   SVgObj *pVgroup = mnodeGetVgroup(pCfg->vgId);
   if (pVgroup == NULL) {
     mDebug("dnode:%s, vgId:%d, no vgroup info", taosIpStr(pCfg->dnodeId), pCfg->vgId);
+    mnodeDecDnodeRef(pDnode);
     return TSDB_CODE_MND_VGROUP_NOT_EXIST;
   }
-  mnodeDecVgroupRef(pVgroup);
 
   mDebug("vgId:%d, send create vnode msg to dnode %s for vnode cfg msg", pVgroup->vgId, pDnode->dnodeEp);
   SRpcIpSet ipSet = mnodeGetIpSetFromIp(pDnode->dnodeEp);
   mnodeSendCreateVnodeMsg(pVgroup, &ipSet, NULL);
+
+  mnodeDecDnodeRef(pDnode);
+  mnodeDecVgroupRef(pVgroup);
   return TSDB_CODE_SUCCESS;
 }
...
src/plugins/http/inc/httpInt.h  (+1 −1)
@@ -206,7 +206,7 @@ typedef struct HttpThread {
   pthread_mutex_t threadMutex;
   bool            stop;
   int             pollFd;
-  int             numOfFds;
+  int             numOfContexts;
   int             threadId;
   char            label[HTTP_LABEL_SIZE];
   bool (*processData)(HttpContext *pContext);
...
src/plugins/http/src/httpContext.c  (+17 −16)
@@ -44,7 +44,7 @@ static void httpDestroyContext(void *data) {
   HttpThread *pThread = pContext->pThread;
   httpRemoveContextFromEpoll(pContext);
   httpReleaseSession(pContext);
-  atomic_sub_fetch_32(&pThread->numOfFds, 1);
+  atomic_sub_fetch_32(&pThread->numOfContexts, 1);
 
   pContext->pThread = 0;
   pContext->state = HTTP_CONTEXT_STATE_CLOSED;
...
@@ -171,38 +171,39 @@ bool httpInitContext(HttpContext *pContext) {
 
 void httpCloseContextByApp(HttpContext *pContext) {
   pContext->parsed = false;
 
   bool keepAlive = true;
   if (pContext->httpVersion == HTTP_VERSION_10 && pContext->httpKeepAlive != HTTP_KEEPALIVE_ENABLE) {
     keepAlive = false;
   } else if (pContext->httpVersion != HTTP_VERSION_10 && pContext->httpKeepAlive == HTTP_KEEPALIVE_DISABLE) {
     keepAlive = false;
-  } else {}
+  } else {
+  }
 
   if (keepAlive) {
     if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_HANDLING, HTTP_CONTEXT_STATE_READY)) {
-      httpDebug("context:%p, fd:%d, ip:%s, last state:handling, keepAlive:true, reuse connect", pContext,
-                pContext->fd, pContext->ipstr);
+      httpDebug("context:%p, fd:%d, ip:%s, last state:handling, keepAlive:true, reuse context", pContext,
+                pContext->fd, pContext->ipstr);
     } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_DROPPING, HTTP_CONTEXT_STATE_CLOSED)) {
       httpRemoveContextFromEpoll(pContext);
       httpDebug("context:%p, fd:%d, ip:%s, last state:dropping, keepAlive:true, close connect", pContext,
                 pContext->fd, pContext->ipstr);
     } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_READY)) {
-      httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, reuse connect", pContext,
-                pContext->fd, pContext->ipstr);
+      httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, reuse context", pContext,
+                pContext->fd, pContext->ipstr);
     } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_CLOSED, HTTP_CONTEXT_STATE_CLOSED)) {
       httpRemoveContextFromEpoll(pContext);
       httpDebug("context:%p, fd:%d, ip:%s, last state:ready, keepAlive:true, close connect", pContext,
                 pContext->fd, pContext->ipstr);
     } else {
       httpRemoveContextFromEpoll(pContext);
       httpError("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:true, close connect", pContext,
                 pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), pContext->state);
     }
   } else {
     httpRemoveContextFromEpoll(pContext);
-    httpDebug("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:false, close connect", pContext,
-              pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), pContext->state);
+    httpDebug("context:%p, fd:%d, ip:%s, last state:%s:%d, keepAlive:false, close context", pContext,
+              pContext->fd, pContext->ipstr, httpContextStateStr(pContext->state), pContext->state);
   }
 
   httpReleaseContext(pContext);
...
@@ -214,7 +215,7 @@ void httpCloseContextByServer(HttpContext *pContext) {
   } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_DROPPING, HTTP_CONTEXT_STATE_DROPPING)) {
     httpDebug("context:%p, fd:%d, ip:%s, epoll already finished, wait app finished", pContext, pContext->fd, pContext->ipstr);
   } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_CLOSED)) {
-    httpDebug("context:%p, fd:%d, ip:%s, epoll finished, close context", pContext, pContext->fd, pContext->ipstr);
+    httpDebug("context:%p, fd:%d, ip:%s, epoll finished, close connect", pContext, pContext->fd, pContext->ipstr);
   } else if (httpAlterContextState(pContext, HTTP_CONTEXT_STATE_CLOSED, HTTP_CONTEXT_STATE_CLOSED)) {
     httpDebug("context:%p, fd:%d, ip:%s, epoll finished, will be closed soon", pContext, pContext->fd, pContext->ipstr);
   } else {
...
src/plugins/http/src/httpHandle.c  (+3 −3)
@@ -313,9 +313,9 @@ bool httpParseRequest(HttpContext* pContext) {
     return true;
   }
 
-  httpTraceL("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, raw data:\n%s", pContext, pContext->fd,
-             pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds, pContext->parser.bufsize,
-             pContext->parser.buffer);
+  httpTraceL("context:%p, fd:%d, ip:%s, thread:%s, numOfContexts:%d, read size:%d, raw data:\n%s", pContext,
+             pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfContexts,
+             pContext->parser.bufsize, pContext->parser.buffer);
 
   if (!httpGetHttpMethod(pContext)) {
     return false;
...
src/plugins/http/src/httpServer.c  (+4 −4)
@@ -293,7 +293,7 @@ static void *httpAcceptHttpConnection(void *arg) {
     totalFds = 1;
     for (int i = 0; i < pServer->numOfThreads; ++i) {
-      totalFds += pServer->pThreads[i].numOfFds;
+      totalFds += pServer->pThreads[i].numOfContexts;
     }
 
     if (totalFds > tsHttpCacheSessions * 100) {
...
@@ -332,9 +332,9 @@ static void *httpAcceptHttpConnection(void *arg) {
     }
 
     // notify the data process, add into the FdObj list
-    atomic_add_fetch_32(&pThread->numOfFds, 1);
-    httpDebug("context:%p, fd:%d, ip:%s, thread:%s numOfFds:%d totalFds:%d, accept a new connection", pContext, connFd,
-              pContext->ipstr, pThread->label, pThread->numOfFds, totalFds);
+    atomic_add_fetch_32(&pThread->numOfContexts, 1);
+    httpDebug("context:%p, fd:%d, ip:%s, thread:%s numOfContexts:%d totalFds:%d, accept a new connection", pContext,
+              connFd, pContext->ipstr, pThread->label, pThread->numOfContexts, totalFds);
 
     // pick up next thread for next connection
     threadId++;
...
src/query/inc/qExecutor.h  (+1 −0)
@@ -172,6 +172,7 @@ typedef struct SQueryRuntimeEnv {
   bool                 topBotQuery;       // false
   bool                 groupbyNormalCol;  // denote if this is a groupby normal column query
   bool                 hasTagResults;     // if there are tag values in final result or not
+  int32_t              interBufSize;      // intermediate buffer sizse
   int32_t              prevGroupId;       // previous executed group id
   SDiskbasedResultBuf* pResultBuf;        // query result buffer based on blocked-wised disk file
 } SQueryRuntimeEnv;
...
src/query/inc/qUtil.h  (+3 −1)
@@ -15,6 +15,8 @@
 #ifndef TDENGINE_QUERYUTIL_H
 #define TDENGINE_QUERYUTIL_H
 
+int32_t getOutputInterResultBufSize(SQuery* pQuery);
+
 void clearTimeWindowResBuf(SQueryRuntimeEnv* pRuntimeEnv, SWindowResult* pOneOutputRes);
 void copyTimeWindowResBuf(SQueryRuntimeEnv* pRuntimeEnv, SWindowResult* dst, const SWindowResult* src);
...
@@ -35,7 +37,7 @@ SWindowResult *getWindowResult(SWindowResInfo *pWindowResInfo, int32_t slot);
 #define curTimeWindow(_winres) ((_winres)->curIndex)
 
 bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot);
 
-void createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo);
+void createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo, size_t interBufSize);
 
 char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWindowResult *pResult);
...
src/query/inc/tsqlfunction.h  (+1 −1)
@@ -272,7 +272,7 @@ bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *mi
 bool stableQueryFunctChanged(int32_t funcId);
 
 void resetResultInfo(SResultInfo *pResInfo);
-void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable);
+void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable, char* buf);
 
 static FORCE_INLINE void initResultInfo(SResultInfo *pResInfo) {
   pResInfo->initialized = true;  // the this struct has been initialized flag
...
src/query/src/qExecutor.c  (+124 −91)
This diff is collapsed in the page view (click to expand); its contents are not shown here.
src/query/src/qUtil.c  (+16 −13)
@@ -17,15 +17,24 @@
 #include "hash.h"
 #include "taosmsg.h"
-#include "qextbuffer.h"
+#include "qfill.h"
 #include "ttime.h"
 
 #include "qExecutor.h"
 #include "qUtil.h"
 
+int32_t getOutputInterResultBufSize(SQuery* pQuery) {
+  int32_t size = 0;
+
+  for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
+    assert(pQuery->pSelectExpr[i].interBytes <= DEFAULT_INTERN_BUF_PAGE_SIZE);
+    size += pQuery->pSelectExpr[i].interBytes;
+  }
+
+  assert(size > 0);
+  return size;
+}
+
 int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRuntimeEnv, int32_t size,
                           int32_t threshold, int16_t type) {
   pWindowResInfo->capacity = size;
...
@@ -43,7 +52,7 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
   pWindowResInfo->pResult = calloc(threshold, sizeof(SWindowResult));
 
   for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) {
     SPosInfo posInfo = {-1, -1};
-    createQueryResultInfo(pRuntimeEnv->pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &posInfo);
+    createQueryResultInfo(pRuntimeEnv->pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &posInfo, pRuntimeEnv->interBufSize);
   }
 
   return TSDB_CODE_SUCCESS;
...
@@ -54,11 +63,7 @@ void destroyTimeWindowRes(SWindowResult *pWindowRes, int32_t nOutputCols) {
     return;
   }
 
-  for (int32_t i = 0; i < nOutputCols; ++i) {
-    free(pWindowRes->resultInfo[i].interResultBuf);
-  }
-
+  // TODO opt malloc strategy
+  free(pWindowRes->resultInfo[0].interResultBuf);
 
   free(pWindowRes->resultInfo);
 }
...
@@ -241,10 +246,9 @@ void clearTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindow
   }
 
   pWindowRes->numOfRows = 0;
-  // pWindowRes->nAlloc = 0;
   pWindowRes->pos = (SPosInfo){-1, -1};
   pWindowRes->status.closed = false;
-  pWindowRes->window = (STimeWindow){0, 0};
+  pWindowRes->window = TSWINDOW_INITIALIZER;
 }
 
 /**
...
@@ -254,7 +258,6 @@ void clearTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *pWindow
  */
 void copyTimeWindowResBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *dst, const SWindowResult *src) {
   dst->numOfRows = src->numOfRows;
-  // dst->nAlloc = src->nAlloc;
 
   dst->window = src->window;
   dst->status = src->status;
...
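Taken together with the qExecutor.h, qUtil.h and tsqlfunction.h hunks above, this replaces one calloc() per output column with a single interBufSize-sized block per window result; destroyTimeWindowRes() therefore frees only resultInfo[0].interResultBuf. The qExecutor.c hunk that actually wires the allocation up is collapsed in this view, so the fragment below is only a hedged sketch of how the pieces plausibly fit together: it reuses the repository's own types and functions, and the offset bookkeeping plus the error handling are assumptions, not the committed code.

/* Illustrative sketch only — not the collapsed qExecutor.c code. */
static int createQueryResultInfoSketch(SQuery *pQuery, SWindowResult *pResultRow,
                                       bool isSTableQuery, size_t interBufSize) {
  // one contiguous allocation for all output columns of this window result
  char *buf = calloc(1, interBufSize);
  if (buf == NULL) {
    return -1;  // the real code presumably returns a TSDB_CODE_* out-of-memory code
  }

  char *p = buf;
  for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
    int32_t bytes = pQuery->pSelectExpr[i].interBytes;
    // hand each column its slice; the 4-argument signature is confirmed by the tsqlfunction.h hunk
    setResultInfoBuf(&pResultRow->resultInfo[i], bytes, isSTableQuery, p);
    p += bytes;  // assumed: advance to the next column's slice of the shared buffer
  }
  return 0;
}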
src/rpc/src/rpcMain.c  (+2 −1)
@@ -1366,6 +1366,7 @@ static int32_t rpcCompressRpcMsg(char* pCont, int32_t contLen) {
   }
 
   int32_t compLen = LZ4_compress_default(pCont, buf, contLen, contLen + overhead);
+  tDebug("compress rpc msg, before:%d, after:%d, overhead:%d", contLen, compLen, overhead);
 
   /*
    * only the compressed size is less than the value of contLen - overhead, the compression is applied
...
@@ -1378,7 +1379,7 @@ static int32_t rpcCompressRpcMsg(char* pCont, int32_t contLen) {
     memcpy(pCont + overhead, buf, compLen);
 
     pHead->comp = 1;
-    tDebug("compress rpc msg, before:%d, after:%d", contLen, compLen);
+    //tDebug("compress rpc msg, before:%d, after:%d", contLen, compLen);
     finalLen = compLen + overhead;
   } else {
     finalLen = contLen;
...
src/wal/src/walMain.c  (+4 −3)
@@ -28,6 +28,7 @@
 #include "taoserror.h"
 #include "twal.h"
 #include "tqueue.h"
+#include "tfile.h"
 
 #define walPrefix "wal"
...
@@ -180,7 +181,7 @@ int walWrite(void *handle, SWalHead *pHead) {
   taosCalcChecksumAppend(0, (uint8_t *)pHead, sizeof(SWalHead));
   int contLen = pHead->len + sizeof(SWalHead);
 
-  if (write(pWal->fd, pHead, contLen) != contLen) {
+  if (twrite(pWal->fd, pHead, contLen) != contLen) {
     wError("wal:%s, failed to write(%s)", pWal->name, strerror(errno));
     terrno = TAOS_SYSTEM_ERROR(errno);
   } else {
...
@@ -325,7 +326,7 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) {
   wDebug("wal:%s, start to restore", name);
 
   while (1) {
-    int ret = read(fd, pHead, sizeof(SWalHead));
+    int ret = tread(fd, pHead, sizeof(SWalHead));
     if (ret == 0) break;
 
     if (ret != sizeof(SWalHead)) {
...
@@ -340,7 +341,7 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) {
       break;
     }
 
-    ret = read(fd, pHead->cont, pHead->len);
+    ret = tread(fd, pHead->cont, pHead->len);
     if (ret != pHead->len) {
       wWarn("wal:%s, failed to read body, skip, len:%d ret:%d", name, pHead->len, ret);
       terrno = TAOS_SYSTEM_ERROR(errno);
...
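The WAL now calls twrite()/tread() from the newly included tfile.h instead of the raw write()/read() syscalls. tfile.h itself is not part of this diff, so the helper below is only a hedged sketch of what such a wrapper typically does (retry on EINTR, resume after short reads); the actual TDengine implementation may differ.

#include <errno.h>
#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical helper, for illustration only — not the contents of tfile.h. */
static ssize_t tread_sketch(int fd, void *buf, size_t count) {
  size_t total = 0;
  while (total < count) {
    ssize_t n = read(fd, (char *)buf + total, count - total);
    if (n == 0) break;              /* EOF: return what we have */
    if (n < 0) {
      if (errno == EINTR) continue; /* interrupted by a signal: retry */
      return -1;                    /* genuine I/O error */
    }
    total += (size_t)n;
  }
  return (ssize_t)total;
}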
tests/pytest/crash_gen.py  (+52 −3)
@@ -606,6 +606,47 @@ class DbConnRest(DbConn):
         print(self._result)
         raise RuntimeError("TBD")
 
+# Duplicate code from TDMySQL, TODO: merge all this into DbConnNative
+class MyTDSql:
+    def __init__(self):
+        self.queryRows = 0
+        self.queryCols = 0
+        self.affectedRows = 0
+
+    def init(self, cursor, log=True):
+        self.cursor = cursor
+        # if (log):
+        #     caller = inspect.getframeinfo(inspect.stack()[1][0])
+        #     self.cursor.log(caller.filename + ".sql")
+
+    def close(self):
+        self.cursor.close()
+
+    def query(self, sql):
+        self.sql = sql
+        try:
+            self.cursor.execute(sql)
+            self.queryResult = self.cursor.fetchall()
+            self.queryRows = len(self.queryResult)
+            self.queryCols = len(self.cursor.description)
+        except Exception as e:
+            # caller = inspect.getframeinfo(inspect.stack()[1][0])
+            # args = (caller.filename, caller.lineno, sql, repr(e))
+            # tdLog.exit("%s(%d) failed: sql:%s, %s" % args)
+            raise
+        return self.queryRows
+
+    def execute(self, sql):
+        self.sql = sql
+        try:
+            self.affectedRows = self.cursor.execute(sql)
+        except Exception as e:
+            # caller = inspect.getframeinfo(inspect.stack()[1][0])
+            # args = (caller.filename, caller.lineno, sql, repr(e))
+            # tdLog.exit("%s(%d) failed: sql:%s, %s" % args)
+            raise
+        return self.affectedRows
+
 class DbConnNative(DbConn):
     def __init__(self):
         super().__init__()
...
@@ -623,7 +664,7 @@ class DbConnNative(DbConn):
         # self._cursor.execute('use db') # do this at the beginning of every step
 
         # Open connection
-        self._tdSql = TDSql()
+        self._tdSql = MyTDSql()
         self._tdSql.init(self._cursor)
 
     def close(self):
...
@@ -1213,9 +1254,15 @@ class Task():
             self._executeInternal(te, wt)  # TODO: no return value?
         except taos.error.ProgrammingError as err:
             errno2 = err.errno if (err.errno > 0) else 0x80000000 + err.errno  # correct error scheme
-            if (errno2 in [
+            if (gConfig.continue_on_exception):  # user choose to continue
+                self.logDebug("[=] Continue after TAOS exception: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, self._lastSql))
+                self._err = err
+            elif (errno2 in [
                     0x05,  # TSDB_CODE_RPC_NOT_READY
-                    0x200, 0x360, 0x362, 0x36A, 0x36B, 0x36D, 0x381, 0x380, 0x383, 0x503,
+                    0x200, 0x360, 0x362, 0x36A, 0x36B, 0x36D, 0x381, 0x380, 0x383,
+                    0x386,  # DB is being dropped?!
+                    0x503,
                     0x510,  # vnode not in ready state
                     0x600,
                     1000  # REST catch-all error
...
@@ -2077,6 +2124,8 @@ def main():
                         help='Maximum number of steps to run (default: 100)')
     parser.add_argument('-t', '--num-threads', action='store', default=5, type=int,
                         help='Number of threads to run (default: 10)')
+    parser.add_argument('-x', '--continue-on-exception', action='store_true',
+                        help='Continue execution after encountering unexpected/disallowed errors/exceptions (default: false)')
 
     global gConfig
     gConfig = parser.parse_args()
...
tests/pytest/fulltest.sh  (+1 −1)
@@ -121,7 +121,7 @@ python3 ./test.py -f import_merge/importTORestart.py
 python3 ./test.py -f import_merge/importTPORestart.py
 python3 ./test.py -f import_merge/importTRestart.py
 python3 ./test.py -f import_merge/importInsertThenImport.py
+python3 ./test.py -f import_merge/importCSV.py
 # user
 python3 ./test.py -f user/user_create.py
 python3 ./test.py -f user/pass_len.py
...
tests/pytest/import_merge/importCSV.py  (new file mode 100644, +94 −0)

###################################################################
#           Copyright (c) 2016 by TAOS Technologies, Inc.
#                     All rights reserved.
#
#  This file is proprietary and confidential to TAOS Technologies.
#  No part of this file may be reproduced, stored, transmitted,
#  disclosed or used in any form or by any means other than as
#  expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
import csv
import random
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.csvfile = "/tmp/file.csv"
        self.rows = 10000
        self.ntables = 1
        self.startTime = 1520000010000

    def genRandomStr(self, maxLen):
        H = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
        salt = ''
        if maxLen <= 1:
            maxLen = 2
        l = random.randint(1, maxLen)
        for i in range(l):
            salt += random.choice(H)
        return salt

    def createCSVFile(self):
        f = open(self.csvfile, 'w', encoding='utf-8')
        csv_writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)
        for i in range(self.rows):
            csv_writer.writerow([self.startTime + i,
                                 self.genRandomStr(5),
                                 self.genRandomStr(6),
                                 self.genRandomStr(7),
                                 self.genRandomStr(8),
                                 self.genRandomStr(9),
                                 self.genRandomStr(10),
                                 self.genRandomStr(11),
                                 self.genRandomStr(12),
                                 self.genRandomStr(13),
                                 self.genRandomStr(14)])
        f.close()

    def destroyCSVFile(self):
        os.remove(self.csvfile)

    def run(self):
        self.createCSVFile()

        tdDnodes.stop(1)
        tdDnodes.deploy(1)
        tdDnodes.start(1)

        tdSql.execute('reset query cache')
        tdSql.execute('drop database if exists db')
        tdSql.execute('create database db')
        tdSql.execute('use db')
        tdSql.execute('''create table tbx (ts TIMESTAMP,
                         collect_area NCHAR(5),
                         device_id BINARY(6),
                         imsi BINARY(7),
                         imei BINARY(8),
                         mdn BINARY(9),
                         net_type BINARY(10),
                         mno NCHAR(11),
                         province NCHAR(12),
                         city NCHAR(13),
                         alarm BINARY(14))''')

        tdSql.execute("import into tbx file \'%s\'" % (self.csvfile))
        tdSql.query('select * from tbx')
        tdSql.checkRows(self.rows)

    def stop(self):
        self.destroyCSVFile()
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
        tdDnodes.stop(1)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tests/pytest/regressiontest.sh  (+1 −1)
@@ -121,7 +121,7 @@ python3 ./test.py -f import_merge/importTORestart.py
 python3 ./test.py -f import_merge/importTPORestart.py
 python3 ./test.py -f import_merge/importTRestart.py
 python3 ./test.py -f import_merge/importInsertThenImport.py
+python3 ./test.py -f import_merge/importCSV.py
 # user
 python3 ./test.py -f user/user_create.py
 python3 ./test.py -f user/pass_len.py
...
tests/pytest/testCompress.py  (new file mode 100644, +136 −0)
This diff is collapsed in the page view (click to expand); its contents are not shown here.
tests/pytest/testNoCompress.py  (new file mode 100644, +137 −0)
This diff is collapsed in the page view (click to expand); its contents are not shown here.
tests/pytest/util/dnodes.py  (+41 −19)
@@ -22,35 +22,59 @@ class TDSimClient:
     def __init__(self):
         self.testCluster = False
 
+        self.cfgDict = {
+            "numOfLogLines": "100000000",
+            "numOfThreadsPerCore": "2.0",
+            "locale": "en_US.UTF-8",
+            "charset": "UTF-8",
+            "asyncLog": "0",
+            "anyIp": "0",
+            "sdbDebugFlag": "135",
+            "rpcDebugFlag": "135",
+            "tmrDebugFlag": "131",
+            "cDebugFlag": "135",
+            "udebugFlag": "135",
+            "jnidebugFlag": "135",
+            "qdebugFlag": "135",
+        }
+
     def init(self, path):
         self.__init__()
         self.path = path
 
+    def getLogDir(self):
+        self.logDir = "%s/sim/psim/log" % (self.path)
+        return self.logDir
+
     def getCfgDir(self):
         self.cfgDir = "%s/sim/psim/cfg" % (self.path)
         return self.cfgDir
 
     def setTestCluster(self, value):
         self.testCluster = value
 
+    def addExtraCfg(self, option, value):
+        self.cfgDict.update({option: value})
+
     def cfg(self, option, value):
         cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
         if os.system(cmd) != 0:
             tdLog.exit(cmd)
 
     def deploy(self):
-        self.logDir = "%s/sim/psim/log" % (self.path,)
+        self.logDir = "%s/sim/psim/log" % (self.path)
         self.cfgDir = "%s/sim/psim/cfg" % (self.path)
         self.cfgPath = "%s/sim/psim/cfg/taos.cfg" % (self.path)
 
         cmd = "rm -rf " + self.logDir
         if os.system(cmd) != 0:
             tdLog.exit(cmd)
 
-        cmd = "rm -rf " + self.cfgDir
+        cmd = "mkdir -p " + self.logDir
         if os.system(cmd) != 0:
             tdLog.exit(cmd)
 
-        cmd = "mkdir -p " + self.logDir
+        cmd = "rm -rf " + self.cfgDir
         if os.system(cmd) != 0:
             tdLog.exit(cmd)
...
@@ -66,19 +90,10 @@ class TDSimClient:
         self.cfg("masterIp", "192.168.0.1")
         self.cfg("secondIp", "192.168.0.2")
         self.cfg("logDir", self.logDir)
-        self.cfg("numOfLogLines", "100000000")
-        self.cfg("numOfThreadsPerCore", "2.0")
-        self.cfg("locale", "en_US.UTF-8")
-        self.cfg("charset", "UTF-8")
-        self.cfg("asyncLog", "0")
-        self.cfg("anyIp", "0")
-        self.cfg("sdbDebugFlag", "135")
-        self.cfg("rpcDebugFlag", "135")
-        self.cfg("tmrDebugFlag", "131")
-        self.cfg("cDebugFlag", "135")
-        self.cfg("udebugFlag", "135")
-        self.cfg("jnidebugFlag", "135")
-        self.cfg("qdebugFlag", "135")
+        for key, value in self.cfgDict.items():
+            self.cfg(key, value)
         tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
...
@@ -378,6 +393,9 @@ class TDDnodes:
         for i in range(len(self.dnodes)):
             self.dnodes[i].init(self.path)
 
+        self.sim = TDSimClient()
+        self.sim.init(self.path)
+
     def setTestCluster(self, value):
         self.testCluster = value
...
@@ -385,8 +403,6 @@ class TDDnodes:
         self.valgrind = value
 
     def deploy(self, index):
-        self.sim = TDSimClient()
-        self.sim.init(self.path)
         self.sim.setTestCluster(self.testCluster)
 
         if (self.simDeployed == False):
...
@@ -474,5 +490,11 @@ class TDDnodes:
     def getSimCfgPath(self):
         return self.sim.getCfgDir()
 
+    def getSimLogPath(self):
+        return self.sim.getLogDir()
+
+    def addSimExtraCfg(self, option, value):
+        self.sim.addExtraCfg(option, value)
+
 tdDnodes = TDDnodes()
tests/pytest/util/sql.py
浏览文件 @
867bc9cc
...
@@ -71,7 +71,8 @@ class TDSql:
...
@@ -71,7 +71,8 @@ class TDSql:
except
Exception
as
e
:
except
Exception
as
e
:
caller
=
inspect
.
getframeinfo
(
inspect
.
stack
()[
1
][
0
])
caller
=
inspect
.
getframeinfo
(
inspect
.
stack
()[
1
][
0
])
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
repr
(
e
))
args
=
(
caller
.
filename
,
caller
.
lineno
,
sql
,
repr
(
e
))
tdLog
.
exit
(
"%s(%d) failed: sql:%s, %s"
%
args
)
tdLog
.
notice
(
"%s(%d) failed: sql:%s, %s"
%
args
)
raise
Exception
(
repr
(
e
))
return
self
.
queryRows
return
self
.
queryRows
def
waitedQuery
(
self
,
sql
,
expectRows
,
timeout
):
def
waitedQuery
(
self
,
sql
,
expectRows
,
timeout
):
...
@@ -89,7 +90,8 @@ class TDSql:
...
@@ -89,7 +90,8 @@ class TDSql:
        except Exception as e:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            args = (caller.filename, caller.lineno, sql, repr(e))
            tdLog.exit("%s(%d) failed: sql:%s, %s" % args)
            tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
            raise Exception(repr(e))
        return (self.queryRows, timeout)

    def checkRows(self, expectRows):
...
@@ -158,7 +160,8 @@ class TDSql:
        except Exception as e:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            args = (caller.filename, caller.lineno, sql, repr(e))
            tdLog.exit("%s(%d) failed: sql:%s, %s" % args)
            tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
            raise Exception(repr(e))
        return self.affectedRows

    def checkAffectedRows(self, expectAffectedRows):
...
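One practical consequence of replacing tdLog.exit with tdLog.notice plus raise in the hunks above: a failing statement no longer terminates the whole test run, so a caller can trap the error. The sketch below is hypothetical; expect_error is not part of TDSql, and it assumes the module-level tdSql and tdLog objects from the test framework.

# Hypothetical sketch: query()/execute() now raise on failure instead of
# exiting, so a test that deliberately sends bad SQL can catch the error
# and keep running.
def expect_error(sql):
    try:
        tdSql.execute(sql)
    except Exception as e:
        tdLog.notice("got expected error for %s: %s" % (sql, repr(e)))
        return True
    return False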
tests/script/general/parser/fill_us.sim
View file @ 867bc9cc
system sh/stop_dnodes.sh
system sh/ip.sh -i 1 -s up
system sh/deploy.sh -n dnode1 -m 192.168.0.1 -i 192.168.0.1
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c commitLog -v 0
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
...
tests/script/general/parser/testSuite.sim
View file @ 867bc9cc
sleep 2000
run general/parser/alter.sim
sleep 2000
run general/parser/alter1.sim
...
@@ -7,7 +8,6 @@ sleep 2000
run general/parser/auto_create_tb.sim
sleep 2000
run general/parser/auto_create_tb_drop_tb.sim
sleep 2000
run general/parser/col_arithmetic_operation.sim
sleep 2000
...
@@ -23,65 +23,61 @@ run general/parser/create_tb.sim
sleep 2000
run general/parser/dbtbnameValidate.sim
sleep 2000
run general/parser/fill.sim
sleep 2000
run general/parser/fill_stb.sim
sleep 2000
#run general/parser/fill_us.sim #
sleep 2000
run general/parser/first_last.sim
sleep 2000
run general/parser/import_commit1.sim
sleep 2000
run general/parser/import_commit2.sim
sleep 2000
run general/parser/import_commit3.sim
sleep 2000
#run general/parser/import_file.sim
sleep 2000
run general/parser/insert_tb.sim
sleep 2000
run general/parser/first_last.sim
run general/parser/tags_dynamically_specifiy.sim
sleep 2000
#run general/parser/import_file.sim
run general/parser/interp.sim
sleep 2000
run general/parser/lastrow.sim
sleep 2000
run general/parser/nchar.sim
run general/parser/limit.sim
sleep 2000
#run general/parser/null_char.sim
sleep 2000
run general/parser/single_row_in_tb.sim
run general/parser/limit1.sim
sleep 2000
run general/parser/select_from_cache_disk.sim
run general/parser/limit1_tblocks100.sim
sleep 2000
run general/parser/selectResNum.sim
run general/parser/limit2.sim
sleep 2000
run general/parser/mixed_blocks.sim
sleep 2000
run general/parser/limit1.sim
run general/parser/nchar.sim
sleep 2000
run general/parser/limit.sim
run general/parser/null_char.sim
sleep 2000
run general/parser/limit1_tblocks100.sim
run general/parser/selectResNum.sim
sleep 2000
run general/parser/select_across_vnodes.sim
sleep 2000
run general/parser/slimit1.sim
run general/parser/select_from_cache_disk.sim
sleep 2000
run general/parser/tbnameIn.sim
sleep 2000
run general/parser/projection_limit_offset.sim
sleep 2000
run general/parser/limit2.sim
sleep 2000
run general/parser/fill.sim
sleep 2000
run general/parser/fill_stb.sim
run general/parser/set_tag_vals.sim
sleep 2000
run general/parser/where.sim
run general/parser/single_row_in_tb.sim
sleep 2000
run general/parser/slimit.sim
sleep 2000
run general/parser/select_with_tags.sim
run general/parser/slimit1.sim
sleep 2000
run general/parser/interp.sim
sleep 2000
run general/parser/tags_dynamically_specifiy.sim
run general/parser/slimit_alter_tags.sim
sleep 2000
run general/parser/groupby.sim
run general/parser/tbnameIn.sim
sleep 2000
run general/parser/set_tag_vals.sim
sleep 2000
run general/parser/slimit_alter_tags.sim # persistent failed
sleep 2000
...
@@ -89,11 +85,19 @@ run general/parser/join.sim
sleep 2000
run general/parser/join_multivnode.sim
sleep 2000
run general/parser/repeatAlter.sim
run general/parser/projection_limit_offset.sim
sleep 2000
run general/parser/select_with_tags.sim
sleep 2000
run general/parser/groupby.sim
sleep 2000
run general/parser/union.sim
sleep 2000
run general/parser/sliding.sim
sleep 2000
run general/parser/binary_escapeCharacter.sim
run general/parser/fill_us.sim
sleep 2000
run general/parser/bug.sim
run general/parser/tags_filter.sim
#sleep 2000
#run general/parser/repeatStream.sim
...
tests/script/jenkins/basic.txt
View file @ 867bc9cc
...
@@ -117,8 +117,6 @@ cd ../../../debug; make
./test.sh -f general/parser/import_commit3.sim
./test.sh -f general/parser/insert_tb.sim
./test.sh -f general/parser/first_last.sim
# dyh is processing this script
#./test.sh -f general/parser/import_file.sim
./test.sh -f general/parser/lastrow.sim
./test.sh -f general/parser/nchar.sim
./test.sh -f general/parser/null_char.sim
...
@@ -145,7 +143,6 @@ cd ../../../debug; make
./test.sh -f general/parser/groupby.sim
./test.sh -f general/parser/set_tag_vals.sim
#./test.sh -f general/parser/sliding.sim
./test.sh -f general/parser/tags_dynamically_specifiy.sim
./test.sh -f general/parser/tags_filter.sim
./test.sh -f general/parser/slimit_alter_tags.sim
./test.sh -f general/parser/join.sim
...
tests/script/sh/deploy.sh
View file @ 867bc9cc
...
@@ -125,7 +125,6 @@ echo "mqttDebugFlag 131" >> $TAOS_CFG
echo "qdebugFlag 135" >> $TAOS_CFG
echo "rpcDebugFlag 135" >> $TAOS_CFG
echo "tmrDebugFlag 131" >> $TAOS_CFG
echo "cDebugFlag 135" >> $TAOS_CFG
echo "udebugFlag 135" >> $TAOS_CFG
echo "sdebugFlag 135" >> $TAOS_CFG
echo "wdebugFlag 135" >> $TAOS_CFG
...
tests/script/unique/cluster/client1_0.sim
View file @ 867bc9cc
#system sh/stop_dnodes.sh
#system sh/deploy.sh -n dnode1 -i 1
#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 10000
#system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 256
#system sh/exec.sh -n dnode1 -s start
#sql connect
#$db = db1
#sql create database $db
#sql use $db
#$stb = stb1
#sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8))
$tblStart = 0
$tblEnd = 10000
$tblEnd = 1000
$tsStart = 1325347200000 # 2012-01-01 00:00:00.000
###############################################################
...
@@ -10,7 +26,6 @@ $stb = stb1
sql use $db
######sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8))
$tagPrex = ' . tag
...
@@ -19,12 +34,15 @@ while $i < $tblEnd
  $tb = tb . $i
  $tagBinary = $tagPrex . $i
  $tagBinary = $tagBinary . '
  sql create table if not exists $tb using $stb tags ($i, $tagBinary)
# print create table if not exists $tb using $stb tags ( $i , $tagBinary )
  sql create table if not exists $tb using $stb tags ( $i , $tagBinary )
  $i = $i + 1
endw
print ====================== client1_0 create table end, start insert data ............
sql select count(tbname) from $stb
print select count(tbname) from $stb
print data00 $data00
$rowsPerLoop = 100
$ts = $tsStart
...
@@ -54,3 +72,4 @@ while $i < $tblEnd
    print ====================== client1_0 insert data complete once ............
  endi
endw
print ====================== client1_0 success and auto end =====================
\ No newline at end of file
tests/script/unique/cluster/cluster_main.sim
View file @ 867bc9cc
...
@@ -3,41 +3,49 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/deploy.sh -n dnode5 -i 5
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode5 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode2 -c walLevel -v 1
system sh/cfg.sh -n dnode3 -c walLevel -v 1
system sh/cfg.sh -n dnode4 -c walLevel -v 1
system sh/cfg.sh -n dnode5 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode5 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 256
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 256
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 256
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 256
system sh/cfg.sh -n dnode5 -c numOfTotalVnodes -v 256
system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
system sh/cfg.sh -n dnode3 -c alternativeRole -v 0
system sh/cfg.sh -n dnode4 -c alternativeRole -v 0
system sh/cfg.sh -n dnode5 -c alternativeRole -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 5000
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 5000
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 5000
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 5000
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 5000
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode5 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
...
@@ -64,407 +72,159 @@ $stb = stb1
sql create table $stb (ts timestamp, c1 int) tags(t1 int, t2 binary(8))
print ============== step4: start 10 client1/ 10 client2/ 10 client3/ 10 client4/ 1 client5
#run_back unique/cluster/client_test.sim
run_back unique/cluster/client1_0.sim
run_back unique/cluster/client1_1.sim
#run_back unique/cluster/client1_1.sim
run_back unique/cluster/client1_2.sim
#run_back unique/cluster/client1_2.sim
run_back unique/cluster/client1_3.sim
#run_back unique/cluster/client1_3.sim
run_back unique/cluster/client2_0.sim
#run_back unique/cluster/client2_0.sim
run_back unique/cluster/client2_1.sim
#run_back unique/cluster/client2_1.sim
run_back unique/cluster/client2_2.sim
#run_back unique/cluster/client2_2.sim
run_back unique/cluster/client2_3.sim
#run_back unique/cluster/client2_3.sim
run_back unique/cluster/client3.sim
#run_back unique/cluster/client3.sim
run_back unique/cluster/client4.sim
#run_back unique/cluster/client4.sim
sleep 20000
wait_subsim_insert_complete_create_tables:
sql select count(tbname) from $stb
print select count(tbname) from $stb
print data00 $data00
if $data00 < 1000 then
sleep 3000
goto wait_subsim_insert_complete_create_tables
endi
wait_subsim_insert_data:
print select count(*) from $stb
sql select count(*) from $stb
print data00 $data00
if $data00 < 1 then
if $data00 < 1000 then
sleep 3000
goto wait_subsim_insert_data
endi
print wait for a while to let clients start insert data
sleep 5000
$loop_cnt = 0
loop_cluster_do:
print **** **** **** START loop cluster do (loop_cnt: $loop_cnt )**** **** **** ****
print ============== step5: start dnode4 and add into cluster, then wait dnode4 ready
print ============== step5: start dnode4/dnode5 and add into cluster, then wait ready
system sh/exec.sh -n dnode4 -s start
system sh/exec.sh -n dnode5 -s start
sql create dnode $hostname4
sql create dnode $hostname5
wait_dnode4_ready_0:
sleep 5000
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode4_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode4Status = $data4_4
elif $loop_cnt == 1 then
$dnode4Status = $data4_6
elif $loop_cnt == 2 then
$dnode4Status = $data4_8
else then
print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** ****
return
endi
if $dnode4Status != ready then
sleep 2000
goto wait_dnode4_ready_0
endi
print ============== step6: stop and drop dnode1, then remove data dir of dnode1
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 5000
$cnt = 0
wait_dnode1_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode1_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode1Status = $data4_1
elif $loop_cnt == 1 then
$dnode1Status = $data4_5
elif $loop_cnt == 2 then
$dnode1Status = $data4_7
elif $loop_cnt == 3 then
$dnode1Status = $data4_9
else then
print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** ****
return
endi
if $dnode1Status != offline then
sleep 2000
goto wait_dnode1_offline_0
endi
$cnt = 0
wait_mnode1_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
print show mnodes
sql show mnodes
if $rows != 3 then
sleep 2000
goto wait_mnode1_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$mnode1Status = $data2_1
$mnode2Status = $data2_2
$mnode3Status = $data2_3
$mnode4Status = $data2_4
if $loop_cnt == 0 then
$mnode1Status = $data2_1
elif $loop_cnt == 1 then
$mnode1Status = $data2_5
elif $loop_cnt == 2 then
$mnode1Status = $data2_7
elif $loop_cnt == 3 then
$mnode1Status = $data2_9
else then
print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** ****
return
endi
if $mnode1Status != offline then
sleep 2000
goto wait_mnode1_offline_0
endi
sql drop dnode $hostname1
sleep 5000
system rm -rf ../../../sim/dnode1/data
sleep 20000
$cnt = 0
wait_mnode4_slave_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
print show mnodes
sql show mnodes
if $rows != 3 then
print show mnodes
sleep 2000
print rows: $rows
goto wait_mnode4_slave_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$mnode1Status = $data2_1
print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
$mnode2Status = $data2_2
print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$mnode3Status = $data2_3
print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7
$mnode4Status = $data2_4
print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8
print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9
if $loop_cnt == 0 then
return -1
$mnode4Status = $data2_4
elif $loop_cnt == 1 then
$mnode4Status = $data2_6
elif $loop_cnt == 2 then
$mnode4Status = $data2_8
else then
print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** ****
return
endi
if $mnode4Status != slave then
sleep 2000
goto wait_mnode4_slave_0
endi
print ============== step7: stop dnode2, waiting dnode4
print ============== step7: stop dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep 5000
$cnt = 0
wait_dnode2_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $dnode2Status != offline then
sleep 2000
goto wait_dnode2_offline_0
endi
sleep 3000
print show mnodes
sql show mnodes
print show mnodes
print rows: $rows
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7
print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8
print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9
print ============== step8: restart dnode2, then wait sync end
system sh/exec.sh -n dnode2 -s start
sleep 20000
$cnt = 0
wait_dnode2_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready_0
endi
sleep 3000
print show mnodes
sql show mnodes
print show mnodes
print rows: $rows
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7
print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8
print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9
print ============== step9: stop dnode3, then wait sync end
system sh/exec.sh -n dnode3 -s stop -x SIGINT
sleep 3000
sleep 20000
sql show mnodes
$cnt = 0
print show mnodes
wait_dnode3_offline_0:
print rows: $rows
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode3_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
$dnode2Status = $data4_2
print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode3Status = $data4_3
print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7
$dnode4Status = $data4_4
print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8
print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9
if $dnode3Status != offline then
sleep 2000
goto wait_dnode3_offline_0
endi
print ============== step10: restart dnode3, then wait sync end
system sh/exec.sh -n dnode3 -s start
sleep 3000
sleep 20000
sql show mnodes
$cnt = 0
print show mnodes
wait_dnode3_ready_0:
print rows: $rows
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode3_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
$dnode2Status = $data4_2
print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode3Status = $data4_3
print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7
$dnode4Status = $data4_4
print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8
print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9
if $dnode3Status != ready then
sleep 2000
goto wait_dnode3_ready_0
endi
print ============== step11: stop dnode4, then wait sync end
system sh/exec.sh -n dnode4 -s stop -x SIGINT
sleep 3000
sleep 20000
$cnt = 0
wait_dnode4_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode4_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode4Status = $data4_4
elif $loop_cnt == 1 then
$dnode4Status = $data4_6
elif $loop_cnt == 2 then
$dnode4Status = $data4_8
else then
print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** ****
return
endi
if $dnode4Status != offline then
sleep 2000
goto wait_dnode4_offline_0
endi
print ============== step12: restart dnode4, then wait sync end
system sh/exec.sh -n dnode4 -s start
sleep 3000
sleep 20000
sql show mnodes
$cnt = 0
print show mnodes
wait_dnode4_ready_1:
print rows: $rows
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode4_ready_1
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
$dnode2Status = $data4_2
print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode3Status = $data4_3
print $data0_7 $data1_7 $data2_7 $data3_7 $data4_7
#$dnode4Status = $data4_4
print $data0_8 $data1_8 $data2_8 $data3_8 $data4_8
print $data0_9 $data1_9 $data2_9 $data3_9 $data4_9
if $loop_cnt == 0 then
$dnode4Status = $data4_4
elif $loop_cnt == 1 then
$dnode4Status = $data4_6
elif $loop_cnt == 2 then
$dnode4Status = $data4_8
else then
print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** ****
return
endi
if $dnode4Status != ready then
sleep 2000
goto wait_dnode4_ready_1
endi
print ============== step13: alter replica 2
sql alter database $db replica 2
...
@@ -476,50 +236,14 @@ if $data04 != 2 then
return -1
endi
print ============== step14: stop and drop dnode4, then remove data dir of dnode4
print ============== step14: stop and drop dnode4/dnode5, then remove data dir of dnode4/dnode5
system sh/exec.sh -n dnode4 -s stop -x SIGINT
sleep 3000
system sh/exec.sh -n dnode5 -s stop -x SIGINT
sleep 20000
$cnt = 0
wait_dnode4_offline_1:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode4_offline_1
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode4Status = $data4_4
elif $loop_cnt == 1 then
$dnode4Status = $data4_6
elif $loop_cnt == 2 then
$dnode4Status = $data4_8
else then
print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** ****
return
endi
if $dnode4Status != offline then
sleep 2000
goto wait_dnode4_offline_1
endi
sleep 3000
sql drop dnode $hostname4
sql drop dnode $hostname5
system rm -rf ../../../sim/dnode4/data
system rm -rf ../../../sim/dnode5/data
print ============== step15: alter replica 1
sql alter database $db replica 1
...
@@ -530,7 +254,6 @@ if $data04 != 1 then
return -1
endi
print ============== step16: alter replica 2
sql alter database $db replica 2
sql show databases
...
@@ -546,42 +269,7 @@ system sh/cfg.sh -n dnode1 -c second -v $hostname3
system sh/exec.sh -n dnode1 -s start
sql create dnode $hostname1
sleep 20000
wait_dnode1_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode1_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode1Status = $data4_5
elif $loop_cnt == 1 then
$dnode1Status = $data4_7
elif $loop_cnt == 2 then
$dnode1Status = $data4_9
else then
print **** **** **** END loop cluster do (loop_cnt: $loop_cnt )**** **** **** ****
return
endi
if $dnode1Status != ready then
sleep 2000
goto wait_dnode1_ready_0
endi
print ============== step18: alter replica 3
sql alter database $db replica 3
...