Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
taosdata
TDengine
提交
29949a96
T
TDengine
项目概览
taosdata
/
TDengine
大约 2 年 前同步成功
通知
1192
Star
22018
Fork
4786
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
T
TDengine
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
29949a96
编写于
7月 06, 2022
作者:
H
Haojun Liao
浏览文件
操作
浏览文件
下载
差异文件
other: merge 3.0.
上级
b890676a
44bdb511
变更
54
显示空白变更内容
内联
并排
Showing
54 changed file
with
2494 addition
and
2099 deletion
+2494
-2099
include/libs/index/index.h
include/libs/index/index.h
+1
-1
include/libs/qcom/query.h
include/libs/qcom/query.h
+15
-11
include/libs/scheduler/scheduler.h
include/libs/scheduler/scheduler.h
+10
-32
include/libs/sync/sync.h
include/libs/sync/sync.h
+1
-1
include/os/osMemory.h
include/os/osMemory.h
+1
-1
include/util/taoserror.h
include/util/taoserror.h
+1
-1
source/client/inc/clientInt.h
source/client/inc/clientInt.h
+1
-1
source/client/src/clientImpl.c
source/client/src/clientImpl.c
+39
-27
source/client/src/clientMain.c
source/client/src/clientMain.c
+6
-1
source/client/src/clientMsgHandler.c
source/client/src/clientMsgHandler.c
+1
-1
source/dnode/mgmt/node_mgmt/src/dmMgmt.c
source/dnode/mgmt/node_mgmt/src/dmMgmt.c
+2
-0
source/dnode/mnode/impl/src/mndDb.c
source/dnode/mnode/impl/src/mndDb.c
+39
-65
source/libs/executor/inc/executorimpl.h
source/libs/executor/inc/executorimpl.h
+1
-1
source/libs/executor/src/dataInserter.c
source/libs/executor/src/dataInserter.c
+254
-0
source/libs/executor/src/executil.c
source/libs/executor/src/executil.c
+1
-1
source/libs/function/src/builtins.c
source/libs/function/src/builtins.c
+1
-1
source/libs/function/src/builtinsimpl.c
source/libs/function/src/builtinsimpl.c
+96
-7
source/libs/index/src/index.c
source/libs/index/src/index.c
+2
-1
source/libs/qcom/src/queryUtil.c
source/libs/qcom/src/queryUtil.c
+8
-8
source/libs/qworker/inc/qwInt.h
source/libs/qworker/inc/qwInt.h
+2
-2
source/libs/qworker/src/qwDbg.c
source/libs/qworker/src/qwDbg.c
+18
-18
source/libs/qworker/src/qworker.c
source/libs/qworker/src/qworker.c
+9
-9
source/libs/scheduler/inc/schInt.h
source/libs/scheduler/inc/schInt.h
+62
-22
source/libs/scheduler/src/schDbg.c
source/libs/scheduler/src/schDbg.c
+3
-3
source/libs/scheduler/src/schFlowCtrl.c
source/libs/scheduler/src/schFlowCtrl.c
+1
-1
source/libs/scheduler/src/schJob.c
source/libs/scheduler/src/schJob.c
+301
-1163
source/libs/scheduler/src/schRemote.c
source/libs/scheduler/src/schRemote.c
+60
-89
source/libs/scheduler/src/schStatus.c
source/libs/scheduler/src/schStatus.c
+94
-0
source/libs/scheduler/src/schTask.c
source/libs/scheduler/src/schTask.c
+830
-0
source/libs/scheduler/src/schUtil.c
source/libs/scheduler/src/schUtil.c
+34
-1
source/libs/scheduler/src/scheduler.c
source/libs/scheduler/src/scheduler.c
+26
-128
source/libs/scheduler/test/schedulerTests.cpp
source/libs/scheduler/test/schedulerTests.cpp
+33
-22
source/libs/sync/inc/syncRaftCfg.h
source/libs/sync/inc/syncRaftCfg.h
+7
-7
source/libs/sync/src/syncElection.c
source/libs/sync/src/syncElection.c
+13
-5
source/libs/sync/src/syncMain.c
source/libs/sync/src/syncMain.c
+20
-10
source/libs/sync/src/syncRaftCfg.c
source/libs/sync/src/syncRaftCfg.c
+8
-8
source/libs/sync/src/syncReplication.c
source/libs/sync/src/syncReplication.c
+24
-18
source/libs/sync/test/syncRaftCfgTest.cpp
source/libs/sync/test/syncRaftCfgTest.cpp
+2
-2
source/os/src/osFile.c
source/os/src/osFile.c
+39
-5
source/os/src/osMemory.c
source/os/src/osMemory.c
+3
-3
source/util/src/terror.c
source/util/src/terror.c
+1
-0
tests/pytest/util/gettime.py
tests/pytest/util/gettime.py
+62
-0
tests/script/jenkins/basic.txt
tests/script/jenkins/basic.txt
+2
-1
tests/script/sh/checkValgrind.sh
tests/script/sh/checkValgrind.sh
+19
-5
tests/script/tsim/valgrind/basic1.sim
tests/script/tsim/valgrind/basic1.sim
+14
-4
tests/script/tsim/valgrind/basic2.sim
tests/script/tsim/valgrind/basic2.sim
+2
-2
tests/script/tsim/valgrind/checkError.sim
tests/script/tsim/valgrind/checkError.sim
+0
-91
tests/script/tsim/valgrind/checkError1.sim
tests/script/tsim/valgrind/checkError1.sim
+50
-0
tests/script/tsim/valgrind/checkError2.sim
tests/script/tsim/valgrind/checkError2.sim
+41
-0
tests/system-test/1-insert/alter_stable.py
tests/system-test/1-insert/alter_stable.py
+5
-5
tests/system-test/2-query/Timediff.py
tests/system-test/2-query/Timediff.py
+158
-189
tests/system-test/2-query/distribute_agg_stddev.py
tests/system-test/2-query/distribute_agg_stddev.py
+26
-26
tests/system-test/2-query/timetruncate.py
tests/system-test/2-query/timetruncate.py
+44
-98
tests/system-test/simpletest.bat
tests/system-test/simpletest.bat
+1
-1
未找到文件。
include/libs/index/index.h
浏览文件 @
29949a96
...
...
@@ -208,7 +208,7 @@ int32_t doFilterTag(const SNode* pFilterNode, SIndexMetaArg* metaArg, SArray* re
* destory index env
*
*/
void
indexClean
U
p
();
void
indexClean
u
p
();
#ifdef __cplusplus
}
...
...
include/libs/qcom/query.h
浏览文件 @
29949a96
...
...
@@ -29,12 +29,13 @@ extern "C" {
typedef
enum
{
JOB_TASK_STATUS_NULL
=
0
,
JOB_TASK_STATUS_NOT_START
=
1
,
JOB_TASK_STATUS_EXECUTING
,
JOB_TASK_STATUS_PARTIAL_SUCCEED
,
JOB_TASK_STATUS_SUCCEED
,
JOB_TASK_STATUS_FAILED
,
JOB_TASK_STATUS_DROPPING
,
JOB_TASK_STATUS_INIT
,
JOB_TASK_STATUS_EXEC
,
JOB_TASK_STATUS_PART_SUCC
,
JOB_TASK_STATUS_SUCC
,
JOB_TASK_STATUS_FAIL
,
JOB_TASK_STATUS_DROP
,
JOB_TASK_STATUS_MAX
,
}
EJobTaskType
;
typedef
enum
{
...
...
@@ -59,10 +60,6 @@ typedef struct STableComInfo {
int32_t
rowSize
;
// row size of the schema
}
STableComInfo
;
typedef
struct
SQueryExecRes
{
int32_t
msgType
;
void
*
res
;
}
SQueryExecRes
;
typedef
struct
SIndexMeta
{
#if defined(WINDOWS) || defined(_TD_DARWIN_64)
...
...
@@ -71,6 +68,13 @@ typedef struct SIndexMeta {
}
SIndexMeta
;
typedef
struct
SExecResult
{
int32_t
code
;
uint64_t
numOfRows
;
int32_t
msgType
;
void
*
res
;
}
SExecResult
;
typedef
struct
STbVerInfo
{
char
tbFName
[
TSDB_TABLE_FNAME_LEN
];
int32_t
sversion
;
...
...
@@ -210,7 +214,7 @@ char* jobTaskStatusStr(int32_t status);
SSchema
createSchema
(
int8_t
type
,
int32_t
bytes
,
col_id_t
colId
,
const
char
*
name
);
void
destroyQueryExecRes
(
S
QueryExecRes
*
pRes
);
void
destroyQueryExecRes
(
S
ExecResult
*
pRes
);
int32_t
dataConverToStr
(
char
*
str
,
int
type
,
void
*
buf
,
int32_t
bufSize
,
int32_t
*
len
);
char
*
parseTagDatatoJson
(
void
*
p
);
int32_t
cloneTableMeta
(
STableMeta
*
pSrc
,
STableMeta
**
pDst
);
...
...
include/libs/scheduler/scheduler.h
浏览文件 @
29949a96
...
...
@@ -53,12 +53,6 @@ typedef struct SQueryProfileSummary {
uint64_t
resultSize
;
// generated result size in Kb.
}
SQueryProfileSummary
;
typedef
struct
SQueryResult
{
int32_t
code
;
uint64_t
numOfRows
;
SQueryExecRes
res
;
}
SQueryResult
;
typedef
struct
STaskInfo
{
SQueryNodeAddr
addr
;
SSubQueryMsg
*
msg
;
...
...
@@ -69,50 +63,34 @@ typedef struct SSchdFetchParam {
int32_t
*
code
;
}
SSchdFetchParam
;
typedef
void
(
*
schedulerExecFp
)(
S
Query
Result
*
pResult
,
void
*
param
,
int32_t
code
);
typedef
void
(
*
schedulerExecFp
)(
S
Exec
Result
*
pResult
,
void
*
param
,
int32_t
code
);
typedef
void
(
*
schedulerFetchFp
)(
void
*
pResult
,
void
*
param
,
int32_t
code
);
typedef
bool
(
*
schedulerChkKillFp
)(
void
*
param
);
typedef
struct
SSchedulerReq
{
bool
syncReq
;
SRequestConnInfo
*
pConn
;
SArray
*
pNodeList
;
SQueryPlan
*
pDag
;
const
char
*
sql
;
int64_t
startTs
;
schedulerExecFp
execFp
;
void
*
execParam
;
schedulerFetchFp
fetchFp
;
void
*
cbParam
;
schedulerChkKillFp
chkKillFp
;
void
*
chkKillParam
;
SExecResult
*
pExecRes
;
void
**
pFetchRes
;
}
SSchedulerReq
;
int32_t
schedulerInit
(
SSchedulerCfg
*
cfg
);
/**
* Process the query job, generated according to the query physical plan.
* This is a synchronized API, and is also thread-safety.
* @param nodeList Qnode/Vnode address list, element is SQueryNodeAddr
* @return
*/
int32_t
schedulerExecJob
(
SSchedulerReq
*
pReq
,
int64_t
*
pJob
,
SQueryResult
*
pRes
);
/**
* Process the query job, generated according to the query physical plan.
* This is a asynchronized API, and is also thread-safety.
* @param pNodeList Qnode/Vnode address list, element is SQueryNodeAddr
* @return
*/
int32_t
schedulerAsyncExecJob
(
SSchedulerReq
*
pReq
,
int64_t
*
pJob
);
int32_t
schedulerExecJob
(
SSchedulerReq
*
pReq
,
int64_t
*
pJob
);
/**
* Fetch query result from the remote query executor
* @param pJob
* @param data
* @return
*/
int32_t
schedulerFetchRows
(
int64_t
job
,
void
**
data
);
int32_t
schedulerFetchRows
(
int64_t
jobId
,
SSchedulerReq
*
pReq
);
void
scheduler
AsyncFetchRows
(
int64_t
job
,
schedulerFetchFp
fp
,
void
*
param
);
void
scheduler
FetchRowsA
(
int64_t
job
,
schedulerFetchFp
fp
,
void
*
param
);
int32_t
schedulerGetTasksStatus
(
int64_t
job
,
SArray
*
pSub
);
...
...
@@ -134,7 +112,7 @@ void schedulerFreeJob(int64_t* job, int32_t errCode);
void
schedulerDestroy
(
void
);
void
schdExecCallback
(
S
Query
Result
*
pResult
,
void
*
param
,
int32_t
code
);
void
schdExecCallback
(
S
Exec
Result
*
pResult
,
void
*
param
,
int32_t
code
);
#ifdef __cplusplus
}
...
...
include/libs/sync/sync.h
浏览文件 @
29949a96
...
...
@@ -26,7 +26,7 @@ extern "C" {
extern
bool
gRaftDetailLog
;
#define SYNC_MAX_BATCH_SIZE
1
00
#define SYNC_MAX_BATCH_SIZE
5
00
#define SYNC_INDEX_BEGIN 0
#define SYNC_INDEX_INVALID -1
#define SYNC_TERM_INVALID 0xFFFFFFFFFFFFFFFF
...
...
include/os/osMemory.h
浏览文件 @
29949a96
...
...
@@ -32,7 +32,7 @@ extern "C" {
void
*
taosMemoryMalloc
(
int32_t
size
);
void
*
taosMemoryCalloc
(
int32_t
num
,
int32_t
size
);
void
*
taosMemoryRealloc
(
void
*
ptr
,
int32_t
size
);
void
*
taosMemoryStrDup
(
void
*
ptr
);
void
*
taosMemoryStrDup
(
const
char
*
ptr
);
void
taosMemoryFree
(
void
*
ptr
);
int32_t
taosMemorySize
(
void
*
ptr
);
void
taosPrintBackTrace
();
...
...
include/util/taoserror.h
浏览文件 @
29949a96
...
...
@@ -390,10 +390,10 @@ int32_t* taosGetErrno();
#define TSDB_CODE_QRY_TASK_MSG_ERROR TAOS_DEF_ERROR_CODE(0, 0x0719)
#define TSDB_CODE_QRY_JOB_FREED TAOS_DEF_ERROR_CODE(0, 0x071A)
#define TSDB_CODE_QRY_TASK_STATUS_ERROR TAOS_DEF_ERROR_CODE(0, 0x071B)
//json
#define TSDB_CODE_QRY_JSON_IN_ERROR TAOS_DEF_ERROR_CODE(0, 0x071C)
#define TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR TAOS_DEF_ERROR_CODE(0, 0x071D)
#define TSDB_CODE_QRY_JSON_IN_GROUP_ERROR TAOS_DEF_ERROR_CODE(0, 0x071E)
#define TSDB_CODE_QRY_JOB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x071F)
// grant
#define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800)
...
...
source/client/inc/clientInt.h
浏览文件 @
29949a96
...
...
@@ -156,7 +156,7 @@ typedef struct SResultColumn {
}
SResultColumn
;
typedef
struct
SReqResultInfo
{
S
QueryExecRes
execRes
;
S
ExecResult
execRes
;
const
char
*
pRspMsg
;
const
char
*
pData
;
TAOS_FIELD
*
fields
;
// todo, column names are not needed.
...
...
source/client/src/clientImpl.c
浏览文件 @
29949a96
...
...
@@ -627,22 +627,26 @@ _return:
int32_t
scheduleQuery
(
SRequestObj
*
pRequest
,
SQueryPlan
*
pDag
,
SArray
*
pNodeList
)
{
void
*
pTransporter
=
pRequest
->
pTscObj
->
pAppInfo
->
pTransporter
;
S
Query
Result
res
=
{
0
};
S
Exec
Result
res
=
{
0
};
SRequestConnInfo
conn
=
{.
pTrans
=
pRequest
->
pTscObj
->
pAppInfo
->
pTransporter
,
.
requestId
=
pRequest
->
requestId
,
.
requestObjRefId
=
pRequest
->
self
};
SSchedulerReq
req
=
{.
pConn
=
&
conn
,
SSchedulerReq
req
=
{
.
syncReq
=
true
,
.
pConn
=
&
conn
,
.
pNodeList
=
pNodeList
,
.
pDag
=
pDag
,
.
sql
=
pRequest
->
sqlstr
,
.
startTs
=
pRequest
->
metric
.
start
,
.
execFp
=
NULL
,
.
exec
Param
=
NULL
,
.
cb
Param
=
NULL
,
.
chkKillFp
=
chkRequestKilled
,
.
chkKillParam
=
(
void
*
)
pRequest
->
self
};
.
chkKillParam
=
(
void
*
)
pRequest
->
self
,
.
pExecRes
=
&
res
,
};
int32_t
code
=
schedulerExecJob
(
&
req
,
&
pRequest
->
body
.
queryJob
,
&
res
);
pRequest
->
body
.
resInfo
.
execRes
=
res
.
res
;
int32_t
code
=
schedulerExecJob
(
&
req
,
&
pRequest
->
body
.
queryJob
);
memcpy
(
&
pRequest
->
body
.
resInfo
.
execRes
,
&
res
,
sizeof
(
res
))
;
if
(
code
!=
TSDB_CODE_SUCCESS
)
{
schedulerFreeJob
(
&
pRequest
->
body
.
queryJob
,
0
);
...
...
@@ -753,7 +757,7 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
}
SEpSet
epset
=
getEpSet_s
(
&
pAppInfo
->
mgmtEp
);
S
QueryExecRes
*
pRes
=
&
pRequest
->
body
.
resInfo
.
execRes
;
S
ExecResult
*
pRes
=
&
pRequest
->
body
.
resInfo
.
execRes
;
switch
(
pRes
->
msgType
)
{
case
TDMT_VND_ALTER_TABLE
:
...
...
@@ -779,10 +783,10 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
return
code
;
}
void
schedulerExecCb
(
S
Query
Result
*
pResult
,
void
*
param
,
int32_t
code
)
{
void
schedulerExecCb
(
S
Exec
Result
*
pResult
,
void
*
param
,
int32_t
code
)
{
SRequestObj
*
pRequest
=
(
SRequestObj
*
)
param
;
pRequest
->
code
=
code
;
pRequest
->
body
.
resInfo
.
execRes
=
pResult
->
res
;
memcpy
(
&
pRequest
->
body
.
resInfo
.
execRes
,
pResult
,
sizeof
(
*
pResult
))
;
if
(
TDMT_VND_SUBMIT
==
pRequest
->
type
||
TDMT_VND_DELETE
==
pRequest
->
type
||
TDMT_VND_CREATE_TABLE
==
pRequest
->
type
)
{
...
...
@@ -939,16 +943,20 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
SRequestConnInfo
conn
=
{
.
pTrans
=
pAppInfo
->
pTransporter
,
.
requestId
=
pRequest
->
requestId
,
.
requestObjRefId
=
pRequest
->
self
};
SSchedulerReq
req
=
{.
pConn
=
&
conn
,
SSchedulerReq
req
=
{
.
syncReq
=
false
,
.
pConn
=
&
conn
,
.
pNodeList
=
pNodeList
,
.
pDag
=
pDag
,
.
sql
=
pRequest
->
sqlstr
,
.
startTs
=
pRequest
->
metric
.
start
,
.
execFp
=
schedulerExecCb
,
.
exec
Param
=
pRequest
,
.
cb
Param
=
pRequest
,
.
chkKillFp
=
chkRequestKilled
,
.
chkKillParam
=
(
void
*
)
pRequest
->
self
};
code
=
schedulerAsyncExecJob
(
&
req
,
&
pRequest
->
body
.
queryJob
);
.
chkKillParam
=
(
void
*
)
pRequest
->
self
,
.
pExecRes
=
NULL
,
};
code
=
schedulerExecJob
(
&
req
,
&
pRequest
->
body
.
queryJob
);
taosArrayDestroy
(
pNodeList
);
}
else
{
tscDebug
(
"0x%"
PRIx64
" plan not executed, code:%s 0x%"
PRIx64
,
pRequest
->
self
,
tstrerror
(
code
),
...
...
@@ -1387,7 +1395,11 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4)
}
SReqResultInfo
*
pResInfo
=
&
pRequest
->
body
.
resInfo
;
pRequest
->
code
=
schedulerFetchRows
(
pRequest
->
body
.
queryJob
,
(
void
**
)
&
pResInfo
->
pData
);
SSchedulerReq
req
=
{
.
syncReq
=
true
,
.
pFetchRes
=
(
void
**
)
&
pResInfo
->
pData
,
};
pRequest
->
code
=
schedulerFetchRows
(
pRequest
->
body
.
queryJob
,
&
req
);
if
(
pRequest
->
code
!=
TSDB_CODE_SUCCESS
)
{
pResultInfo
->
numOfRows
=
0
;
return
NULL
;
...
...
source/client/src/clientMain.c
浏览文件 @
29949a96
...
...
@@ -858,7 +858,12 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
}
}
schedulerAsyncFetchRows
(
pRequest
->
body
.
queryJob
,
fetchCallback
,
pRequest
);
SSchedulerReq
req
=
{
.
syncReq
=
false
,
.
fetchFp
=
fetchCallback
,
.
cbParam
=
pRequest
,
};
schedulerFetchRows
(
pRequest
->
body
.
queryJob
,
&
req
);
}
void
taos_fetch_raw_block_a
(
TAOS_RES
*
res
,
__taos_async_fn_t
fp
,
void
*
param
)
{
...
...
source/client/src/clientMsgHandler.c
浏览文件 @
29949a96
...
...
@@ -266,7 +266,7 @@ int32_t processAlterStbRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
if
(
pRequest
->
body
.
queryFp
!=
NULL
)
{
S
QueryExecRes
*
pRes
=
&
pRequest
->
body
.
resInfo
.
execRes
;
S
ExecResult
*
pRes
=
&
pRequest
->
body
.
resInfo
.
execRes
;
if
(
code
==
TSDB_CODE_SUCCESS
)
{
SCatalog
*
pCatalog
=
NULL
;
...
...
source/dnode/mgmt/node_mgmt/src/dmMgmt.c
浏览文件 @
29949a96
...
...
@@ -16,6 +16,7 @@
#define _DEFAULT_SOURCE
#include "dmMgmt.h"
#include "dmNodes.h"
#include "index.h"
#include "qworker.h"
static
bool
dmRequireNode
(
SDnode
*
pDnode
,
SMgmtWrapper
*
pWrapper
)
{
...
...
@@ -213,6 +214,7 @@ void dmCleanupDnode(SDnode *pDnode) {
dmCleanupServer
(
pDnode
);
dmClearVars
(
pDnode
);
rpcCleanup
();
indexCleanup
();
dDebug
(
"dnode is closed, ptr:%p"
,
pDnode
);
}
...
...
source/dnode/mnode/impl/src/mndDb.c
浏览文件 @
29949a96
...
...
@@ -1373,9 +1373,9 @@ char *buildRetension(SArray *pRetension) {
static
void
dumpDbInfoData
(
SSDataBlock
*
pBlock
,
SDbObj
*
pDb
,
SShowObj
*
pShow
,
int32_t
rows
,
int64_t
numOfTables
,
bool
sysDb
,
ESdbStatus
objStatus
,
bool
sysinfo
)
{
int32_t
cols
=
0
;
int32_t
bytes
=
pShow
->
pMeta
->
pSchemas
[
cols
].
bytes
;
char
*
buf
=
taosMemoryMalloc
(
bytes
);
const
char
*
name
=
mndGetDbStr
(
pDb
->
name
);
if
(
name
!=
NULL
)
{
STR_WITH_MAXSIZE_TO_VARSTR
(
buf
,
name
,
bytes
);
...
...
@@ -1383,11 +1383,11 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
STR_WITH_MAXSIZE_TO_VARSTR
(
buf
,
"NULL"
,
bytes
);
}
char
*
status
=
"ready"
;
if
(
objStatus
==
SDB_STATUS_CREATING
)
status
=
"creating"
;
if
(
objStatus
==
SDB_STATUS_DROPPING
)
status
=
"dropping"
;
char
status
B
[
24
]
=
{
0
};
STR_WITH_SIZE_TO_VARSTR
(
status
B
,
status
,
strlen
(
status
));
char
*
status
Str
=
"ready"
;
if
(
objStatus
==
SDB_STATUS_CREATING
)
status
Str
=
"creating"
;
if
(
objStatus
==
SDB_STATUS_DROPPING
)
status
Str
=
"dropping"
;
char
status
Vstr
[
24
]
=
{
0
};
STR_WITH_SIZE_TO_VARSTR
(
status
Vstr
,
statusStr
,
strlen
(
statusStr
));
if
(
sysDb
||
!
sysinfo
)
{
for
(
int32_t
i
=
0
;
i
<
pShow
->
numOfColumns
;
++
i
)
{
...
...
@@ -1397,7 +1397,7 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
}
else
if
(
i
==
3
)
{
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
&
numOfTables
,
false
);
}
else
if
(
i
==
20
)
{
colDataAppend
(
pColInfo
,
rows
,
status
B
,
false
);
colDataAppend
(
pColInfo
,
rows
,
status
Vstr
,
false
);
}
else
{
colDataAppendNULL
(
pColInfo
,
rows
);
}
...
...
@@ -1405,7 +1405,6 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
}
else
{
SColumnInfoData
*
pColInfo
=
taosArrayGet
(
pBlock
->
pDataBlock
,
cols
++
);
colDataAppend
(
pColInfo
,
rows
,
buf
,
false
);
taosMemoryFree
(
buf
);
pColInfo
=
taosArrayGet
(
pBlock
->
pDataBlock
,
cols
++
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
&
pDb
->
createdTime
,
false
);
...
...
@@ -1419,30 +1418,29 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
pColInfo
=
taosArrayGet
(
pBlock
->
pDataBlock
,
cols
++
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
&
pDb
->
cfg
.
replications
,
false
);
const
char
*
s
rc
=
pDb
->
cfg
.
strict
?
"strict"
:
"no_strict"
;
char
strict
[
24
]
=
{
0
};
STR_WITH_SIZE_TO_VARSTR
(
strict
,
src
,
strlen
(
src
));
const
char
*
s
trictStr
=
pDb
->
cfg
.
strict
?
"strict"
:
"no_strict"
;
char
strict
Vstr
[
24
]
=
{
0
};
STR_WITH_SIZE_TO_VARSTR
(
strict
Vstr
,
strictStr
,
strlen
(
strictStr
));
pColInfo
=
taosArrayGet
(
pBlock
->
pDataBlock
,
cols
++
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
strict
,
false
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
strict
Vstr
,
false
);
char
tmp
[
128
]
=
{
0
};
int32_t
len
=
0
;
len
=
sprintf
(
&
tmp
[
VARSTR_HEADER_SIZE
],
"%dm"
,
pDb
->
cfg
.
daysPerFile
);
varDataSetLen
(
tmp
,
len
);
char
durationVstr
[
128
]
=
{
0
};
int32_t
len
=
sprintf
(
&
durationVstr
[
VARSTR_HEADER_SIZE
],
"%dm"
,
pDb
->
cfg
.
daysPerFile
);
varDataSetLen
(
durationVstr
,
len
);
pColInfo
=
taosArrayGet
(
pBlock
->
pDataBlock
,
cols
++
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
tmp
,
false
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
durationVstr
,
false
);
char
keepVstr
[
128
]
=
{
0
};
if
(
pDb
->
cfg
.
daysToKeep0
>
pDb
->
cfg
.
daysToKeep1
||
pDb
->
cfg
.
daysToKeep0
>
pDb
->
cfg
.
daysToKeep2
)
{
len
=
sprintf
(
&
tmp
[
VARSTR_HEADER_SIZE
],
"%dm,%dm,%dm"
,
pDb
->
cfg
.
daysToKeep1
,
pDb
->
cfg
.
daysToKeep2
,
len
=
sprintf
(
&
keepVstr
[
VARSTR_HEADER_SIZE
],
"%dm,%dm,%dm"
,
pDb
->
cfg
.
daysToKeep1
,
pDb
->
cfg
.
daysToKeep2
,
pDb
->
cfg
.
daysToKeep0
);
}
else
{
len
=
sprintf
(
&
tmp
[
VARSTR_HEADER_SIZE
],
"%dm,%dm,%dm"
,
pDb
->
cfg
.
daysToKeep0
,
pDb
->
cfg
.
daysToKeep1
,
len
=
sprintf
(
&
keepVstr
[
VARSTR_HEADER_SIZE
],
"%dm,%dm,%dm"
,
pDb
->
cfg
.
daysToKeep0
,
pDb
->
cfg
.
daysToKeep1
,
pDb
->
cfg
.
daysToKeep2
);
}
varDataSetLen
(
tmp
,
len
);
varDataSetLen
(
keepVstr
,
len
);
pColInfo
=
taosArrayGet
(
pBlock
->
pDataBlock
,
cols
++
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
tmp
,
false
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
keepVstr
,
false
);
pColInfo
=
taosArrayGet
(
pBlock
->
pDataBlock
,
cols
++
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
&
pDb
->
cfg
.
buffer
,
false
);
...
...
@@ -1469,68 +1467,49 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
&
pDb
->
cfg
.
compression
,
false
);
pColInfo
=
taosArrayGet
(
pBlock
->
pDataBlock
,
cols
++
);
STR_WITH_SIZE_TO_VARSTR
(
strict
,
src
,
strlen
(
src
));
#if 0
char cacheModel[24] = {0};
bool null = false;
if (pDb->cfg.cacheLastRow == 0) {
STR_TO_VARSTR(cacheModel, "no_cache");
} else if (pDb->cfg.cacheLastRow == 1) {
STR_TO_VARSTR(cacheModel, "last_row_cache")
} else {
null = true;
}
colDataAppend(pColInfo, rows, cacheModel, null);
#endif
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
&
pDb
->
cfg
.
cacheLastRow
,
false
);
c
har
*
prec
=
NULL
;
c
onst
char
*
precStr
=
NULL
;
switch
(
pDb
->
cfg
.
precision
)
{
case
TSDB_TIME_PRECISION_MILLI
:
prec
=
TSDB_TIME_PRECISION_MILLI_STR
;
prec
Str
=
TSDB_TIME_PRECISION_MILLI_STR
;
break
;
case
TSDB_TIME_PRECISION_MICRO
:
prec
=
TSDB_TIME_PRECISION_MICRO_STR
;
prec
Str
=
TSDB_TIME_PRECISION_MICRO_STR
;
break
;
case
TSDB_TIME_PRECISION_NANO
:
prec
=
TSDB_TIME_PRECISION_NANO_STR
;
prec
Str
=
TSDB_TIME_PRECISION_NANO_STR
;
break
;
default:
prec
=
"none"
;
prec
Str
=
"none"
;
break
;
}
char
t
[
10
]
=
{
0
};
STR_WITH_SIZE_TO_VARSTR
(
t
,
prec
,
2
);
char
precVstr
[
10
]
=
{
0
};
STR_WITH_SIZE_TO_VARSTR
(
precVstr
,
precStr
,
2
);
pColInfo
=
taosArrayGet
(
pBlock
->
pDataBlock
,
cols
++
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
t
,
false
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
precVstr
,
false
);
pColInfo
=
taosArrayGet
(
pBlock
->
pDataBlock
,
cols
++
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
&
pDb
->
cfg
.
numOfStables
,
false
);
pColInfo
=
taosArrayGet
(
pBlock
->
pDataBlock
,
cols
++
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
statusB
,
false
);
// pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
// colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.schemaless, false);
char
*
p
=
buildRetension
(
pDb
->
cfg
.
pRetensions
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
statusVstr
,
false
);
char
*
rentensionVstr
=
buildRetension
(
pDb
->
cfg
.
pRetensions
);
pColInfo
=
taosArrayGet
(
pBlock
->
pDataBlock
,
cols
);
if
(
p
==
NULL
)
{
if
(
rentensionVstr
==
NULL
)
{
colDataAppendNULL
(
pColInfo
,
rows
);
}
else
{
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
p
,
false
);
taosMemoryFree
(
p
);
colDataAppend
(
pColInfo
,
rows
,
(
const
char
*
)
rentensionVstr
,
false
);
taosMemoryFree
(
rentensionVstr
);
}
}
taosMemoryFree
(
buf
);
}
static
void
setInformationSchemaDbCfg
(
SDbObj
*
pDbObj
)
{
ASSERT
(
pDbObj
!=
NULL
);
strncpy
(
pDbObj
->
name
,
TSDB_INFORMATION_SCHEMA_DB
,
tListLen
(
pDbObj
->
name
));
tstrncpy
(
pDbObj
->
name
,
TSDB_INFORMATION_SCHEMA_DB
,
tListLen
(
pDbObj
->
name
));
pDbObj
->
createdTime
=
0
;
pDbObj
->
cfg
.
numOfVgroups
=
0
;
pDbObj
->
cfg
.
strict
=
1
;
...
...
@@ -1539,9 +1518,7 @@ static void setInformationSchemaDbCfg(SDbObj *pDbObj) {
}
static
void
setPerfSchemaDbCfg
(
SDbObj
*
pDbObj
)
{
ASSERT
(
pDbObj
!=
NULL
);
strncpy
(
pDbObj
->
name
,
TSDB_PERFORMANCE_SCHEMA_DB
,
tListLen
(
pDbObj
->
name
));
tstrncpy
(
pDbObj
->
name
,
TSDB_PERFORMANCE_SCHEMA_DB
,
tListLen
(
pDbObj
->
name
));
pDbObj
->
createdTime
=
0
;
pDbObj
->
cfg
.
numOfVgroups
=
0
;
pDbObj
->
cfg
.
strict
=
1
;
...
...
@@ -1585,14 +1562,11 @@ static int32_t mndRetrieveDbs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc
while
(
numOfRows
<
rowsCapacity
)
{
pShow
->
pIter
=
sdbFetchAll
(
pSdb
,
SDB_DB
,
pShow
->
pIter
,
(
void
**
)
&
pDb
,
&
objStatus
);
if
(
pShow
->
pIter
==
NULL
)
{
break
;
}
if
(
pShow
->
pIter
==
NULL
)
break
;
if
(
mndCheckDbPrivilege
(
pMnode
,
pReq
->
info
.
conn
.
user
,
MND_OPER_READ_OR_WRITE_DB
,
pDb
)
==
0
)
{
int32_t
numOfTables
=
0
;
sdbTraverse
(
pSdb
,
SDB_VGROUP
,
mndGetTablesOfDbFp
,
&
numOfTables
,
NULL
,
NULL
);
dumpDbInfoData
(
pBlock
,
pDb
,
pShow
,
numOfRows
,
numOfTables
,
false
,
objStatus
,
sysinfo
);
numOfRows
++
;
}
...
...
source/libs/executor/inc/executorimpl.h
浏览文件 @
29949a96
...
...
@@ -240,7 +240,7 @@ typedef struct SColMatchInfo {
int32_t
srcSlotId
;
// source slot id
int32_t
colId
;
int32_t
targetSlotId
;
bool
output
;
bool
output
;
// todo remove this?
bool
reserved
;
int32_t
matchType
;
// determinate the source according to col id or slot id
}
SColMatchInfo
;
...
...
source/libs/executor/src/dataInserter.c
0 → 100644
浏览文件 @
29949a96
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "dataSinkInt.h"
#include "dataSinkMgt.h"
#include "executorimpl.h"
#include "planner.h"
#include "tcompression.h"
#include "tdatablock.h"
#include "tglobal.h"
#include "tqueue.h"
extern
SDataSinkStat
gDataSinkStat
;
typedef
struct
SDataInserterBuf
{
int32_t
useSize
;
int32_t
allocSize
;
char
*
pData
;
}
SDataInserterBuf
;
typedef
struct
SDataCacheEntry
{
int32_t
dataLen
;
int32_t
numOfRows
;
int32_t
numOfCols
;
int8_t
compressed
;
char
data
[];
}
SDataCacheEntry
;
typedef
struct
SDataInserterHandle
{
SDataSinkHandle
sink
;
SDataSinkManager
*
pManager
;
SDataBlockDescNode
*
pSchema
;
SDataDeleterNode
*
pDeleter
;
SDeleterParam
*
pParam
;
STaosQueue
*
pDataBlocks
;
SDataInserterBuf
nextOutput
;
int32_t
status
;
bool
queryEnd
;
uint64_t
useconds
;
uint64_t
cachedSize
;
TdThreadMutex
mutex
;
}
SDataInserterHandle
;
static
bool
needCompress
(
const
SSDataBlock
*
pData
,
int32_t
numOfCols
)
{
if
(
tsCompressColData
<
0
||
0
==
pData
->
info
.
rows
)
{
return
false
;
}
for
(
int32_t
col
=
0
;
col
<
numOfCols
;
++
col
)
{
SColumnInfoData
*
pColRes
=
taosArrayGet
(
pData
->
pDataBlock
,
col
);
int32_t
colSize
=
pColRes
->
info
.
bytes
*
pData
->
info
.
rows
;
if
(
NEEDTO_COMPRESS_QUERY
(
colSize
))
{
return
true
;
}
}
return
false
;
}
static
void
toDataCacheEntry
(
SDataInserterHandle
*
pHandle
,
const
SInputData
*
pInput
,
SDataInserterBuf
*
pBuf
)
{
int32_t
numOfCols
=
LIST_LENGTH
(
pHandle
->
pSchema
->
pSlots
);
SDataCacheEntry
*
pEntry
=
(
SDataCacheEntry
*
)
pBuf
->
pData
;
pEntry
->
compressed
=
0
;
pEntry
->
numOfRows
=
pInput
->
pData
->
info
.
rows
;
pEntry
->
numOfCols
=
taosArrayGetSize
(
pInput
->
pData
->
pDataBlock
);
pEntry
->
dataLen
=
sizeof
(
SDeleterRes
);
ASSERT
(
1
==
pEntry
->
numOfRows
);
ASSERT
(
1
==
pEntry
->
numOfCols
);
pBuf
->
useSize
=
sizeof
(
SDataCacheEntry
);
SColumnInfoData
*
pColRes
=
(
SColumnInfoData
*
)
taosArrayGet
(
pInput
->
pData
->
pDataBlock
,
0
);
SDeleterRes
*
pRes
=
(
SDeleterRes
*
)
pEntry
->
data
;
pRes
->
suid
=
pHandle
->
pParam
->
suid
;
pRes
->
uidList
=
pHandle
->
pParam
->
pUidList
;
pRes
->
skey
=
pHandle
->
pDeleter
->
deleteTimeRange
.
skey
;
pRes
->
ekey
=
pHandle
->
pDeleter
->
deleteTimeRange
.
ekey
;
pRes
->
affectedRows
=
*
(
int64_t
*
)
pColRes
->
pData
;
pBuf
->
useSize
+=
pEntry
->
dataLen
;
atomic_add_fetch_64
(
&
pHandle
->
cachedSize
,
pEntry
->
dataLen
);
atomic_add_fetch_64
(
&
gDataSinkStat
.
cachedSize
,
pEntry
->
dataLen
);
}
static
bool
allocBuf
(
SDataInserterHandle
*
pDeleter
,
const
SInputData
*
pInput
,
SDataInserterBuf
*
pBuf
)
{
uint32_t
capacity
=
pDeleter
->
pManager
->
cfg
.
maxDataBlockNumPerQuery
;
if
(
taosQueueItemSize
(
pDeleter
->
pDataBlocks
)
>
capacity
)
{
qError
(
"SinkNode queue is full, no capacity, max:%d, current:%d, no capacity"
,
capacity
,
taosQueueItemSize
(
pDeleter
->
pDataBlocks
));
return
false
;
}
pBuf
->
allocSize
=
sizeof
(
SDataCacheEntry
)
+
sizeof
(
SDeleterRes
);
pBuf
->
pData
=
taosMemoryMalloc
(
pBuf
->
allocSize
);
if
(
pBuf
->
pData
==
NULL
)
{
qError
(
"SinkNode failed to malloc memory, size:%d, code:%d"
,
pBuf
->
allocSize
,
TAOS_SYSTEM_ERROR
(
errno
));
}
return
NULL
!=
pBuf
->
pData
;
}
static
int32_t
updateStatus
(
SDataInserterHandle
*
pDeleter
)
{
taosThreadMutexLock
(
&
pDeleter
->
mutex
);
int32_t
blockNums
=
taosQueueItemSize
(
pDeleter
->
pDataBlocks
);
int32_t
status
=
(
0
==
blockNums
?
DS_BUF_EMPTY
:
(
blockNums
<
pDeleter
->
pManager
->
cfg
.
maxDataBlockNumPerQuery
?
DS_BUF_LOW
:
DS_BUF_FULL
));
pDeleter
->
status
=
status
;
taosThreadMutexUnlock
(
&
pDeleter
->
mutex
);
return
status
;
}
static
int32_t
getStatus
(
SDataInserterHandle
*
pDeleter
)
{
taosThreadMutexLock
(
&
pDeleter
->
mutex
);
int32_t
status
=
pDeleter
->
status
;
taosThreadMutexUnlock
(
&
pDeleter
->
mutex
);
return
status
;
}
static
int32_t
putDataBlock
(
SDataSinkHandle
*
pHandle
,
const
SInputData
*
pInput
,
bool
*
pContinue
)
{
SDataInserterHandle
*
pDeleter
=
(
SDataInserterHandle
*
)
pHandle
;
SDataInserterBuf
*
pBuf
=
taosAllocateQitem
(
sizeof
(
SDataInserterBuf
),
DEF_QITEM
);
if
(
NULL
==
pBuf
||
!
allocBuf
(
pDeleter
,
pInput
,
pBuf
))
{
return
TSDB_CODE_QRY_OUT_OF_MEMORY
;
}
toDataCacheEntry
(
pDeleter
,
pInput
,
pBuf
);
taosWriteQitem
(
pDeleter
->
pDataBlocks
,
pBuf
);
*
pContinue
=
(
DS_BUF_LOW
==
updateStatus
(
pDeleter
)
?
true
:
false
);
return
TSDB_CODE_SUCCESS
;
}
static
void
endPut
(
struct
SDataSinkHandle
*
pHandle
,
uint64_t
useconds
)
{
SDataInserterHandle
*
pDeleter
=
(
SDataInserterHandle
*
)
pHandle
;
taosThreadMutexLock
(
&
pDeleter
->
mutex
);
pDeleter
->
queryEnd
=
true
;
pDeleter
->
useconds
=
useconds
;
taosThreadMutexUnlock
(
&
pDeleter
->
mutex
);
}
static
void
getDataLength
(
SDataSinkHandle
*
pHandle
,
int32_t
*
pLen
,
bool
*
pQueryEnd
)
{
SDataInserterHandle
*
pDeleter
=
(
SDataInserterHandle
*
)
pHandle
;
if
(
taosQueueEmpty
(
pDeleter
->
pDataBlocks
))
{
*
pQueryEnd
=
pDeleter
->
queryEnd
;
*
pLen
=
0
;
return
;
}
SDataInserterBuf
*
pBuf
=
NULL
;
taosReadQitem
(
pDeleter
->
pDataBlocks
,
(
void
**
)
&
pBuf
);
memcpy
(
&
pDeleter
->
nextOutput
,
pBuf
,
sizeof
(
SDataInserterBuf
));
taosFreeQitem
(
pBuf
);
*
pLen
=
((
SDataCacheEntry
*
)(
pDeleter
->
nextOutput
.
pData
))
->
dataLen
;
*
pQueryEnd
=
pDeleter
->
queryEnd
;
qDebug
(
"got data len %d, row num %d in sink"
,
*
pLen
,
((
SDataCacheEntry
*
)(
pDeleter
->
nextOutput
.
pData
))
->
numOfRows
);
}
// Copies the staged output block (nextOutput, filled by getDataLength) into
// the caller-provided SOutputData, updates cache-size accounting, and
// releases the staged payload. When no block is staged, only end-of-query
// metadata is reported.
static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
  SDataInserterHandle* pInserter = (SDataInserterHandle*)pHandle;

  if (pInserter->nextOutput.pData == NULL) {
    // No staged block: this is only legal once the query has ended.
    assert(pInserter->queryEnd);
    pOutput->useconds = pInserter->useconds;
    pOutput->precision = pInserter->pSchema->precision;
    pOutput->bufStatus = DS_BUF_EMPTY;
    pOutput->queryEnd = pInserter->queryEnd;
    return TSDB_CODE_SUCCESS;
  }

  SDataCacheEntry* pEntry = (SDataCacheEntry*)(pInserter->nextOutput.pData);
  memcpy(pOutput->pData, pEntry->data, pEntry->dataLen);
  pOutput->numOfRows = pEntry->numOfRows;
  pOutput->numOfCols = pEntry->numOfCols;
  pOutput->compressed = pEntry->compressed;

  // Account for the bytes leaving both the per-handle and global caches.
  atomic_sub_fetch_64(&pInserter->cachedSize, pEntry->dataLen);
  atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen);

  taosMemoryFreeClear(pInserter->nextOutput.pData);  // todo persistent
  pOutput->bufStatus = updateStatus(pInserter);

  taosThreadMutexLock(&pInserter->mutex);
  pOutput->queryEnd = pInserter->queryEnd;
  pOutput->useconds = pInserter->useconds;
  pOutput->precision = pInserter->pSchema->precision;
  taosThreadMutexUnlock(&pInserter->mutex);

  return TSDB_CODE_SUCCESS;
}
// Tears down the inserter sink: returns its cached bytes to the global
// accounting, frees the staged output, drains and frees every queued buffer,
// then closes the queue and destroys the mutex.
static int32_t destroyDataSinker(SDataSinkHandle* pHandle) {
  SDataInserterHandle* pInserter = (SDataInserterHandle*)pHandle;

  atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pInserter->cachedSize);
  taosMemoryFreeClear(pInserter->nextOutput.pData);

  // Drain any blocks that were produced but never fetched.
  while (!taosQueueEmpty(pInserter->pDataBlocks)) {
    SDataInserterBuf* pItem = NULL;
    taosReadQitem(pInserter->pDataBlocks, (void**)&pItem);
    taosMemoryFreeClear(pItem->pData);
    taosFreeQitem(pItem);
  }

  taosCloseQueue(pInserter->pDataBlocks);
  taosThreadMutexDestroy(&pInserter->mutex);
  return TSDB_CODE_SUCCESS;
}
// Returns the number of bytes currently cached by this sink handle.
static int32_t getCacheSize(struct SDataSinkHandle* pHandle, uint64_t* size) {
  SDataInserterHandle* pInserter = (SDataInserterHandle*)pHandle;
  *size = atomic_load_64(&pInserter->cachedSize);
  return TSDB_CODE_SUCCESS;
}
// Creates a data-sink handle for inserting query output.
//
// Wires the generic SDataSinkHandle vtable to the inserter implementations
// in this file, copies the relevant fields from the plan node, and creates
// the block queue + mutex that buffer produced data.
//
// Returns TSDB_CODE_SUCCESS and stores the new handle in *pHandle, or an
// out-of-memory code (with terrno set) on failure.
//
// Fix: on taosOpenQueue() failure the partially-initialized handle (and its
// already-initialized mutex) previously leaked; it is now released before
// returning the error.
int32_t createDataInserter(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle,
                           void* pParam) {
  SDataInserterHandle* inserter = taosMemoryCalloc(1, sizeof(SDataInserterHandle));
  if (NULL == inserter) {
    terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
    return TSDB_CODE_QRY_OUT_OF_MEMORY;
  }

  // NOTE(review): the sink node is cast to SDataDeleterNode* and stored in a
  // field named pDeleter — apparently inherited from the deleter sink; confirm
  // the node layouts are compatible for the fields used here.
  SDataDeleterNode* pDeleterNode = (SDataDeleterNode*)pDataSink;
  inserter->sink.fPut = putDataBlock;
  inserter->sink.fEndPut = endPut;
  inserter->sink.fGetLen = getDataLength;
  inserter->sink.fGetData = getDataBlock;
  inserter->sink.fDestroy = destroyDataSinker;
  inserter->sink.fGetCacheSize = getCacheSize;
  inserter->pManager = pManager;
  inserter->pDeleter = pDeleterNode;
  inserter->pSchema = pDataSink->pInputDataBlockDesc;
  inserter->pParam = pParam;
  inserter->status = DS_BUF_EMPTY;
  inserter->queryEnd = false;
  inserter->pDataBlocks = taosOpenQueue();
  taosThreadMutexInit(&inserter->mutex, NULL);
  if (NULL == inserter->pDataBlocks) {
    // Release everything allocated so far instead of leaking the handle.
    taosThreadMutexDestroy(&inserter->mutex);
    taosMemoryFree(inserter);
    terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
    return TSDB_CODE_QRY_OUT_OF_MEMORY;
  }

  *pHandle = inserter;
  return TSDB_CODE_SUCCESS;
}
source/libs/executor/src/executil.c
浏览文件 @
29949a96
...
...
@@ -704,7 +704,7 @@ void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray
while
(
i
<
numOfSrcCols
&&
j
<
taosArrayGetSize
(
pColMatchInfo
))
{
SColumnInfoData
*
p
=
taosArrayGet
(
pCols
,
i
);
SColMatchInfo
*
pmInfo
=
taosArrayGet
(
pColMatchInfo
,
j
);
if
(
!
outputEveryColumn
&&
!
pmInfo
->
output
)
{
if
(
!
outputEveryColumn
&&
pmInfo
->
reserved
)
{
j
++
;
continue
;
}
...
...
source/libs/function/src/builtins.c
浏览文件 @
29949a96
...
...
@@ -605,7 +605,7 @@ static int32_t translateTopBot(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
SValueNode
*
pValue
=
(
SValueNode
*
)
pParamNode1
;
if
(
pValue
->
node
.
resType
.
type
!=
TSDB_DATA_TYPE_BIGINT
)
{
if
(
!
IS_INTEGER_TYPE
(
pValue
->
node
.
resType
.
type
)
)
{
return
invaildFuncParaTypeErrMsg
(
pErrBuf
,
len
,
pFunc
->
functionName
);
}
...
...
source/libs/function/src/builtinsimpl.c
浏览文件 @
29949a96
...
...
@@ -92,10 +92,12 @@ typedef struct SStddevRes {
union
{
double
quadraticDSum
;
int64_t
quadraticISum
;
uint64_t
quadraticUSum
;
};
union
{
double
dsum
;
int64_t
isum
;
uint64_t
usum
;
};
int16_t
type
;
}
SStddevRes
;
...
...
@@ -1729,6 +1731,68 @@ int32_t stddevFunction(SqlFunctionCtx* pCtx) {
break
;
}
case
TSDB_DATA_TYPE_UTINYINT
:
{
uint8_t
*
plist
=
(
uint8_t
*
)
pCol
->
pData
;
for
(
int32_t
i
=
start
;
i
<
numOfRows
+
start
;
++
i
)
{
if
(
pCol
->
hasNull
&&
colDataIsNull_f
(
pCol
->
nullbitmap
,
i
))
{
continue
;
}
numOfElem
+=
1
;
pStddevRes
->
count
+=
1
;
pStddevRes
->
usum
+=
plist
[
i
];
pStddevRes
->
quadraticISum
+=
plist
[
i
]
*
plist
[
i
];
}
break
;
}
case
TSDB_DATA_TYPE_USMALLINT
:
{
uint16_t
*
plist
=
(
uint16_t
*
)
pCol
->
pData
;
for
(
int32_t
i
=
start
;
i
<
numOfRows
+
pInput
->
startRowIndex
;
++
i
)
{
if
(
pCol
->
hasNull
&&
colDataIsNull_f
(
pCol
->
nullbitmap
,
i
))
{
continue
;
}
numOfElem
+=
1
;
pStddevRes
->
count
+=
1
;
pStddevRes
->
usum
+=
plist
[
i
];
pStddevRes
->
quadraticISum
+=
plist
[
i
]
*
plist
[
i
];
}
break
;
}
case
TSDB_DATA_TYPE_UINT
:
{
uint32_t
*
plist
=
(
uint32_t
*
)
pCol
->
pData
;
for
(
int32_t
i
=
start
;
i
<
numOfRows
+
pInput
->
startRowIndex
;
++
i
)
{
if
(
pCol
->
hasNull
&&
colDataIsNull_f
(
pCol
->
nullbitmap
,
i
))
{
continue
;
}
numOfElem
+=
1
;
pStddevRes
->
count
+=
1
;
pStddevRes
->
usum
+=
plist
[
i
];
pStddevRes
->
quadraticISum
+=
plist
[
i
]
*
plist
[
i
];
}
break
;
}
case
TSDB_DATA_TYPE_UBIGINT
:
{
uint64_t
*
plist
=
(
uint64_t
*
)
pCol
->
pData
;
for
(
int32_t
i
=
start
;
i
<
numOfRows
+
pInput
->
startRowIndex
;
++
i
)
{
if
(
pCol
->
hasNull
&&
colDataIsNull_f
(
pCol
->
nullbitmap
,
i
))
{
continue
;
}
numOfElem
+=
1
;
pStddevRes
->
count
+=
1
;
pStddevRes
->
usum
+=
plist
[
i
];
pStddevRes
->
quadraticISum
+=
plist
[
i
]
*
plist
[
i
];
}
break
;
}
case
TSDB_DATA_TYPE_FLOAT
:
{
float
*
plist
=
(
float
*
)
pCol
->
pData
;
for
(
int32_t
i
=
start
;
i
<
numOfRows
+
pInput
->
startRowIndex
;
++
i
)
{
...
...
@@ -1771,9 +1835,12 @@ _stddev_over:
static
void
stddevTransferInfo
(
SStddevRes
*
pInput
,
SStddevRes
*
pOutput
)
{
pOutput
->
type
=
pInput
->
type
;
if
(
IS_
INTEGER
_TYPE
(
pOutput
->
type
))
{
if
(
IS_
SIGNED_NUMERIC
_TYPE
(
pOutput
->
type
))
{
pOutput
->
quadraticISum
+=
pInput
->
quadraticISum
;
pOutput
->
isum
+=
pInput
->
isum
;
}
else
if
(
IS_UNSIGNED_NUMERIC_TYPE
(
pOutput
->
type
))
{
pOutput
->
quadraticUSum
+=
pInput
->
quadraticUSum
;
pOutput
->
usum
+=
pInput
->
usum
;
}
else
{
pOutput
->
quadraticDSum
+=
pInput
->
quadraticDSum
;
pOutput
->
dsum
+=
pInput
->
dsum
;
...
...
@@ -1848,6 +1915,22 @@ int32_t stddevInvertFunction(SqlFunctionCtx* pCtx) {
LIST_STDDEV_SUB_N
(
pStddevRes
->
isum
,
int64_t
);
break
;
}
case
TSDB_DATA_TYPE_UTINYINT
:
{
LIST_STDDEV_SUB_N
(
pStddevRes
->
isum
,
uint8_t
);
break
;
}
case
TSDB_DATA_TYPE_USMALLINT
:
{
LIST_STDDEV_SUB_N
(
pStddevRes
->
isum
,
uint16_t
);
break
;
}
case
TSDB_DATA_TYPE_UINT
:
{
LIST_STDDEV_SUB_N
(
pStddevRes
->
isum
,
uint32_t
);
break
;
}
case
TSDB_DATA_TYPE_UBIGINT
:
{
LIST_STDDEV_SUB_N
(
pStddevRes
->
isum
,
uint64_t
);
break
;
}
case
TSDB_DATA_TYPE_FLOAT
:
{
LIST_STDDEV_SUB_N
(
pStddevRes
->
dsum
,
float
);
break
;
...
...
@@ -1871,9 +1954,12 @@ int32_t stddevFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
int32_t
type
=
pStddevRes
->
type
;
double
avg
;
if
(
IS_
INTEGER
_TYPE
(
type
))
{
if
(
IS_
SIGNED_NUMERIC
_TYPE
(
type
))
{
avg
=
pStddevRes
->
isum
/
((
double
)
pStddevRes
->
count
);
pStddevRes
->
result
=
sqrt
(
fabs
(
pStddevRes
->
quadraticISum
/
((
double
)
pStddevRes
->
count
)
-
avg
*
avg
));
}
else
if
(
IS_UNSIGNED_NUMERIC_TYPE
(
type
))
{
avg
=
pStddevRes
->
usum
/
((
double
)
pStddevRes
->
count
);
pStddevRes
->
result
=
sqrt
(
fabs
(
pStddevRes
->
quadraticUSum
/
((
double
)
pStddevRes
->
count
)
-
avg
*
avg
));
}
else
{
avg
=
pStddevRes
->
dsum
/
((
double
)
pStddevRes
->
count
);
pStddevRes
->
result
=
sqrt
(
fabs
(
pStddevRes
->
quadraticDSum
/
((
double
)
pStddevRes
->
count
)
-
avg
*
avg
));
...
...
@@ -1913,9 +1999,12 @@ int32_t stddevCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
SResultRowEntryInfo
*
pSResInfo
=
GET_RES_INFO
(
pSourceCtx
);
SStddevRes
*
pSBuf
=
GET_ROWCELL_INTERBUF
(
pSResInfo
);
if
(
IS_
INTEGER
_TYPE
(
type
))
{
if
(
IS_
SIGNED_NUMERIC
_TYPE
(
type
))
{
pDBuf
->
isum
+=
pSBuf
->
isum
;
pDBuf
->
quadraticISum
+=
pSBuf
->
quadraticISum
;
}
else
if
(
IS_UNSIGNED_NUMERIC_TYPE
(
type
))
{
pDBuf
->
usum
+=
pSBuf
->
usum
;
pDBuf
->
quadraticUSum
+=
pSBuf
->
quadraticUSum
;
}
else
{
pDBuf
->
dsum
+=
pSBuf
->
dsum
;
pDBuf
->
quadraticDSum
+=
pSBuf
->
quadraticDSum
;
...
...
source/libs/index/src/index.c
浏览文件 @
29949a96
...
...
@@ -65,9 +65,10 @@ void indexInit() {
indexQhandle
=
taosInitScheduler
(
INDEX_QUEUE_SIZE
,
INDEX_NUM_OF_THREADS
,
"index"
);
indexRefMgt
=
taosOpenRef
(
10
,
indexDestroy
);
}
void
indexClean
U
p
()
{
void
indexClean
u
p
()
{
// refacto later
taosCleanUpScheduler
(
indexQhandle
);
taosCloseRef
(
indexRefMgt
);
}
typedef
struct
SIdxColInfo
{
...
...
source/libs/qcom/src/queryUtil.c
浏览文件 @
29949a96
...
...
@@ -171,17 +171,17 @@ char* jobTaskStatusStr(int32_t status) {
switch
(
status
)
{
case
JOB_TASK_STATUS_NULL
:
return
"NULL"
;
case
JOB_TASK_STATUS_
NOT_STAR
T
:
return
"
NOT_STAR
T"
;
case
JOB_TASK_STATUS_EXEC
UTING
:
case
JOB_TASK_STATUS_
INI
T
:
return
"
INI
T"
;
case
JOB_TASK_STATUS_EXEC
:
return
"EXECUTING"
;
case
JOB_TASK_STATUS_PART
IAL_SUCCEED
:
case
JOB_TASK_STATUS_PART
_SUCC
:
return
"PARTIAL_SUCCEED"
;
case
JOB_TASK_STATUS_SUCC
EED
:
case
JOB_TASK_STATUS_SUCC
:
return
"SUCCEED"
;
case
JOB_TASK_STATUS_FAIL
ED
:
case
JOB_TASK_STATUS_FAIL
:
return
"FAILED"
;
case
JOB_TASK_STATUS_DROP
PING
:
case
JOB_TASK_STATUS_DROP
:
return
"DROPPING"
;
default:
break
;
...
...
@@ -200,7 +200,7 @@ SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* nam
return
s
;
}
void
destroyQueryExecRes
(
S
QueryExecRes
*
pRes
)
{
void
destroyQueryExecRes
(
S
ExecResult
*
pRes
)
{
if
(
NULL
==
pRes
||
NULL
==
pRes
->
res
)
{
return
;
}
...
...
source/libs/qworker/inc/qwInt.h
浏览文件 @
29949a96
...
...
@@ -226,8 +226,8 @@ typedef struct SQWorkerMgmt {
#define QW_TASK_NOT_EXIST(code) (TSDB_CODE_QRY_SCH_NOT_EXIST == (code) || TSDB_CODE_QRY_TASK_NOT_EXIST == (code))
#define QW_TASK_ALREADY_EXIST(code) (TSDB_CODE_QRY_TASK_ALREADY_EXIST == (code))
#define QW_TASK_READY(status) \
(status == JOB_TASK_STATUS_SUCC
EED || status == JOB_TASK_STATUS_FAILED
|| status == JOB_TASK_STATUS_CANCELLED || \
status == JOB_TASK_STATUS_PART
IAL_SUCCEED
)
(status == JOB_TASK_STATUS_SUCC
|| status == JOB_TASK_STATUS_FAIL
|| status == JOB_TASK_STATUS_CANCELLED || \
status == JOB_TASK_STATUS_PART
_SUCC
)
#define QW_SET_QTID(id, qId, tId, eId) \
do { \
*(uint64_t *)(id) = (qId); \
...
...
source/libs/qworker/src/qwDbg.c
浏览文件 @
29949a96
...
...
@@ -19,7 +19,7 @@ int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus,
int32_t
code
=
0
;
if
(
oriStatus
==
newStatus
)
{
if
(
newStatus
==
JOB_TASK_STATUS_EXEC
UTING
||
newStatus
==
JOB_TASK_STATUS_FAILED
)
{
if
(
newStatus
==
JOB_TASK_STATUS_EXEC
||
newStatus
==
JOB_TASK_STATUS_FAIL
)
{
*
ignore
=
true
;
return
TSDB_CODE_SUCCESS
;
}
...
...
@@ -29,47 +29,47 @@ int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus,
switch
(
oriStatus
)
{
case
JOB_TASK_STATUS_NULL
:
if
(
newStatus
!=
JOB_TASK_STATUS_EXEC
UTING
&&
newStatus
!=
JOB_TASK_STATUS_FAILED
&&
newStatus
!=
JOB_TASK_STATUS_
NOT_STAR
T
)
{
if
(
newStatus
!=
JOB_TASK_STATUS_EXEC
&&
newStatus
!=
JOB_TASK_STATUS_FAIL
&&
newStatus
!=
JOB_TASK_STATUS_
INI
T
)
{
QW_ERR_JRET
(
TSDB_CODE_QRY_APP_ERROR
);
}
break
;
case
JOB_TASK_STATUS_
NOT_STAR
T
:
if
(
newStatus
!=
JOB_TASK_STATUS_DROP
PING
&&
newStatus
!=
JOB_TASK_STATUS_EXECUTING
&&
newStatus
!=
JOB_TASK_STATUS_FAIL
ED
)
{
case
JOB_TASK_STATUS_
INI
T
:
if
(
newStatus
!=
JOB_TASK_STATUS_DROP
&&
newStatus
!=
JOB_TASK_STATUS_EXEC
&&
newStatus
!=
JOB_TASK_STATUS_FAIL
)
{
QW_ERR_JRET
(
TSDB_CODE_QRY_APP_ERROR
);
}
break
;
case
JOB_TASK_STATUS_EXEC
UTING
:
if
(
newStatus
!=
JOB_TASK_STATUS_PART
IAL_SUCCEED
&&
newStatus
!=
JOB_TASK_STATUS_SUCCEED
&&
newStatus
!=
JOB_TASK_STATUS_FAIL
ED
&&
newStatus
!=
JOB_TASK_STATUS_DROPPING
)
{
case
JOB_TASK_STATUS_EXEC
:
if
(
newStatus
!=
JOB_TASK_STATUS_PART
_SUCC
&&
newStatus
!=
JOB_TASK_STATUS_SUCC
&&
newStatus
!=
JOB_TASK_STATUS_FAIL
&&
newStatus
!=
JOB_TASK_STATUS_DROP
)
{
QW_ERR_JRET
(
TSDB_CODE_QRY_APP_ERROR
);
}
break
;
case
JOB_TASK_STATUS_PART
IAL_SUCCEED
:
if
(
newStatus
!=
JOB_TASK_STATUS_EXEC
UTING
&&
newStatus
!=
JOB_TASK_STATUS_SUCCEED
&&
newStatus
!=
JOB_TASK_STATUS_FAIL
ED
&&
newStatus
!=
JOB_TASK_STATUS_DROPPING
)
{
case
JOB_TASK_STATUS_PART
_SUCC
:
if
(
newStatus
!=
JOB_TASK_STATUS_EXEC
&&
newStatus
!=
JOB_TASK_STATUS_SUCC
&&
newStatus
!=
JOB_TASK_STATUS_FAIL
&&
newStatus
!=
JOB_TASK_STATUS_DROP
)
{
QW_ERR_JRET
(
TSDB_CODE_QRY_APP_ERROR
);
}
break
;
case
JOB_TASK_STATUS_SUCC
EED
:
if
(
newStatus
!=
JOB_TASK_STATUS_DROP
PING
&&
newStatus
!=
JOB_TASK_STATUS_FAILED
)
{
case
JOB_TASK_STATUS_SUCC
:
if
(
newStatus
!=
JOB_TASK_STATUS_DROP
&&
newStatus
!=
JOB_TASK_STATUS_FAIL
)
{
QW_ERR_JRET
(
TSDB_CODE_QRY_APP_ERROR
);
}
break
;
case
JOB_TASK_STATUS_FAIL
ED
:
if
(
newStatus
!=
JOB_TASK_STATUS_DROP
PING
)
{
case
JOB_TASK_STATUS_FAIL
:
if
(
newStatus
!=
JOB_TASK_STATUS_DROP
)
{
QW_ERR_JRET
(
TSDB_CODE_QRY_APP_ERROR
);
}
break
;
case
JOB_TASK_STATUS_DROP
PING
:
if
(
newStatus
!=
JOB_TASK_STATUS_FAIL
ED
&&
newStatus
!=
JOB_TASK_STATUS_PARTIAL_SUCCEED
)
{
case
JOB_TASK_STATUS_DROP
:
if
(
newStatus
!=
JOB_TASK_STATUS_FAIL
&&
newStatus
!=
JOB_TASK_STATUS_PART_SUCC
)
{
QW_ERR_JRET
(
TSDB_CODE_QRY_APP_ERROR
);
}
break
;
...
...
source/libs/qworker/src/qworker.c
浏览文件 @
29949a96
...
...
@@ -206,7 +206,7 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen,
QW_TASK_DLOG_E
(
"no data in sink and query end"
);
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_SUCC
EED
);
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_SUCC
);
QW_ERR_RET
(
qwMallocFetchRsp
(
len
,
&
rsp
));
*
rspMsg
=
rsp
;
...
...
@@ -236,7 +236,7 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen,
if
(
DS_BUF_EMPTY
==
pOutput
->
bufStatus
&&
pOutput
->
queryEnd
)
{
QW_TASK_DLOG_E
(
"task all data fetched, done"
);
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_SUCC
EED
);
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_SUCC
);
}
return
TSDB_CODE_SUCCESS
;
...
...
@@ -319,7 +319,7 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu
break
;
}
QW_ERR_JRET
(
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_EXEC
UTING
));
QW_ERR_JRET
(
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_EXEC
));
break
;
}
case
QW_PHASE_PRE_FETCH
:
{
...
...
@@ -436,7 +436,7 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
_return:
if
(
TSDB_CODE_SUCCESS
==
code
&&
QW_PHASE_POST_QUERY
==
phase
)
{
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_PART
IAL_SUCCEED
);
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_PART
_SUCC
);
}
if
(
rspConnection
)
{
...
...
@@ -456,7 +456,7 @@ _return:
}
if
(
code
)
{
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_FAIL
ED
);
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_FAIL
);
}
QW_TASK_DLOG
(
"end to handle event at phase %s, code:%x - %s"
,
qwPhaseStr
(
phase
),
code
,
tstrerror
(
code
));
...
...
@@ -488,7 +488,7 @@ int32_t qwPrerocessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
ctx
->
ctrlConnInfo
=
qwMsg
->
connInfo
;
QW_ERR_JRET
(
qwAddTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_
NOT_STAR
T
));
QW_ERR_JRET
(
qwAddTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_
INI
T
));
_return:
...
...
@@ -687,7 +687,7 @@ int32_t qwProcessFetch(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
if
(
QW_IS_QUERY_RUNNING
(
ctx
))
{
atomic_store_8
((
int8_t
*
)
&
ctx
->
queryContinue
,
1
);
}
else
if
(
0
==
atomic_load_8
((
int8_t
*
)
&
ctx
->
queryInQueue
))
{
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_EXEC
UTING
);
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_EXEC
);
atomic_store_8
((
int8_t
*
)
&
ctx
->
queryInQueue
,
1
);
...
...
@@ -738,7 +738,7 @@ int32_t qwProcessDrop(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
if
(
QW_IS_QUERY_RUNNING
(
ctx
))
{
QW_ERR_JRET
(
qwKillTaskHandle
(
QW_FPARAMS
(),
ctx
));
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_DROP
PING
);
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_DROP
);
}
else
if
(
ctx
->
phase
>
0
)
{
QW_ERR_JRET
(
qwDropTask
(
QW_FPARAMS
()));
rsped
=
true
;
...
...
@@ -759,7 +759,7 @@ _return:
QW_UPDATE_RSP_CODE
(
ctx
,
code
);
}
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_FAIL
ED
);
qwUpdateTaskStatus
(
QW_FPARAMS
(),
JOB_TASK_STATUS_FAIL
);
}
if
(
locked
)
{
...
...
source/libs/scheduler/inc/sch
eduler
Int.h
→
source/libs/scheduler/inc/schInt.h
浏览文件 @
29949a96
...
...
@@ -52,6 +52,7 @@ typedef enum {
SCH_OP_NULL
=
0
,
SCH_OP_EXEC
,
SCH_OP_FETCH
,
SCH_OP_GET_STATUS
,
}
SCH_OP_TYPE
;
typedef
struct
SSchTrans
{
...
...
@@ -97,13 +98,30 @@ typedef struct SSchStat {
}
SSchStat
;
typedef
struct
SSchResInfo
{
S
QueryResult
*
query
Res
;
S
ExecResult
*
exec
Res
;
void
**
fetchRes
;
schedulerExecFp
execFp
;
schedulerFetchFp
fetchFp
;
void
*
user
Param
;
void
*
cb
Param
;
}
SSchResInfo
;
typedef
struct
SSchOpEvent
{
SCH_OP_TYPE
type
;
bool
begin
;
SSchedulerReq
*
pReq
;
}
SSchOpEvent
;
typedef
int32_t
(
*
schStatusEnterFp
)(
void
*
pHandle
,
void
*
pParam
);
typedef
int32_t
(
*
schStatusLeaveFp
)(
void
*
pHandle
,
void
*
pParam
);
typedef
int32_t
(
*
schStatusEventFp
)(
void
*
pHandle
,
void
*
pParam
,
void
*
pEvent
);
typedef
struct
SSchStatusFps
{
EJobTaskType
status
;
schStatusEnterFp
enterFp
;
schStatusLeaveFp
leaveFp
;
schStatusEventFp
eventFp
;
}
SSchStatusFps
;
typedef
struct
SSchedulerMgmt
{
uint64_t
taskId
;
// sequential taksId
uint64_t
sId
;
// schedulerId
...
...
@@ -157,7 +175,7 @@ typedef struct SSchLevel {
int32_t
taskNum
;
int32_t
taskLaunchedNum
;
int32_t
taskDoneNum
;
SArray
*
subTasks
;
// Element is S
Query
Task
SArray
*
subTasks
;
// Element is S
Sch
Task
}
SSchLevel
;
typedef
struct
SSchTaskProfile
{
...
...
@@ -195,12 +213,13 @@ typedef struct SSchTask {
typedef
struct
SSchJobAttr
{
EExplainMode
explainMode
;
bool
queryJob
;
bool
needFetch
;
bool
needFlowCtrl
;
}
SSchJobAttr
;
typedef
struct
{
int32_t
op
;
bool
sync
;
bool
sync
Req
;
}
SSchOpStatus
;
typedef
struct
SSchJob
{
...
...
@@ -231,7 +250,7 @@ typedef struct SSchJob {
SSchTask
*
fetchTask
;
int32_t
errCode
;
SRWLatch
resLock
;
S
QueryExecRes
execRes
;
S
ExecResult
execRes
;
void
*
resData
;
//TODO free it or not
int32_t
resNumOfRows
;
SSchResInfo
userRes
;
...
...
@@ -292,19 +311,19 @@ extern SSchedulerMgmt schMgmt;
#define SCH_GET_JOB_STATUS(job) atomic_load_8(&(job)->status)
#define SCH_GET_JOB_STATUS_STR(job) jobTaskStatusStr(SCH_GET_JOB_STATUS(job))
#define SCH_JOB_IN_SYNC_OP(job) ((job)->opStatus.op && (job)->opStatus.sync)
#define SCH_JOB_IN_ASYNC_EXEC_OP(job) ((
(job)->opStatus.op == SCH_OP_EXEC) && (!(job)->opStatus.sync
))
#define SCH_JOB_IN_ASYNC_FETCH_OP(job) ((
(job)->opStatus.op == SCH_OP_FETCH) && (!(job)->opStatus.sync
))
#define SCH_JOB_IN_SYNC_OP(job) ((job)->opStatus.op && (job)->opStatus.sync
Req
)
#define SCH_JOB_IN_ASYNC_EXEC_OP(job) ((
SCH_OP_EXEC == atomic_val_compare_exchange_32(&(job)->opStatus.op, SCH_OP_EXEC, SCH_OP_NULL)) && (!(job)->opStatus.syncReq
))
#define SCH_JOB_IN_ASYNC_FETCH_OP(job) ((
SCH_OP_FETCH == atomic_val_compare_exchange_32(&(job)->opStatus.op, SCH_OP_FETCH, SCH_OP_NULL)) && (!(job)->opStatus.syncReq
))
#define SCH_SET_JOB_NEED_FLOW_CTRL(_job) (_job)->attr.needFlowCtrl = true
#define SCH_JOB_NEED_FLOW_CTRL(_job) ((_job)->attr.needFlowCtrl)
#define SCH_TASK_NEED_FLOW_CTRL(_job, _task) (SCH_IS_DATA_SRC_QRY_TASK(_task) && SCH_JOB_NEED_FLOW_CTRL(_job) && SCH_IS_LEVEL_UNFINISHED((_task)->level))
#define SCH_SET_JOB_TYPE(_job, type)
(_job)->attr.queryJob = ((type) != SUBPLAN_TYPE_MODIFY
)
#define SCH_SET_JOB_TYPE(_job, type)
do { if ((type) != SUBPLAN_TYPE_MODIFY) { (_job)->attr.queryJob = true; } } while (0
)
#define SCH_IS_QUERY_JOB(_job) ((_job)->attr.queryJob)
#define SCH_JOB_NEED_FETCH(_job)
SCH_IS_QUERY_JOB(_job
)
#define SCH_
IS_WAIT_ALL_JOB
(_job) (!SCH_IS_QUERY_JOB(_job))
#define SCH_
IS_NEED_DROP_JOB
(_job) (SCH_IS_QUERY_JOB(_job))
#define SCH_JOB_NEED_FETCH(_job)
((_job)->attr.needFetch
)
#define SCH_
JOB_NEED_WAIT
(_job) (!SCH_IS_QUERY_JOB(_job))
#define SCH_
JOB_NEED_DROP
(_job) (SCH_IS_QUERY_JOB(_job))
#define SCH_IS_EXPLAIN_JOB(_job) (EXPLAIN_MODE_ANALYZE == (_job)->attr.explainMode)
#define SCH_NETWORK_ERR(_code) ((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL)
#define SCH_SUB_TASK_NETWORK_ERR(_code, _len) (SCH_NETWORK_ERR(_code) && ((_len) > 0))
...
...
@@ -329,9 +348,10 @@ extern SSchedulerMgmt schMgmt;
#define SCH_TASK_WLOG(param, ...) \
qWarn("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_TASK_ID(pTask), SCH_TASK_EID(pTask),__VA_ARGS__)
#define SCH_ERR_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; return _code; } } while (0)
#define SCH_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; } return _code; } while (0)
#define SCH_ERR_JRET(c) do { code = c; if (code != TSDB_CODE_SUCCESS) { terrno = code; goto _return; } } while (0)
#define SCH_SET_ERRNO(_err) do { if (TSDB_CODE_SCH_IGNORE_ERROR != (_err)) { terrno = (_err); } } while (0)
#define SCH_ERR_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { SCH_SET_ERRNO(_code); return _code; } } while (0)
#define SCH_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { SCH_SET_ERRNO(_code); } return _code; } while (0)
#define SCH_ERR_JRET(c) do { code = c; if (code != TSDB_CODE_SUCCESS) { SCH_SET_ERRNO(code); goto _return; } } while (0)
#define SCH_LOCK(type, _lock) (SCH_READ == (type) ? taosRLockLatch(_lock) : taosWLockLatch(_lock))
#define SCH_UNLOCK(type, _lock) (SCH_READ == (type) ? taosRUnLockLatch(_lock) : taosWUnLockLatch(_lock))
...
...
@@ -349,7 +369,7 @@ int32_t schDecTaskFlowQuota(SSchJob *pJob, SSchTask *pTask);
int32_t
schCheckIncTaskFlowQuota
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
bool
*
enough
);
int32_t
schLaunchTasksInFlowCtrlList
(
SSchJob
*
pJob
,
SSchTask
*
pTask
);
int32_t
schLaunchTaskImpl
(
SSchJob
*
pJob
,
SSchTask
*
pTask
);
int32_t
sch
FetchFromRemote
(
SSchJob
*
pJob
);
int32_t
sch
LaunchFetchTask
(
SSchJob
*
pJob
);
int32_t
schProcessOnTaskFailure
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
int32_t
errCode
);
int32_t
schBuildAndSendHbMsg
(
SQueryNodeEpId
*
nodeEpId
,
SArray
*
taskAction
);
int32_t
schCloneSMsgSendInfo
(
void
*
src
,
void
**
dst
);
...
...
@@ -371,25 +391,45 @@ void schFreeRpcCtxVal(const void *arg);
int32_t
schMakeBrokenLinkVal
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
SRpcBrokenlinkVal
*
brokenVal
,
bool
isHb
);
int32_t
schAppendTaskExecNode
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
SQueryNodeAddr
*
addr
,
int32_t
execId
);
int32_t
schExecStaticExplainJob
(
SSchedulerReq
*
pReq
,
int64_t
*
job
,
bool
sync
);
int32_t
schExecJobImpl
(
SSchedulerReq
*
pReq
,
SSchJob
*
pJob
,
bool
sync
);
int32_t
schUpdateJobStatus
(
SSchJob
*
pJob
,
int8_t
newStatus
);
int32_t
schCancelJob
(
SSchJob
*
pJob
);
int32_t
schProcessOnJobDropped
(
SSchJob
*
pJob
,
int32_t
errCode
);
uint64_t
schGenTaskId
(
void
);
void
schCloseJobRef
(
void
);
int32_t
schExecJob
(
SSchedulerReq
*
pReq
,
int64_t
*
pJob
,
SQueryResult
*
pRes
);
int32_t
schAsyncExecJob
(
SSchedulerReq
*
pReq
,
int64_t
*
pJob
);
int32_t
schFetchRows
(
SSchJob
*
pJob
);
int32_t
sch
AsyncFetchRows
(
SSchJob
*
pJob
);
int32_t
sch
Job
FetchRows
(
SSchJob
*
pJob
);
int32_t
sch
JobFetchRowsA
(
SSchJob
*
pJob
);
int32_t
schUpdateTaskHandle
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
bool
dropExecNode
,
void
*
handle
,
int32_t
execId
);
int32_t
schProcessOnTaskStatusRsp
(
SQueryNodeEpId
*
pEpId
,
SArray
*
pStatusList
);
void
schFreeSMsgSendInfo
(
SMsgSendInfo
*
msgSendInfo
);
char
*
schGetOpStr
(
SCH_OP_TYPE
type
);
int32_t
schBeginOperation
(
SSchJob
*
pJob
,
SCH_OP_TYPE
type
,
bool
sync
);
int32_t
schInitJob
(
SSchedulerReq
*
pReq
,
SSchJob
**
pSchJob
);
int32_t
schSetJobQueryRes
(
SSchJob
*
pJob
,
SQueryResult
*
pRes
);
int32_t
schInitJob
(
int64_t
*
pJobId
,
SSchedulerReq
*
pReq
);
int32_t
schExecJob
(
SSchJob
*
pJob
,
SSchedulerReq
*
pReq
);
int32_t
schDumpJobExecRes
(
SSchJob
*
pJob
,
SExecResult
*
pRes
);
int32_t
schUpdateTaskCandidateAddr
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
SEpSet
*
pEpSet
);
int32_t
schHandleRedirect
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
SDataBuf
*
pData
,
int32_t
rspCode
);
void
schProcessOnOpEnd
(
SSchJob
*
pJob
,
SCH_OP_TYPE
type
,
SSchedulerReq
*
pReq
,
int32_t
errCode
);
int32_t
schProcessOnOpBegin
(
SSchJob
*
pJob
,
SCH_OP_TYPE
type
,
SSchedulerReq
*
pReq
);
void
schProcessOnCbEnd
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
int32_t
errCode
);
int32_t
schProcessOnCbBegin
(
SSchJob
**
job
,
SSchTask
**
task
,
uint64_t
qId
,
int64_t
rId
,
uint64_t
tId
);
void
schDropTaskOnExecNode
(
SSchJob
*
pJob
,
SSchTask
*
pTask
);
bool
schJobDone
(
SSchJob
*
pJob
);
int32_t
schRemoveTaskFromExecList
(
SSchJob
*
pJob
,
SSchTask
*
pTask
);
int32_t
schLaunchJobLowerLevel
(
SSchJob
*
pJob
,
SSchTask
*
pTask
);
int32_t
schSwitchJobStatus
(
SSchJob
*
pJob
,
int32_t
status
,
void
*
param
);
int32_t
schHandleOpBeginEvent
(
int64_t
jobId
,
SSchJob
**
job
,
SCH_OP_TYPE
type
,
SSchedulerReq
*
pReq
);
int32_t
schHandleOpEndEvent
(
SSchJob
*
pJob
,
SCH_OP_TYPE
type
,
SSchedulerReq
*
pReq
,
int32_t
errCode
);
int32_t
schHandleTaskRetry
(
SSchJob
*
pJob
,
SSchTask
*
pTask
);
void
schUpdateJobErrCode
(
SSchJob
*
pJob
,
int32_t
errCode
);
int32_t
schTaskCheckSetRetry
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
int32_t
errCode
,
bool
*
needRetry
);
int32_t
schProcessOnJobFailure
(
SSchJob
*
pJob
,
int32_t
errCode
);
int32_t
schProcessOnJobPartialSuccess
(
SSchJob
*
pJob
);
void
schFreeTask
(
SSchJob
*
pJob
,
SSchTask
*
pTask
);
void
schDropTaskInHashList
(
SSchJob
*
pJob
,
SHashObj
*
list
);
int32_t
schLaunchLevelTasks
(
SSchJob
*
pJob
,
SSchLevel
*
level
);
int32_t
schGetTaskFromList
(
SHashObj
*
pTaskList
,
uint64_t
taskId
,
SSchTask
**
pTask
);
int32_t
schInitTask
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
SSubplan
*
pPlan
,
SSchLevel
*
pLevel
);
#ifdef __cplusplus
...
...
source/libs/scheduler/src/schDbg.c
浏览文件 @
29949a96
...
...
@@ -14,16 +14,16 @@
*/
#include "query.h"
#include "sch
eduler
Int.h"
#include "schInt.h"
tsem_t
schdRspSem
;
void
schdExecCallback
(
S
Query
Result
*
pResult
,
void
*
param
,
int32_t
code
)
{
void
schdExecCallback
(
S
Exec
Result
*
pResult
,
void
*
param
,
int32_t
code
)
{
if
(
code
)
{
pResult
->
code
=
code
;
}
*
(
S
Query
Result
*
)
param
=
*
pResult
;
*
(
S
Exec
Result
*
)
param
=
*
pResult
;
taosMemoryFree
(
pResult
);
...
...
source/libs/scheduler/src/schFlowCtrl.c
浏览文件 @
29949a96
...
...
@@ -13,7 +13,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "sch
eduler
Int.h"
#include "schInt.h"
#include "tmsg.h"
#include "query.h"
#include "catalog.h"
...
...
source/libs/scheduler/src/schJob.c
浏览文件 @
29949a96
...
...
@@ -16,135 +16,11 @@
#include "catalog.h"
#include "command.h"
#include "query.h"
#include "sch
eduler
Int.h"
#include "schInt.h"
#include "tmsg.h"
#include "tref.h"
#include "trpc.h"
FORCE_INLINE
SSchJob
*
schAcquireJob
(
int64_t
refId
)
{
qDebug
(
"sch acquire jobId:0x%"
PRIx64
,
refId
);
return
(
SSchJob
*
)
taosAcquireRef
(
schMgmt
.
jobRef
,
refId
);
}
FORCE_INLINE
int32_t
schReleaseJob
(
int64_t
refId
)
{
qDebug
(
"sch release jobId:0x%"
PRIx64
,
refId
);
return
taosReleaseRef
(
schMgmt
.
jobRef
,
refId
);
}
int32_t
schInitTask
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
SSubplan
*
pPlan
,
SSchLevel
*
pLevel
)
{
pTask
->
plan
=
pPlan
;
pTask
->
level
=
pLevel
;
pTask
->
execId
=
-
1
;
pTask
->
maxExecTimes
=
SCH_TASK_MAX_EXEC_TIMES
;
pTask
->
timeoutUsec
=
SCH_DEFAULT_TASK_TIMEOUT_USEC
;
SCH_SET_TASK_STATUS
(
pTask
,
JOB_TASK_STATUS_NOT_START
);
pTask
->
taskId
=
schGenTaskId
();
pTask
->
execNodes
=
taosHashInit
(
SCH_MAX_CANDIDATE_EP_NUM
,
taosGetDefaultHashFunction
(
TSDB_DATA_TYPE_INT
),
true
,
HASH_NO_LOCK
);
if
(
NULL
==
pTask
->
execNodes
)
{
SCH_TASK_ELOG
(
"taosHashInit %d execNodes failed"
,
SCH_MAX_CANDIDATE_EP_NUM
);
SCH_ERR_RET
(
TSDB_CODE_QRY_OUT_OF_MEMORY
);
}
return
TSDB_CODE_SUCCESS
;
}
int32_t
schInitJob
(
SSchedulerReq
*
pReq
,
SSchJob
**
pSchJob
)
{
int32_t
code
=
0
;
int64_t
refId
=
-
1
;
SSchJob
*
pJob
=
taosMemoryCalloc
(
1
,
sizeof
(
SSchJob
));
if
(
NULL
==
pJob
)
{
qError
(
"QID:0x%"
PRIx64
" calloc %d failed"
,
pReq
->
pDag
->
queryId
,
(
int32_t
)
sizeof
(
SSchJob
));
SCH_ERR_RET
(
TSDB_CODE_QRY_OUT_OF_MEMORY
);
}
pJob
->
attr
.
explainMode
=
pReq
->
pDag
->
explainInfo
.
mode
;
pJob
->
conn
=
*
pReq
->
pConn
;
pJob
->
sql
=
pReq
->
sql
;
pJob
->
pDag
=
pReq
->
pDag
;
pJob
->
chkKillFp
=
pReq
->
chkKillFp
;
pJob
->
chkKillParam
=
pReq
->
chkKillParam
;
pJob
->
userRes
.
execFp
=
pReq
->
execFp
;
pJob
->
userRes
.
userParam
=
pReq
->
execParam
;
if
(
pReq
->
pNodeList
==
NULL
||
taosArrayGetSize
(
pReq
->
pNodeList
)
<=
0
)
{
qDebug
(
"QID:0x%"
PRIx64
" input exec nodeList is empty"
,
pReq
->
pDag
->
queryId
);
}
else
{
pJob
->
nodeList
=
taosArrayDup
(
pReq
->
pNodeList
);
}
pJob
->
taskList
=
taosHashInit
(
pReq
->
pDag
->
numOfSubplans
,
taosGetDefaultHashFunction
(
TSDB_DATA_TYPE_UBIGINT
),
false
,
HASH_ENTRY_LOCK
);
if
(
NULL
==
pJob
->
taskList
)
{
SCH_JOB_ELOG
(
"taosHashInit %d taskList failed"
,
pReq
->
pDag
->
numOfSubplans
);
SCH_ERR_JRET
(
TSDB_CODE_QRY_OUT_OF_MEMORY
);
}
SCH_ERR_JRET
(
schValidateAndBuildJob
(
pReq
->
pDag
,
pJob
));
if
(
SCH_IS_EXPLAIN_JOB
(
pJob
))
{
SCH_ERR_JRET
(
qExecExplainBegin
(
pReq
->
pDag
,
&
pJob
->
explainCtx
,
pReq
->
startTs
));
}
pJob
->
execTasks
=
taosHashInit
(
pReq
->
pDag
->
numOfSubplans
,
taosGetDefaultHashFunction
(
TSDB_DATA_TYPE_UBIGINT
),
false
,
HASH_ENTRY_LOCK
);
if
(
NULL
==
pJob
->
execTasks
)
{
SCH_JOB_ELOG
(
"taosHashInit %d execTasks failed"
,
pReq
->
pDag
->
numOfSubplans
);
SCH_ERR_JRET
(
TSDB_CODE_QRY_OUT_OF_MEMORY
);
}
tsem_init
(
&
pJob
->
rspSem
,
0
,
0
);
refId
=
taosAddRef
(
schMgmt
.
jobRef
,
pJob
);
if
(
refId
<
0
)
{
SCH_JOB_ELOG
(
"taosAddRef job failed, error:%s"
,
tstrerror
(
terrno
));
SCH_ERR_JRET
(
terrno
);
}
atomic_add_fetch_32
(
&
schMgmt
.
jobNum
,
1
);
if
(
NULL
==
schAcquireJob
(
refId
))
{
SCH_JOB_ELOG
(
"schAcquireJob job failed, refId:0x%"
PRIx64
,
refId
);
SCH_ERR_JRET
(
TSDB_CODE_SCH_STATUS_ERROR
);
}
pJob
->
refId
=
refId
;
SCH_JOB_DLOG
(
"job refId:0x%"
PRIx64
" created"
,
pJob
->
refId
);
schUpdateJobStatus
(
pJob
,
JOB_TASK_STATUS_NOT_START
);
*
pSchJob
=
pJob
;
return
TSDB_CODE_SUCCESS
;
_return:
if
(
refId
<
0
)
{
schFreeJobImpl
(
pJob
);
}
else
{
taosRemoveRef
(
schMgmt
.
jobRef
,
refId
);
}
SCH_RET
(
code
);
}
void
schFreeTask
(
SSchJob
*
pJob
,
SSchTask
*
pTask
)
{
schDeregisterTaskHb
(
pJob
,
pTask
);
if
(
pTask
->
candidateAddrs
)
{
taosArrayDestroy
(
pTask
->
candidateAddrs
);
}
taosMemoryFreeClear
(
pTask
->
msg
);
if
(
pTask
->
children
)
{
taosArrayDestroy
(
pTask
->
children
);
}
if
(
pTask
->
parents
)
{
taosArrayDestroy
(
pTask
->
parents
);
}
if
(
pTask
->
execNodes
)
{
taosHashCleanup
(
pTask
->
execNodes
);
}
}
void
schUpdateJobErrCode
(
SSchJob
*
pJob
,
int32_t
errCode
)
{
if
(
TSDB_CODE_SUCCESS
==
errCode
)
{
return
;
...
...
@@ -175,7 +51,12 @@ _return:
SCH_JOB_DLOG
(
"job errCode updated to %x - %s"
,
errCode
,
tstrerror
(
errCode
));
}
bool
schJobDone
(
SSchJob
*
pJob
)
{
int8_t
status
=
SCH_GET_JOB_STATUS
(
pJob
);
return
(
status
==
JOB_TASK_STATUS_FAIL
||
status
==
JOB_TASK_STATUS_DROP
||
status
==
JOB_TASK_STATUS_SUCC
);
}
FORCE_INLINE
bool
schJobNeedToStop
(
SSchJob
*
pJob
,
int8_t
*
pStatus
)
{
int8_t
status
=
SCH_GET_JOB_STATUS
(
pJob
);
...
...
@@ -183,13 +64,16 @@ FORCE_INLINE bool schJobNeedToStop(SSchJob *pJob, int8_t *pStatus) {
*
pStatus
=
status
;
}
if
(
schJobDone
(
pJob
))
{
return
true
;
}
if
((
*
pJob
->
chkKillFp
)(
pJob
->
chkKillParam
))
{
schUpdateJobErrCode
(
pJob
,
TSDB_CODE_TSC_QUERY_KILLED
);
return
true
;
}
return
(
status
==
JOB_TASK_STATUS_FAILED
||
status
==
JOB_TASK_STATUS_DROPPING
||
status
==
JOB_TASK_STATUS_SUCCEED
);
return
false
;
}
int32_t
schUpdateJobStatus
(
SSchJob
*
pJob
,
int8_t
newStatus
)
{
...
...
@@ -201,48 +85,44 @@ int32_t schUpdateJobStatus(SSchJob *pJob, int8_t newStatus) {
oriStatus
=
SCH_GET_JOB_STATUS
(
pJob
);
if
(
oriStatus
==
newStatus
)
{
if
(
newStatus
==
JOB_TASK_STATUS_DROPPING
)
{
SCH_ERR_JRET
(
TSDB_CODE_SCH_JOB_IS_DROPPING
);
}
SCH_ERR_JRET
(
TSDB_CODE_SCH_IGNORE_ERROR
);
}
switch
(
oriStatus
)
{
case
JOB_TASK_STATUS_NULL
:
if
(
newStatus
!=
JOB_TASK_STATUS_
NOT_STAR
T
)
{
if
(
newStatus
!=
JOB_TASK_STATUS_
INI
T
)
{
SCH_ERR_JRET
(
TSDB_CODE_QRY_APP_ERROR
);
}
break
;
case
JOB_TASK_STATUS_
NOT_STAR
T
:
if
(
newStatus
!=
JOB_TASK_STATUS_EXEC
UTING
&&
newStatus
!=
JOB_TASK_STATUS_DROPPING
)
{
case
JOB_TASK_STATUS_
INI
T
:
if
(
newStatus
!=
JOB_TASK_STATUS_EXEC
&&
newStatus
!=
JOB_TASK_STATUS_DROP
)
{
SCH_ERR_JRET
(
TSDB_CODE_QRY_APP_ERROR
);
}
break
;
case
JOB_TASK_STATUS_EXEC
UTING
:
if
(
newStatus
!=
JOB_TASK_STATUS_PART
IAL_SUCCEED
&&
newStatus
!=
JOB_TASK_STATUS_FAILED
&&
newStatus
!=
JOB_TASK_STATUS_DROP
PING
)
{
case
JOB_TASK_STATUS_EXEC
:
if
(
newStatus
!=
JOB_TASK_STATUS_PART
_SUCC
&&
newStatus
!=
JOB_TASK_STATUS_FAIL
&&
newStatus
!=
JOB_TASK_STATUS_DROP
)
{
SCH_ERR_JRET
(
TSDB_CODE_QRY_APP_ERROR
);
}
break
;
case
JOB_TASK_STATUS_PART
IAL_SUCCEED
:
if
(
newStatus
!=
JOB_TASK_STATUS_FAIL
ED
&&
newStatus
!=
JOB_TASK_STATUS_SUCCEED
&&
newStatus
!=
JOB_TASK_STATUS_DROP
PING
)
{
case
JOB_TASK_STATUS_PART
_SUCC
:
if
(
newStatus
!=
JOB_TASK_STATUS_FAIL
&&
newStatus
!=
JOB_TASK_STATUS_SUCC
&&
newStatus
!=
JOB_TASK_STATUS_DROP
)
{
SCH_ERR_JRET
(
TSDB_CODE_QRY_APP_ERROR
);
}
break
;
case
JOB_TASK_STATUS_SUCC
EED
:
case
JOB_TASK_STATUS_FAIL
ED
:
if
(
newStatus
!=
JOB_TASK_STATUS_DROP
PING
)
{
case
JOB_TASK_STATUS_SUCC
:
case
JOB_TASK_STATUS_FAIL
:
if
(
newStatus
!=
JOB_TASK_STATUS_DROP
)
{
SCH_ERR_JRET
(
TSDB_CODE_QRY_APP_ERROR
);
}
break
;
case
JOB_TASK_STATUS_DROP
PING
:
case
JOB_TASK_STATUS_DROP
:
SCH_ERR_JRET
(
TSDB_CODE_QRY_JOB_FREED
);
break
;
...
...
@@ -264,67 +144,11 @@ int32_t schUpdateJobStatus(SSchJob *pJob, int8_t newStatus) {
_return:
if
(
TSDB_CODE_SCH_IGNORE_ERROR
==
code
)
{
SCH_JOB_DLOG
(
"ignore job status update, from %s to %s"
,
jobTaskStatusStr
(
oriStatus
),
jobTaskStatusStr
(
newStatus
));
}
else
{
SCH_JOB_ELOG
(
"invalid job status update, from %s to %s"
,
jobTaskStatusStr
(
oriStatus
),
jobTaskStatusStr
(
newStatus
));
SCH_RET
(
code
);
}
void
schEndOperation
(
SSchJob
*
pJob
)
{
int32_t
op
=
atomic_load_32
(
&
pJob
->
opStatus
.
op
);
if
(
SCH_OP_NULL
==
op
)
{
SCH_JOB_DLOG
(
"job already not in any operation, status:%s"
,
jobTaskStatusStr
(
pJob
->
status
));
return
;
}
atomic_store_32
(
&
pJob
->
opStatus
.
op
,
SCH_OP_NULL
);
SCH_JOB_DLOG
(
"job end %s operation"
,
schGetOpStr
(
op
));
}
int32_t
schBeginOperation
(
SSchJob
*
pJob
,
SCH_OP_TYPE
type
,
bool
sync
)
{
int32_t
code
=
0
;
int8_t
status
=
0
;
if
(
schJobNeedToStop
(
pJob
,
&
status
))
{
SCH_JOB_ELOG
(
"abort op %s cause of job need to stop"
,
schGetOpStr
(
type
));
SCH_ERR_JRET
(
pJob
->
errCode
);
}
if
(
SCH_OP_NULL
!=
atomic_val_compare_exchange_32
(
&
pJob
->
opStatus
.
op
,
SCH_OP_NULL
,
type
))
{
SCH_JOB_ELOG
(
"job already in %s operation"
,
schGetOpStr
(
pJob
->
opStatus
.
op
));
SCH_ERR_JRET
(
TSDB_CODE_TSC_APP_ERROR
);
}
SCH_JOB_DLOG
(
"job start %s operation"
,
schGetOpStr
(
pJob
->
opStatus
.
op
));
pJob
->
opStatus
.
sync
=
sync
;
switch
(
type
)
{
case
SCH_OP_EXEC
:
SCH_ERR_JRET
(
schUpdateJobStatus
(
pJob
,
JOB_TASK_STATUS_EXECUTING
));
break
;
case
SCH_OP_FETCH
:
if
(
!
SCH_JOB_NEED_FETCH
(
pJob
))
{
SCH_JOB_ELOG
(
"no need to fetch data, status:%s"
,
SCH_GET_JOB_STATUS_STR
(
pJob
));
SCH_ERR_JRET
(
TSDB_CODE_QRY_APP_ERROR
);
}
if
(
status
!=
JOB_TASK_STATUS_PARTIAL_SUCCEED
)
{
SCH_JOB_ELOG
(
"job status error for fetch, status:%s"
,
jobTaskStatusStr
(
status
));
SCH_ERR_JRET
(
TSDB_CODE_SCH_STATUS_ERROR
);
}
break
;
default:
SCH_JOB_ELOG
(
"unknown operation type %d"
,
type
);
SCH_ERR_JRET
(
TSDB_CODE_TSC_APP_ERROR
);
}
return
TSDB_CODE_SUCCESS
;
_return:
schEndOperation
(
pJob
);
SCH_RET
(
code
);
}
...
...
@@ -406,86 +230,23 @@ int32_t schBuildTaskRalation(SSchJob *pJob, SHashObj *planToTask) {
}
SSchLevel
*
pLevel
=
taosArrayGet
(
pJob
->
levels
,
0
);
if
(
SCH_IS_QUERY_JOB
(
pJob
)
&&
pLevel
->
taskNum
>
1
)
{
if
(
SCH_IS_QUERY_JOB
(
pJob
))
{
if
(
pLevel
->
taskNum
>
1
)
{
SCH_JOB_ELOG
(
"invalid query plan, level:0, taskNum:%d"
,
pLevel
->
taskNum
);
SCH_ERR_RET
(
TSDB_CODE_SCH_INTERNAL_ERROR
);
}
return
TSDB_CODE_SUCCESS
;
}
int32_t
schRecordTaskSucceedNode
(
SSchJob
*
pJob
,
SSchTask
*
pTask
)
{
SQueryNodeAddr
*
addr
=
taosArrayGet
(
pTask
->
candidateAddrs
,
pTask
->
candidateIdx
);
if
(
NULL
==
addr
)
{
SCH_TASK_ELOG
(
"taosArrayGet candidate addr failed, idx:%d, size:%d"
,
pTask
->
candidateIdx
,
(
int32_t
)
taosArrayGetSize
(
pTask
->
candidateAddrs
));
SCH_ERR_RET
(
TSDB_CODE_SCH_INTERNAL_ERROR
);
}
pTask
->
succeedAddr
=
*
addr
;
return
TSDB_CODE_SUCCESS
;
}
int32_t
schAppendTaskExecNode
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
SQueryNodeAddr
*
addr
,
int32_t
execId
)
{
SSchNodeInfo
nodeInfo
=
{.
addr
=
*
addr
,
.
handle
=
NULL
};
if
(
taosHashPut
(
pTask
->
execNodes
,
&
execId
,
sizeof
(
execId
),
&
nodeInfo
,
sizeof
(
nodeInfo
)))
{
SCH_TASK_ELOG
(
"taosHashPut nodeInfo to execNodes failed, errno:%d"
,
errno
);
SCH_ERR_RET
(
TSDB_CODE_QRY_OUT_OF_MEMORY
);
}
SCH_TASK_DLOG
(
"task execNode added, execId:%d"
,
execId
);
return
TSDB_CODE_SUCCESS
;
}
int32_t
schDropTaskExecNode
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
void
*
handle
,
int32_t
execId
)
{
if
(
NULL
==
pTask
->
execNodes
)
{
return
TSDB_CODE_SUCCESS
;
}
if
(
taosHashRemove
(
pTask
->
execNodes
,
&
execId
,
sizeof
(
execId
)))
{
SCH_TASK_ELOG
(
"fail to remove execId %d from execNodeList"
,
execId
);
}
else
{
SCH_TASK_DLOG
(
"execId %d removed from execNodeList"
,
execId
);
}
if
(
execId
!=
pTask
->
execId
)
{
// ignore it
SCH_TASK_DLOG
(
"execId %d is not current execId %d"
,
execId
,
pTask
->
execId
);
SCH_RET
(
TSDB_CODE_SCH_IGNORE_ERROR
);
}
return
TSDB_CODE_SUCCESS
;
}
int32_t
schUpdateTaskExecNode
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
void
*
handle
,
int32_t
execId
)
{
if
(
taosHashGetSize
(
pTask
->
execNodes
)
<=
0
)
{
return
TSDB_CODE_SUCCESS
;
SSchTask
*
pTask
=
taosArrayGet
(
pLevel
->
subTasks
,
0
);
if
(
SUBPLAN_TYPE_MODIFY
!=
pTask
->
plan
->
subplanType
)
{
pJob
->
attr
.
needFetch
=
true
;
}
SSchNodeInfo
*
nodeInfo
=
taosHashGet
(
pTask
->
execNodes
,
&
execId
,
sizeof
(
execId
));
nodeInfo
->
handle
=
handle
;
SCH_TASK_DLOG
(
"handle updated to %p for execId %d"
,
handle
,
execId
);
return
TSDB_CODE_SUCCESS
;
}
int32_t
schUpdateTaskHandle
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
bool
dropExecNode
,
void
*
handle
,
int32_t
execId
)
{
if
(
dropExecNode
)
{
SCH_RET
(
schDropTaskExecNode
(
pJob
,
pTask
,
handle
,
execId
));
}
SCH_SET_TASK_HANDLE
(
pTask
,
handle
);
schUpdateTaskExecNode
(
pJob
,
pTask
,
handle
,
execId
);
return
TSDB_CODE_SUCCESS
;
}
int32_t
sch
RecordQuery
DataSrc
(
SSchJob
*
pJob
,
SSchTask
*
pTask
)
{
int32_t
sch
AppendJob
DataSrc
(
SSchJob
*
pJob
,
SSchTask
*
pTask
)
{
if
(
!
SCH_IS_DATA_SRC_QRY_TASK
(
pTask
))
{
return
TSDB_CODE_SUCCESS
;
}
...
...
@@ -539,7 +300,7 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) {
int32_t
taskNum
=
0
;
SSchLevel
*
pLevel
=
NULL
;
level
.
status
=
JOB_TASK_STATUS_
NOT_STAR
T
;
level
.
status
=
JOB_TASK_STATUS_
INI
T
;
for
(
int32_t
i
=
0
;
i
<
levelNum
;
++
i
)
{
if
(
NULL
==
taosArrayPush
(
pJob
->
levels
,
&
level
))
{
...
...
@@ -584,7 +345,7 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) {
SCH_ERR_JRET
(
TSDB_CODE_QRY_OUT_OF_MEMORY
);
}
SCH_ERR_JRET
(
sch
RecordQuery
DataSrc
(
pJob
,
pTask
));
SCH_ERR_JRET
(
sch
AppendJob
DataSrc
(
pJob
,
pTask
));
if
(
0
!=
taosHashPut
(
planToTask
,
&
plan
,
POINTER_BYTES
,
&
pTask
,
POINTER_BYTES
))
{
SCH_TASK_ELOG
(
"taosHashPut to planToTaks failed, taskIdx:%d"
,
n
);
...
...
@@ -613,708 +374,173 @@ _return:
SCH_RET
(
code
);
}
int32_t
schSetAddrsFromNodeList
(
SSchJob
*
pJob
,
SSchTask
*
pTask
)
{
int32_t
addNum
=
0
;
int32_t
nodeNum
=
0
;
if
(
pJob
->
nodeList
)
{
nodeNum
=
taosArrayGetSize
(
pJob
->
nodeList
);
for
(
int32_t
i
=
0
;
i
<
nodeNum
&&
addNum
<
SCH_MAX_CANDIDATE_EP_NUM
;
++
i
)
{
SQueryNodeLoad
*
nload
=
taosArrayGet
(
pJob
->
nodeList
,
i
);
SQueryNodeAddr
*
naddr
=
&
nload
->
addr
;
if
(
NULL
==
taosArrayPush
(
pTask
->
candidateAddrs
,
naddr
))
{
SCH_TASK_ELOG
(
"taosArrayPush execNode to candidate addrs failed, addNum:%d, errno:%d"
,
addNum
,
errno
);
SCH_ERR_RET
(
TSDB_CODE_QRY_OUT_OF_MEMORY
);
}
SCH_TASK_DLOG
(
"set %dth candidate addr, id %d, fqdn:%s, port:%d"
,
i
,
naddr
->
nodeId
,
SCH_GET_CUR_EP
(
naddr
)
->
fqdn
,
SCH_GET_CUR_EP
(
naddr
)
->
port
);
++
addNum
;
}
}
int32_t
schDumpJobExecRes
(
SSchJob
*
pJob
,
SExecResult
*
pRes
)
{
pRes
->
code
=
atomic_load_32
(
&
pJob
->
errCode
);
pRes
->
numOfRows
=
pJob
->
resNumOfRows
;
pRes
->
res
=
pJob
->
execRes
.
res
;
pRes
->
msgType
=
pJob
->
execRes
.
msgType
;
pJob
->
execRes
.
res
=
NULL
;
if
(
addNum
<=
0
)
{
SCH_TASK_ELOG
(
"no available execNode as candidates, nodeNum:%d"
,
nodeNum
);
SCH_ERR_RET
(
TSDB_CODE_TSC_NO_EXEC_NODE
);
}
SCH_JOB_DLOG
(
"execRes dumped, code: %s"
,
tstrerror
(
pRes
->
code
));
return
TSDB_CODE_SUCCESS
;
}
int32_t
schSetTaskCandidateAddrs
(
SSchJob
*
pJob
,
SSchTask
*
pTask
)
{
if
(
NULL
!=
pTask
->
candidateAddrs
)
{
return
TSDB_CODE_SUCCESS
;
int32_t
schDumpJobFetchRes
(
SSchJob
*
pJob
,
void
**
pData
)
{
int32_t
code
=
0
;
if
(
pJob
->
resData
&&
((
SRetrieveTableRsp
*
)
pJob
->
resData
)
->
completed
)
{
SCH_ERR_RET
(
schSwitchJobStatus
(
pJob
,
JOB_TASK_STATUS_SUCC
,
NULL
))
;
}
pTask
->
candidateIdx
=
0
;
pTask
->
candidateAddrs
=
taosArrayInit
(
SCH_MAX_CANDIDATE_EP_NUM
,
sizeof
(
SQueryNodeAddr
));
if
(
NULL
==
pTask
->
candidateAddrs
)
{
SCH_TASK_ELOG
(
"taosArrayInit %d condidate addrs failed"
,
SCH_MAX_CANDIDATE_EP_NUM
);
SCH_ERR_RET
(
TSDB_CODE_QRY_OUT_OF_MEMORY
);
while
(
true
)
{
*
pData
=
atomic_load_ptr
(
&
pJob
->
resData
);
if
(
*
pData
!=
atomic_val_compare_exchange_ptr
(
&
pJob
->
resData
,
*
pData
,
NULL
))
{
continue
;
}
if
(
pTask
->
plan
->
execNode
.
epSet
.
numOfEps
>
0
)
{
if
(
NULL
==
taosArrayPush
(
pTask
->
candidateAddrs
,
&
pTask
->
plan
->
execNode
))
{
SCH_TASK_ELOG
(
"taosArrayPush execNode to candidate addrs failed, errno:%d"
,
errno
);
SCH_ERR_RET
(
TSDB_CODE_QRY_OUT_OF_MEMORY
);
break
;
}
SCH_TASK_DLOG
(
"use execNode in plan as candidate addr, numOfEps:%d"
,
pTask
->
plan
->
execNode
.
epSet
.
numOfEps
);
return
TSDB_CODE_SUCCESS
;
if
(
NULL
==
*
pData
)
{
SRetrieveTableRsp
*
rsp
=
(
SRetrieveTableRsp
*
)
taosMemoryCalloc
(
1
,
sizeof
(
SRetrieveTableRsp
));
if
(
rsp
)
{
rsp
->
completed
=
1
;
}
if
(
SCH_IS_DATA_SRC_QRY_TASK
(
pTask
))
{
SCH_TASK_ELOG
(
"no execNode specifed for data src task, numOfEps:%d"
,
pTask
->
plan
->
execNode
.
epSet
.
numOfEps
);
SCH_ERR_RET
(
TSDB_CODE_QRY_APP_ERROR
);
*
pData
=
rsp
;
SCH_JOB_DLOG
(
"empty res and set query complete, code:%x"
,
code
);
}
SCH_ERR_RET
(
schSetAddrsFromNodeList
(
pJob
,
pTask
));
/*
for (int32_t i = 0; i < job->dataSrcEps.numOfEps && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) {
strncpy(epSet->fqdn[epSet->numOfEps], job->dataSrcEps.fqdn[i], sizeof(job->dataSrcEps.fqdn[i]));
epSet->port[epSet->numOfEps] = job->dataSrcEps.port[i];
++epSet->numOfEps;
}
*/
SCH_JOB_DLOG
(
"fetch done, totalRows:%d"
,
pJob
->
resNumOfRows
);
return
TSDB_CODE_SUCCESS
;
}
int32_t
sch
UpdateTaskCandidateAddr
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
SEpSet
*
pEpSet
)
{
if
(
NULL
==
pTask
->
candidateAddrs
||
1
!=
taosArrayGetSize
(
pTask
->
candidateAddrs
))
{
SCH_TASK_ELOG
(
"not able to update cndidate addr, addr num %d"
,
(
int32_t
)(
pTask
->
candidateAddrs
?
taosArrayGetSize
(
pTask
->
candidateAddrs
)
:
0
));
SCH_ERR_RET
(
TSDB_CODE_APP_ERROR
);
int32_t
sch
NotifyUserExecRes
(
SSchJob
*
pJob
)
{
SExecResult
*
pRes
=
taosMemoryCalloc
(
1
,
sizeof
(
SExecResult
));
if
(
pRes
)
{
schDumpJobExecRes
(
pJob
,
pRes
);
}
SQueryNodeAddr
*
pAddr
=
taosArrayGet
(
pTask
->
candidateAddrs
,
0
);
SEp
*
pOld
=
&
pAddr
->
epSet
.
eps
[
pAddr
->
epSet
.
inUse
];
SEp
*
pNew
=
&
pEpSet
->
eps
[
pEpSet
->
inUse
];
SCH_TASK_DLOG
(
"update task ep from %s:%d to %s:%d"
,
pOld
->
fqdn
,
pOld
->
port
,
pNew
->
fqdn
,
pNew
->
port
);
memcpy
(
&
pAddr
->
epSet
,
pEpSet
,
sizeof
(
pAddr
->
epSet
));
SCH_JOB_DLOG
(
"sch start to invoke exec cb, code: %s"
,
tstrerror
(
pJob
->
errCode
));
(
*
pJob
->
userRes
.
execFp
)(
pRes
,
pJob
->
userRes
.
cbParam
,
atomic_load_32
(
&
pJob
->
errCode
));
SCH_JOB_DLOG
(
"sch end from exec cb, code: %s"
,
tstrerror
(
pJob
->
errCode
));
return
TSDB_CODE_SUCCESS
;
}
int32_t
schNotifyUserFetchRes
(
SSchJob
*
pJob
)
{
void
*
pRes
=
NULL
;
int32_t
schRemoveTaskFromExecList
(
SSchJob
*
pJob
,
SSchTask
*
pTask
)
{
int32_t
code
=
taosHashRemove
(
pJob
->
execTasks
,
&
pTask
->
taskId
,
sizeof
(
pTask
->
taskId
));
if
(
code
)
{
SCH_TASK_ELOG
(
"task failed to rm from execTask list, code:%x"
,
code
);
SCH_ERR_RET
(
TSDB_CODE_SCH_INTERNAL_ERROR
);
}
schDumpJobFetchRes
(
pJob
,
&
pRes
);
SCH_JOB_DLOG
(
"sch start to invoke fetch cb, code: %s"
,
tstrerror
(
pJob
->
errCode
));
(
*
pJob
->
userRes
.
fetchFp
)(
pRes
,
pJob
->
userRes
.
cbParam
,
atomic_load_32
(
&
pJob
->
errCode
));
SCH_JOB_DLOG
(
"sch end from fetch cb, code: %s"
,
tstrerror
(
pJob
->
errCode
));
return
TSDB_CODE_SUCCESS
;
}
int32_t
schPushTaskToExecList
(
SSchJob
*
pJob
,
SSchTask
*
pTask
)
{
int32_t
code
=
taosHashPut
(
pJob
->
execTasks
,
&
pTask
->
taskId
,
sizeof
(
pTask
->
taskId
),
&
pTask
,
POINTER_BYTES
);
if
(
0
!=
code
)
{
if
(
HASH_NODE_EXIST
(
code
))
{
SCH_TASK_ELOG
(
"task already in execTask list, code:%x"
,
code
);
SCH_ERR_RET
(
TSDB_CODE_SCH_INTERNAL_ERROR
);
void
schPostJobRes
(
SSchJob
*
pJob
,
SCH_OP_TYPE
op
)
{
if
(
SCH_OP_NULL
==
pJob
->
opStatus
.
op
)
{
SCH_JOB_DLOG
(
"job not in any operation, no need to post job res, status:%s"
,
jobTaskStatusStr
(
pJob
->
status
));
return
;
}
SCH_TASK_ELOG
(
"taosHashPut task to execTask list failed, errno:%d"
,
errno
);
SCH_ERR_RET
(
TSDB_CODE_QRY_OUT_OF_MEMORY
);
if
(
op
&&
pJob
->
opStatus
.
op
!=
op
)
{
SCH_JOB_ELOG
(
"job in operation %s mis-match with expected %s"
,
schGetOpStr
(
pJob
->
opStatus
.
op
),
schGetOpStr
(
op
));
return
;
}
SCH_TASK_DLOG
(
"task added to execTask list, numOfTasks:%d"
,
taosHashGetSize
(
pJob
->
execTasks
));
return
TSDB_CODE_SUCCESS
;
}
/*
int32_t schMoveTaskToSuccList(SSchJob *pJob, SSchTask *pTask, bool *moved) {
if (0 != taosHashRemove(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId))) {
SCH_TASK_WLOG("remove task from execTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
if
(
SCH_JOB_IN_SYNC_OP
(
pJob
))
{
tsem_post
(
&
pJob
->
rspSem
);
}
else
if
(
SCH_JOB_IN_ASYNC_EXEC_OP
(
pJob
))
{
schNotifyUserExecRes
(
pJob
);
}
else
if
(
SCH_JOB_IN_ASYNC_FETCH_OP
(
pJob
))
{
schNotifyUserFetchRes
(
pJob
);
}
else
{
SCH_
TASK_DLOG("task removed from execTask list, numOfTasks:%d", taosHashGetSize(pJob->execTask
s));
SCH_
JOB_ELOG
(
"job not in any operation, status:%s"
,
jobTaskStatusStr
(
pJob
->
statu
s
));
}
}
int32_t code = taosHashPut(pJob->succTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES);
if (0 != code) {
if (HASH_NODE_EXIST(code)) {
*moved = true;
SCH_TASK_ELOG("task already in succTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
SCH_TASK_ELOG("taosHashPut task to succTask list failed, errno:%d", errno);
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
*moved = true;
SCH_TASK_DLOG("task moved to succTask list, numOfTasks:%d", taosHashGetSize(pJob->succTasks));
return TSDB_CODE_SUCCESS;
}
int32_t schMoveTaskToFailList(SSchJob *pJob, SSchTask *pTask, bool *moved) {
*moved = false;
if (0 != taosHashRemove(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId))) {
SCH_TASK_WLOG("remove task from execTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
}
int32_t code = taosHashPut(pJob->failTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES);
if (0 != code) {
if (HASH_NODE_EXIST(code)) {
*moved = true;
SCH_TASK_WLOG("task already in failTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
SCH_TASK_ELOG("taosHashPut task to failTask list failed, errno:%d", errno);
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
*moved = true;
SCH_TASK_DLOG("task moved to failTask list, numOfTasks:%d", taosHashGetSize(pJob->failTasks));
return TSDB_CODE_SUCCESS;
}
int32_t schMoveTaskToExecList(SSchJob *pJob, SSchTask *pTask, bool *moved) {
if (0 != taosHashRemove(pJob->succTasks, &pTask->taskId, sizeof(pTask->taskId))) {
SCH_TASK_WLOG("remove task from succTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
}
int32_t code = taosHashPut(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES);
if (0 != code) {
if (HASH_NODE_EXIST(code)) {
*moved = true;
SCH_TASK_ELOG("task already in execTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
SCH_TASK_ELOG("taosHashPut task to execTask list failed, errno:%d", errno);
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
*moved = true;
SCH_TASK_DLOG("task moved to execTask list, numOfTasks:%d", taosHashGetSize(pJob->execTasks));
return TSDB_CODE_SUCCESS;
}
*/
int32_t
schTaskCheckSetRetry
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
int32_t
errCode
,
bool
*
needRetry
)
{
if
(
TSDB_CODE_SCH_TIMEOUT_ERROR
==
errCode
)
{
pTask
->
maxExecTimes
++
;
if
(
pTask
->
timeoutUsec
<
SCH_MAX_TASK_TIMEOUT_USEC
)
{
pTask
->
timeoutUsec
*=
2
;
if
(
pTask
->
timeoutUsec
>
SCH_MAX_TASK_TIMEOUT_USEC
)
{
pTask
->
timeoutUsec
=
SCH_MAX_TASK_TIMEOUT_USEC
;
}
}
}
if
((
pTask
->
execId
+
1
)
>=
pTask
->
maxExecTimes
)
{
*
needRetry
=
false
;
SCH_TASK_DLOG
(
"task no more retry since reach max try times, execId:%d"
,
pTask
->
execId
);
return
TSDB_CODE_SUCCESS
;
}
if
(
!
SCH_NEED_RETRY
(
pTask
->
lastMsgType
,
errCode
))
{
*
needRetry
=
false
;
SCH_TASK_DLOG
(
"task no more retry cause of errCode, errCode:%x - %s"
,
errCode
,
tstrerror
(
errCode
));
return
TSDB_CODE_SUCCESS
;
}
if
(
SCH_IS_DATA_SRC_TASK
(
pTask
))
{
if
((
pTask
->
execId
+
1
)
>=
SCH_TASK_NUM_OF_EPS
(
&
pTask
->
plan
->
execNode
))
{
*
needRetry
=
false
;
SCH_TASK_DLOG
(
"task no more retry since all ep tried, execId:%d, epNum:%d"
,
pTask
->
execId
,
SCH_TASK_NUM_OF_EPS
(
&
pTask
->
plan
->
execNode
));
return
TSDB_CODE_SUCCESS
;
}
}
else
{
int32_t
candidateNum
=
taosArrayGetSize
(
pTask
->
candidateAddrs
);
if
((
pTask
->
candidateIdx
+
1
)
>=
candidateNum
&&
(
TSDB_CODE_SCH_TIMEOUT_ERROR
!=
errCode
))
{
*
needRetry
=
false
;
SCH_TASK_DLOG
(
"task no more retry since all candiates tried, candidateIdx:%d, candidateNum:%d"
,
pTask
->
candidateIdx
,
candidateNum
);
return
TSDB_CODE_SUCCESS
;
}
}
*
needRetry
=
true
;
SCH_TASK_DLOG
(
"task need the %dth retry, errCode:%x - %s"
,
pTask
->
execId
+
1
,
errCode
,
tstrerror
(
errCode
));
return
TSDB_CODE_SUCCESS
;
}
int32_t
schHandleTaskRetry
(
SSchJob
*
pJob
,
SSchTask
*
pTask
)
{
atomic_sub_fetch_32
(
&
pTask
->
level
->
taskLaunchedNum
,
1
);
SCH_ERR_RET
(
schRemoveTaskFromExecList
(
pJob
,
pTask
));
SCH_SET_TASK_STATUS
(
pTask
,
JOB_TASK_STATUS_NOT_START
);
if
(
SCH_TASK_NEED_FLOW_CTRL
(
pJob
,
pTask
))
{
SCH_ERR_RET
(
schLaunchTasksInFlowCtrlList
(
pJob
,
pTask
));
}
schDeregisterTaskHb
(
pJob
,
pTask
);
if
(
SCH_IS_DATA_SRC_TASK
(
pTask
))
{
SCH_SWITCH_EPSET
(
&
pTask
->
plan
->
execNode
);
}
else
{
int32_t
candidateNum
=
taosArrayGetSize
(
pTask
->
candidateAddrs
);
if
(
++
pTask
->
candidateIdx
>=
candidateNum
)
{
pTask
->
candidateIdx
=
0
;
}
}
SCH_ERR_RET
(
schLaunchTask
(
pJob
,
pTask
));
return
TSDB_CODE_SUCCESS
;
}
int32_t
schSetJobQueryRes
(
SSchJob
*
pJob
,
SQueryResult
*
pRes
)
{
pRes
->
code
=
atomic_load_32
(
&
pJob
->
errCode
);
pRes
->
numOfRows
=
pJob
->
resNumOfRows
;
pRes
->
res
=
pJob
->
execRes
;
pJob
->
execRes
.
res
=
NULL
;
return
TSDB_CODE_SUCCESS
;
}
int32_t
schSetJobFetchRes
(
SSchJob
*
pJob
,
void
**
pData
)
{
int32_t
code
=
0
;
if
(
pJob
->
resData
&&
((
SRetrieveTableRsp
*
)
pJob
->
resData
)
->
completed
)
{
SCH_ERR_RET
(
schUpdateJobStatus
(
pJob
,
JOB_TASK_STATUS_SUCCEED
));
}
while
(
true
)
{
*
pData
=
atomic_load_ptr
(
&
pJob
->
resData
);
if
(
*
pData
!=
atomic_val_compare_exchange_ptr
(
&
pJob
->
resData
,
*
pData
,
NULL
))
{
continue
;
}
break
;
}
if
(
NULL
==
*
pData
)
{
SRetrieveTableRsp
*
rsp
=
(
SRetrieveTableRsp
*
)
taosMemoryCalloc
(
1
,
sizeof
(
SRetrieveTableRsp
));
if
(
rsp
)
{
rsp
->
completed
=
1
;
}
*
pData
=
rsp
;
SCH_JOB_DLOG
(
"empty res and set query complete, code:%x"
,
code
);
}
SCH_JOB_DLOG
(
"fetch done, totalRows:%d"
,
pJob
->
resNumOfRows
);
return
TSDB_CODE_SUCCESS
;
}
int32_t
schNotifyUserExecRes
(
SSchJob
*
pJob
)
{
SQueryResult
*
pRes
=
taosMemoryCalloc
(
1
,
sizeof
(
SQueryResult
));
if
(
pRes
)
{
schSetJobQueryRes
(
pJob
,
pRes
);
}
schEndOperation
(
pJob
);
SCH_JOB_DLOG
(
"sch start to invoke exec cb, code: %s"
,
tstrerror
(
pJob
->
errCode
));
(
*
pJob
->
userRes
.
execFp
)(
pRes
,
pJob
->
userRes
.
userParam
,
atomic_load_32
(
&
pJob
->
errCode
));
SCH_JOB_DLOG
(
"sch end from query cb, code: %s"
,
tstrerror
(
pJob
->
errCode
));
return
TSDB_CODE_SUCCESS
;
}
int32_t
schNotifyUserFetchRes
(
SSchJob
*
pJob
)
{
void
*
pRes
=
NULL
;
schSetJobFetchRes
(
pJob
,
&
pRes
);
schEndOperation
(
pJob
);
SCH_JOB_DLOG
(
"sch start to invoke fetch cb, code: %s"
,
tstrerror
(
pJob
->
errCode
));
(
*
pJob
->
userRes
.
fetchFp
)(
pRes
,
pJob
->
userRes
.
userParam
,
atomic_load_32
(
&
pJob
->
errCode
));
SCH_JOB_DLOG
(
"sch end from fetch cb, code: %s"
,
tstrerror
(
pJob
->
errCode
));
return
TSDB_CODE_SUCCESS
;
}
void
schPostJobRes
(
SSchJob
*
pJob
,
SCH_OP_TYPE
op
)
{
if
(
SCH_OP_NULL
==
pJob
->
opStatus
.
op
)
{
SCH_JOB_DLOG
(
"job not in any op, no need to post job res, status:%s"
,
jobTaskStatusStr
(
pJob
->
status
));
return
;
}
if
(
op
&&
pJob
->
opStatus
.
op
!=
op
)
{
SCH_JOB_ELOG
(
"job in op %s mis-match with expected %s"
,
schGetOpStr
(
pJob
->
opStatus
.
op
),
schGetOpStr
(
op
));
return
;
}
if
(
SCH_JOB_IN_SYNC_OP
(
pJob
))
{
tsem_post
(
&
pJob
->
rspSem
);
}
else
if
(
SCH_JOB_IN_ASYNC_EXEC_OP
(
pJob
))
{
schNotifyUserExecRes
(
pJob
);
}
else
if
(
SCH_JOB_IN_ASYNC_FETCH_OP
(
pJob
))
{
schNotifyUserFetchRes
(
pJob
);
}
else
{
SCH_JOB_ELOG
(
"job not in any operation, status:%s"
,
jobTaskStatusStr
(
pJob
->
status
));
}
}
int32_t
schProcessOnJobFailureImpl
(
SSchJob
*
pJob
,
int32_t
status
,
int32_t
errCode
)
{
// if already FAILED, no more processing
SCH_ERR_RET
(
schUpdateJobStatus
(
pJob
,
status
));
schUpdateJobErrCode
(
pJob
,
errCode
);
int32_t
code
=
atomic_load_32
(
&
pJob
->
errCode
);
if
(
code
)
{
SCH_JOB_DLOG
(
"job failed with error: %s"
,
tstrerror
(
code
));
int32_t
schProcessOnJobFailureImpl
(
SSchJob
*
pJob
,
int32_t
status
,
int32_t
errCode
)
{
schUpdateJobErrCode
(
pJob
,
errCode
);
int32_t
code
=
atomic_load_32
(
&
pJob
->
errCode
);
if
(
code
)
{
SCH_JOB_DLOG
(
"job failed with error: %s"
,
tstrerror
(
code
));
}
schPostJobRes
(
pJob
,
0
);
SCH_RET
(
code
);
SCH_RET
(
TSDB_CODE_SCH_IGNORE_ERROR
);
}
// Note: no more task error processing, handled in function internal
int32_t
schProcessOnJobFailure
(
SSchJob
*
pJob
,
int32_t
errCode
)
{
SCH_RET
(
schProcessOnJobFailureImpl
(
pJob
,
JOB_TASK_STATUS_FAILED
,
errCode
));
if
(
TSDB_CODE_SCH_IGNORE_ERROR
==
errCode
)
{
return
TSDB_CODE_SCH_IGNORE_ERROR
;
}
schProcessOnJobFailureImpl
(
pJob
,
JOB_TASK_STATUS_FAIL
,
errCode
);
return
TSDB_CODE_SCH_IGNORE_ERROR
;
}
// Note: no more error processing, handled in function internal
int32_t
schProcessOnJobDropped
(
SSchJob
*
pJob
,
int32_t
errCode
)
{
SCH_RET
(
schProcessOnJobFailureImpl
(
pJob
,
JOB_TASK_STATUS_DROP
PING
,
errCode
));
SCH_RET
(
schProcessOnJobFailureImpl
(
pJob
,
JOB_TASK_STATUS_DROP
,
errCode
));
}
// Note: no more task error processing, handled in function internal
int32_t
schProcessOnJobPartialSuccess
(
SSchJob
*
pJob
)
{
int32_t
code
=
0
;
SCH_ERR_RET
(
schUpdateJobStatus
(
pJob
,
JOB_TASK_STATUS_PARTIAL_SUCCEED
));
schPostJobRes
(
pJob
,
SCH_OP_EXEC
);
return
TSDB_CODE_SUCCESS
;
_return:
SCH_RET
(
schProcessOnJobFailure
(
pJob
,
code
));
}
void
schProcessOnDataFetched
(
SSchJob
*
pJob
)
{
schPostJobRes
(
pJob
,
SCH_OP_FETCH
);
}
// Note: no more task error processing, handled in function internal
int32_t
schProcessOnTaskFailure
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
int32_t
errCode
)
{
int8_t
status
=
0
;
if
(
errCode
==
TSDB_CODE_SCH_TIMEOUT_ERROR
)
{
SCH_LOG_TASK_WAIT_TS
(
pTask
);
}
else
{
SCH_LOG_TASK_END_TS
(
pTask
);
}
if
(
schJobNeedToStop
(
pJob
,
&
status
))
{
SCH_TASK_DLOG
(
"task failed not processed cause of job status, job status:%s"
,
jobTaskStatusStr
(
status
));
SCH_RET
(
atomic_load_32
(
&
pJob
->
errCode
));
}
bool
needRetry
=
false
;
bool
moved
=
false
;
int32_t
taskDone
=
0
;
int32_t
code
=
0
;
SCH_TASK_DLOG
(
"taskOnFailure, code:%s"
,
tstrerror
(
errCode
));
SCH_ERR_JRET
(
schTaskCheckSetRetry
(
pJob
,
pTask
,
errCode
,
&
needRetry
));
if
(
!
needRetry
)
{
SCH_TASK_ELOG
(
"task failed and no more retry, code:%s"
,
tstrerror
(
errCode
));
if
(
SCH_GET_TASK_STATUS
(
pTask
)
!=
JOB_TASK_STATUS_EXECUTING
)
{
SCH_TASK_ELOG
(
"task not in executing list, status:%s"
,
SCH_GET_TASK_STATUS_STR
(
pTask
));
SCH_ERR_JRET
(
TSDB_CODE_SCH_STATUS_ERROR
);
}
SCH_SET_TASK_STATUS
(
pTask
,
JOB_TASK_STATUS_FAILED
);
if
(
SCH_IS_WAIT_ALL_JOB
(
pJob
))
{
SCH_LOCK
(
SCH_WRITE
,
&
pTask
->
level
->
lock
);
pTask
->
level
->
taskFailed
++
;
taskDone
=
pTask
->
level
->
taskSucceed
+
pTask
->
level
->
taskFailed
;
SCH_UNLOCK
(
SCH_WRITE
,
&
pTask
->
level
->
lock
);
schUpdateJobErrCode
(
pJob
,
errCode
);
if
(
taskDone
<
pTask
->
level
->
taskNum
)
{
SCH_TASK_DLOG
(
"need to wait other tasks, doneNum:%d, allNum:%d"
,
taskDone
,
pTask
->
level
->
taskNum
);
SCH_RET
(
errCode
);
}
}
}
else
{
SCH_ERR_JRET
(
schHandleTaskRetry
(
pJob
,
pTask
));
return
TSDB_CODE_SUCCESS
;
}
_return:
SCH_RET
(
schProcessOnJobFailure
(
pJob
,
errCode
));
}
int32_t
schLaunchNextLevelTasks
(
SSchJob
*
pJob
,
SSchTask
*
pTask
)
{
if
(
!
SCH_IS_QUERY_JOB
(
pJob
))
{
return
TSDB_CODE_SUCCESS
;
}
SSchLevel
*
pLevel
=
pTask
->
level
;
int32_t
doneNum
=
atomic_add_fetch_32
(
&
pLevel
->
taskDoneNum
,
1
);
if
(
doneNum
==
pLevel
->
taskNum
)
{
pJob
->
levelIdx
--
;
pLevel
=
taosArrayGet
(
pJob
->
levels
,
pJob
->
levelIdx
);
for
(
int32_t
i
=
0
;
i
<
pLevel
->
taskNum
;
++
i
)
{
SSchTask
*
pTask
=
taosArrayGet
(
pLevel
->
subTasks
,
i
);
if
(
pTask
->
children
&&
taosArrayGetSize
(
pTask
->
children
)
>
0
)
{
continue
;
}
SCH_ERR_RET
(
schLaunchTask
(
pJob
,
pTask
));
}
}
return
TSDB_CODE_SUCCESS
;
}
// Note: no more task error processing, handled in function internal
int32_t
schProcessOnTaskSuccess
(
SSchJob
*
pJob
,
SSchTask
*
pTask
)
{
bool
moved
=
false
;
int32_t
code
=
0
;
SCH_TASK_DLOG
(
"taskOnSuccess, status:%s"
,
SCH_GET_TASK_STATUS_STR
(
pTask
));
SCH_LOG_TASK_END_TS
(
pTask
);
SCH_SET_TASK_STATUS
(
pTask
,
JOB_TASK_STATUS_PARTIAL_SUCCEED
);
SCH_ERR_JRET
(
schRecordTaskSucceedNode
(
pJob
,
pTask
));
SCH_ERR_JRET
(
schLaunchTasksInFlowCtrlList
(
pJob
,
pTask
));
int32_t
parentNum
=
pTask
->
parents
?
(
int32_t
)
taosArrayGetSize
(
pTask
->
parents
)
:
0
;
if
(
parentNum
==
0
)
{
int32_t
taskDone
=
0
;
if
(
SCH_IS_WAIT_ALL_JOB
(
pJob
))
{
SCH_LOCK
(
SCH_WRITE
,
&
pTask
->
level
->
lock
);
pTask
->
level
->
taskSucceed
++
;
taskDone
=
pTask
->
level
->
taskSucceed
+
pTask
->
level
->
taskFailed
;
SCH_UNLOCK
(
SCH_WRITE
,
&
pTask
->
level
->
lock
);
if
(
taskDone
<
pTask
->
level
->
taskNum
)
{
SCH_TASK_DLOG
(
"wait all tasks, done:%d, all:%d"
,
taskDone
,
pTask
->
level
->
taskNum
);
return
TSDB_CODE_SUCCESS
;
}
else
if
(
taskDone
>
pTask
->
level
->
taskNum
)
{
SCH_TASK_ELOG
(
"taskDone number invalid, done:%d, total:%d"
,
taskDone
,
pTask
->
level
->
taskNum
);
}
if
(
pTask
->
level
->
taskFailed
>
0
)
{
SCH_RET
(
schProcessOnJobFailure
(
pJob
,
0
));
}
else
{
SCH_RET
(
schProcessOnJobPartialSuccess
(
pJob
));
}
}
else
{
pJob
->
resNode
=
pTask
->
succeedAddr
;
}
pJob
->
fetchTask
=
pTask
;
SCH_RET
(
schProcessOnJobPartialSuccess
(
pJob
));
}
/*
if (SCH_IS_DATA_SRC_TASK(task) && job->dataSrcEps.numOfEps < SCH_MAX_CANDIDATE_EP_NUM) {
strncpy(job->dataSrcEps.fqdn[job->dataSrcEps.numOfEps], task->execAddr.fqdn, sizeof(task->execAddr.fqdn));
job->dataSrcEps.port[job->dataSrcEps.numOfEps] = task->execAddr.port;
++job->dataSrcEps.numOfEps;
}
*/
for
(
int32_t
i
=
0
;
i
<
parentNum
;
++
i
)
{
SSchTask
*
parent
=
*
(
SSchTask
**
)
taosArrayGet
(
pTask
->
parents
,
i
);
int32_t
readyNum
=
atomic_add_fetch_32
(
&
parent
->
childReady
,
1
);
SCH_LOCK
(
SCH_WRITE
,
&
parent
->
lock
);
SDownstreamSourceNode
source
=
{.
type
=
QUERY_NODE_DOWNSTREAM_SOURCE
,
.
taskId
=
pTask
->
taskId
,
.
schedId
=
schMgmt
.
sId
,
.
execId
=
pTask
->
execId
,
.
addr
=
pTask
->
succeedAddr
};
qSetSubplanExecutionNode
(
parent
->
plan
,
pTask
->
plan
->
id
.
groupId
,
&
source
);
SCH_UNLOCK
(
SCH_WRITE
,
&
parent
->
lock
);
if
(
SCH_TASK_READY_FOR_LAUNCH
(
readyNum
,
parent
))
{
SCH_TASK_DLOG
(
"all %d children task done, start to launch parent task 0x%"
PRIx64
,
readyNum
,
parent
->
taskId
);
SCH_ERR_RET
(
schLaunchTask
(
pJob
,
parent
));
}
}
SCH_ERR_RET
(
schLaunchNextLevelTasks
(
pJob
,
pTask
));
return
TSDB_CODE_SUCCESS
;
_return:
SCH_RET
(
schProcessOnJobFailure
(
pJob
,
code
));
}
// Note: no more error processing, handled in function internal
int32_t
schFetchFromRemote
(
SSchJob
*
pJob
)
{
int32_t
code
=
0
;
void
*
resData
=
atomic_load_ptr
(
&
pJob
->
resData
);
if
(
resData
)
{
SCH_JOB_DLOG
(
"res already fetched, res:%p"
,
resData
);
return
TSDB_CODE_SUCCESS
;
}
SCH_ERR_JRET
(
schBuildAndSendMsg
(
pJob
,
pJob
->
fetchTask
,
&
pJob
->
resNode
,
TDMT_SCH_FETCH
));
return
TSDB_CODE_SUCCESS
;
_return:
SCH_RET
(
schProcessOnTaskFailure
(
pJob
,
pJob
->
fetchTask
,
code
));
}
int32_t
schProcessOnExplainDone
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
SRetrieveTableRsp
*
pRsp
)
{
SCH_TASK_DLOG
(
"got explain rsp, rows:%d, complete:%d"
,
htonl
(
pRsp
->
numOfRows
),
pRsp
->
completed
);
atomic_store_32
(
&
pJob
->
resNumOfRows
,
htonl
(
pRsp
->
numOfRows
));
atomic_store_ptr
(
&
pJob
->
resData
,
pRsp
);
SCH_SET_TASK_STATUS
(
pTask
,
JOB_TASK_STATUS_SUCCEED
);
schProcessOnDataFetched
(
pJob
);
return
TSDB_CODE_SUCCESS
;
}
void
schDropTaskOnExecNode
(
SSchJob
*
pJob
,
SSchTask
*
pTask
)
{
if
(
NULL
==
pTask
->
execNodes
)
{
SCH_TASK_DLOG
(
"no exec address, status:%s"
,
SCH_GET_TASK_STATUS_STR
(
pTask
));
return
;
}
int32_t
size
=
(
int32_t
)
taosHashGetSize
(
pTask
->
execNodes
);
if
(
size
<=
0
)
{
SCH_TASK_DLOG
(
"task has no execNodes, no need to drop it, status:%s"
,
SCH_GET_TASK_STATUS_STR
(
pTask
));
return
;
}
SSchNodeInfo
*
nodeInfo
=
taosHashIterate
(
pTask
->
execNodes
,
NULL
);
while
(
nodeInfo
)
{
SCH_SET_TASK_HANDLE
(
pTask
,
nodeInfo
->
handle
);
schBuildAndSendMsg
(
pJob
,
pTask
,
&
nodeInfo
->
addr
,
TDMT_SCH_DROP_TASK
);
nodeInfo
=
taosHashIterate
(
pTask
->
execNodes
,
nodeInfo
);
}
SCH_TASK_DLOG
(
"task has been dropped on %d exec nodes"
,
size
);
}
int32_t
schRescheduleTask
(
SSchJob
*
pJob
,
SSchTask
*
pTask
)
{
if
(
SCH_IS_DATA_SRC_QRY_TASK
(
pTask
))
{
return
TSDB_CODE_SUCCESS
;
}
SCH_LOCK_TASK
(
pTask
);
if
(
SCH_TASK_TIMEOUT
(
pTask
)
&&
JOB_TASK_STATUS_EXECUTING
==
pTask
->
status
&&
pJob
->
fetchTask
!=
pTask
&&
taosArrayGetSize
(
pTask
->
candidateAddrs
)
>
1
)
{
SCH_TASK_DLOG
(
"task execId %d will be rescheduled now"
,
pTask
->
execId
);
schDropTaskOnExecNode
(
pJob
,
pTask
);
taosHashClear
(
pTask
->
execNodes
);
schProcessOnTaskFailure
(
pJob
,
pTask
,
TSDB_CODE_SCH_TIMEOUT_ERROR
);
}
SCH_UNLOCK_TASK
(
pTask
);
return
TSDB_CODE_SUCCESS
;
}
int32_t
schProcessOnTaskStatusRsp
(
SQueryNodeEpId
*
pEpId
,
SArray
*
pStatusList
)
{
int32_t
taskNum
=
(
int32_t
)
taosArrayGetSize
(
pStatusList
);
SSchTask
*
pTask
=
NULL
;
SCH_SET_TASK_STATUS
(
pTask
,
JOB_TASK_STATUS_SUCC
);
qDebug
(
"%d task status in hb rsp from nodeId:%d, fqdn:%s, port:%d"
,
taskNum
,
pEpId
->
nodeId
,
pEpId
->
ep
.
fqdn
,
pEpId
->
ep
.
port
);
schProcessOnDataFetched
(
pJob
);
for
(
int32_t
i
=
0
;
i
<
taskNum
;
++
i
)
{
STaskStatus
*
taskStatus
=
taosArrayGet
(
pStatusList
,
i
);
return
TSDB_CODE_SUCCESS
;
}
qDebug
(
"QID:0x%"
PRIx64
",TID:0x%"
PRIx64
",EID:%d task status in server: %s"
,
taskStatus
->
queryId
,
taskStatus
->
taskId
,
taskStatus
->
execId
,
jobTaskStatusStr
(
taskStatus
->
status
));
SSchJob
*
pJob
=
schAcquireJob
(
taskStatus
->
refId
);
if
(
NULL
==
pJob
)
{
qWarn
(
"job not found, refId:0x%"
PRIx64
",QID:0x%"
PRIx64
",TID:0x%"
PRIx64
,
taskStatus
->
refId
,
taskStatus
->
queryId
,
taskStatus
->
taskId
);
// TODO DROP TASK FROM SERVER!!!!
continue
;
int32_t
schLaunchJobLowerLevel
(
SSchJob
*
pJob
,
SSchTask
*
pTask
)
{
if
(
!
SCH_IS_QUERY_JOB
(
pJob
))
{
return
TSDB_CODE_SUCCESS
;
}
pTask
=
NULL
;
schGetTaskInJob
(
pJob
,
taskStatus
->
taskId
,
&
pTask
);
if
(
NULL
==
pTask
)
{
// TODO DROP TASK FROM SERVER!!!!
schReleaseJob
(
taskStatus
->
refId
);
continue
;
}
SSchLevel
*
pLevel
=
pTask
->
level
;
int32_t
doneNum
=
atomic_add_fetch_32
(
&
pLevel
->
taskDoneNum
,
1
);
if
(
doneNum
==
pLevel
->
taskNum
)
{
pJob
->
levelIdx
--
;
if
(
taskStatus
->
execId
!=
pTask
->
execId
)
{
// TODO DROP TASK FROM SERVER!!!!
SCH_TASK_DLOG
(
"EID %d in hb rsp mis-match"
,
taskStatus
->
execId
);
schReleaseJob
(
taskStatus
->
refId
);
continue
;
}
pLevel
=
taosArrayGet
(
pJob
->
levels
,
pJob
->
levelIdx
);
for
(
int32_t
i
=
0
;
i
<
pLevel
->
taskNum
;
++
i
)
{
SSchTask
*
pTask
=
taosArrayGet
(
pLevel
->
subTasks
,
i
);
if
(
taskStatus
->
status
==
JOB_TASK_STATUS_FAILED
)
{
// RECORD AND HANDLE ERROR!!!!
schReleaseJob
(
taskStatus
->
refId
);
if
(
pTask
->
children
&&
taosArrayGetSize
(
pTask
->
children
)
>
0
)
{
continue
;
}
if
(
taskStatus
->
status
==
JOB_TASK_STATUS_NOT_START
)
{
schRescheduleTask
(
pJob
,
pTask
);
SCH_ERR_RET
(
schLaunchTask
(
pJob
,
pTask
));
}
schReleaseJob
(
taskStatus
->
refId
);
}
return
TSDB_CODE_SUCCESS
;
}
int32_t
schSaveJobQueryRes
(
SSchJob
*
pJob
,
SQueryTableRsp
*
rsp
)
{
if
(
rsp
->
tbFName
[
0
])
{
if
(
NULL
==
pJob
->
execRes
.
res
)
{
...
...
@@ -1336,22 +562,6 @@ int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp) {
return
TSDB_CODE_SUCCESS
;
}
// Look up a task by id in a task hash list.
// On a hit, *pTask is set to the stored task pointer; on a miss (or an
// empty list) *pTask is left untouched and TSDB_CODE_SUCCESS is still
// returned — callers distinguish the cases by checking *pTask.
int32_t schGetTaskFromList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask) {
  // Fast path: nothing to search in an empty list.
  if (taosHashGetSize(pTaskList) <= 0) {
    return TSDB_CODE_SUCCESS;
  }

  SSchTask **entry = taosHashGet(pTaskList, &taskId, sizeof(taskId));
  if (entry && *entry) {
    *pTask = *entry;
  }

  return TSDB_CODE_SUCCESS;
}
int32_t
schGetTaskInJob
(
SSchJob
*
pJob
,
uint64_t
taskId
,
SSchTask
**
pTask
)
{
schGetTaskFromList
(
pJob
->
taskList
,
taskId
,
pTask
);
if
(
NULL
==
*
pTask
)
{
...
...
@@ -1362,125 +572,26 @@ int32_t schGetTaskInJob(SSchJob *pJob, uint64_t taskId, SSchTask **pTask) {
return
TSDB_CODE_SUCCESS
;
}
// Launch one execution attempt of a task: bump the exec counters, move the
// task into the job's exec list, serialize the physical plan (once, lazily),
// pick candidate addresses, ensure the heartbeat connection for query jobs,
// and finally send the plan to the server.
// Returns TSDB_CODE_SUCCESS on success; propagates errors via SCH_RET/SCH_ERR_RET.
int32_t schLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) {
  int8_t status = 0;
  int32_t code = 0;

  atomic_add_fetch_32(&pTask->level->taskLaunchedNum, 1);
  // Each (re)launch gets a new execution id.
  pTask->execId++;

  SCH_TASK_DLOG("start to launch task's %dth exec", pTask->execId);

  SCH_LOG_TASK_START_TS(pTask);

  if (schJobNeedToStop(pJob, &status)) {
    SCH_TASK_DLOG("no need to launch task cause of job status, job status:%s", jobTaskStatusStr(status));
    SCH_RET(atomic_load_32(&pJob->errCode));
  }

  // NOTE: race condition: the task should be put into the hash table before send msg to server
  if (SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_EXECUTING) {
    SCH_ERR_RET(schPushTaskToExecList(pJob, pTask));
    SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_EXECUTING);
  }

  SSubplan *plan = pTask->plan;

  // Serialize the subplan only on the first launch; retries reuse pTask->msg.
  if (NULL == pTask->msg) {
    // TODO add more detailed reason for failure
    code = qSubPlanToString(plan, &pTask->msg, &pTask->msgLen);
    if (TSDB_CODE_SUCCESS != code) {
      SCH_TASK_ELOG("failed to create physical plan, code:%s, msg:%p, len:%d", tstrerror(code), pTask->msg,
                    pTask->msgLen);
      SCH_ERR_RET(code);
    } else {
      SCH_TASK_DLOGL("physical plan len:%d, %s", pTask->msgLen, pTask->msg);
    }
  }

  SCH_ERR_RET(schSetTaskCandidateAddrs(pJob, pTask));

  // Query jobs track task liveness via heartbeat; make sure the hb link exists.
  if (SCH_IS_QUERY_JOB(pJob)) {
    SCH_ERR_RET(schEnsureHbConnection(pJob, pTask));
  }

  SCH_ERR_RET(schBuildAndSendMsg(pJob, pTask, NULL, plan->msgType));

  return TSDB_CODE_SUCCESS;
}
// Note: no more error processing, handled in function internal
// Launch a task, honoring per-job flow control: when flow control applies,
// the task is launched only if quota is available (otherwise it stays queued
// in the flow-control list); errors are routed to the task-failure handler.
int32_t schLaunchTask(SSchJob *pJob, SSchTask *pTask) {
  int32_t code = 0;
  bool launchNow = true;

  SCH_SET_TASK_HANDLE(pTask, NULL);

  if (SCH_TASK_NEED_FLOW_CTRL(pJob, pTask)) {
    // Quota check decides whether we may launch immediately.
    launchNow = false;
    SCH_ERR_JRET(schCheckIncTaskFlowQuota(pJob, pTask, &launchNow));
  }

  if (launchNow) {
    SCH_ERR_JRET(schLaunchTaskImpl(pJob, pTask));
  }

  return TSDB_CODE_SUCCESS;

_return:

  SCH_RET(schProcessOnTaskFailure(pJob, pTask, code));
}
int32_t
schLaunchLevelTasks
(
SSchJob
*
pJob
,
SSchLevel
*
level
)
{
for
(
int32_t
i
=
0
;
i
<
level
->
taskNum
;
++
i
)
{
SSchTask
*
pTask
=
taosArrayGet
(
level
->
subTasks
,
i
);
SCH_ERR_RET
(
schLaunchTask
(
pJob
,
pTask
));
}
return
TSDB_CODE_SUCCESS
;
}
// Start executing a job. Static-explain jobs are answered locally from the
// DAG (no tasks are dispatched) and moved straight to PART_SUCC; normal jobs
// launch the tasks of the current level after flow-control setup.
int32_t schLaunchJob(SSchJob *pJob) {
  if (EXPLAIN_MODE_STATIC == pJob->attr.explainMode) {
    // Explain output is produced directly from the plan DAG.
    SCH_ERR_RET(qExecStaticExplain(pJob->pDag, (SRetrieveTableRsp **)&pJob->resData));
    SCH_ERR_RET(schSwitchJobStatus(pJob, JOB_TASK_STATUS_PART_SUCC, NULL));
  } else {
    SSchLevel *level = taosArrayGet(pJob->levels, pJob->levelIdx);
    SCH_ERR_RET(schChkJobNeedFlowCtrl(pJob, level));
    SCH_ERR_RET(schLaunchLevelTasks(pJob, level));
  }

  return TSDB_CODE_SUCCESS;
}
// Drop every task in the given hash list on its execution nodes.
// No-op unless the job is marked as needing to be dropped.
void schDropTaskInHashList(SSchJob *pJob, SHashObj *list) {
  if (!SCH_IS_NEED_DROP_JOB(pJob)) {
    return;
  }

  // Hash entries hold SSchTask* values; walk them with the iterator pair.
  for (void *pIter = taosHashIterate(list, NULL); pIter != NULL; pIter = taosHashIterate(list, pIter)) {
    SSchTask *task = *(SSchTask **)pIter;
    schDropTaskOnExecNode(pJob, task);
  }
}
// Drop all of the job's tasks on their execution nodes.
// Currently only the exec-task list is walked; the succ/fail lists
// below are intentionally disabled.
void schDropJobAllTasks(SSchJob *pJob) {
  schDropTaskInHashList(pJob, pJob->execTasks);
  // schDropTaskInHashList(pJob, pJob->succTasks);
  // schDropTaskInHashList(pJob, pJob->failTasks);
}
// Cancel a running job. Not implemented yet — currently a no-op stub
// that always reports success (see TODOs).
int32_t schCancelJob(SSchJob *pJob) {
  // TODO
  return TSDB_CODE_SUCCESS;
  // TODO MOVE ALL TASKS FROM EXEC LIST TO FAIL LIST
}
void
schFreeJobImpl
(
void
*
job
)
{
if
(
NULL
==
job
)
{
return
;
...
...
@@ -1492,10 +603,6 @@ void schFreeJobImpl(void *job) {
qDebug
(
"QID:0x%"
PRIx64
" begin to free sch job, refId:0x%"
PRIx64
", pointer:%p"
,
queryId
,
refId
,
pJob
);
if
(
pJob
->
status
==
JOB_TASK_STATUS_EXECUTING
)
{
schCancelJob
(
pJob
);
}
schDropJobAllTasks
(
pJob
);
int32_t
numOfLevels
=
taosArrayGetSize
(
pJob
->
levels
);
...
...
@@ -1528,7 +635,7 @@ void schFreeJobImpl(void *job) {
qDestroyQueryPlan
(
pJob
->
pDag
);
taosMemoryFreeClear
(
pJob
->
userRes
.
query
Res
);
taosMemoryFreeClear
(
pJob
->
userRes
.
exec
Res
);
taosMemoryFreeClear
(
pJob
->
resData
);
taosMemoryFree
(
pJob
);
...
...
@@ -1540,228 +647,259 @@ void schFreeJobImpl(void *job) {
qDebug
(
"QID:0x%"
PRIx64
" sch job freed, refId:0x%"
PRIx64
", pointer:%p"
,
queryId
,
refId
,
pJob
);
}
int32_t
schLaunchStaticExplainJob
(
SSchedulerReq
*
pReq
,
SSchJob
*
pJob
,
bool
sync
)
{
qDebug
(
"QID:0x%"
PRIx64
" job started"
,
pReq
->
pDag
->
queryId
);
int32_t
schJobFetchRows
(
SSchJob
*
pJob
)
{
int32_t
code
=
0
;
if
(
!
(
pJob
->
attr
.
explainMode
==
EXPLAIN_MODE_STATIC
))
{
SCH_ERR_RET
(
schLaunchFetchTask
(
pJob
));
if
(
pJob
->
opStatus
.
syncReq
)
{
SCH_JOB_DLOG
(
"sync wait for rsp now, job status:%s"
,
SCH_GET_JOB_STATUS_STR
(
pJob
));
tsem_wait
(
&
pJob
->
rspSem
);
SCH_RET
(
schDumpJobFetchRes
(
pJob
,
pJob
->
userRes
.
fetchRes
));
}
}
else
{
if
(
pJob
->
opStatus
.
syncReq
)
{
SCH_RET
(
schDumpJobFetchRes
(
pJob
,
pJob
->
userRes
.
fetchRes
));
}
else
{
schPostJobRes
(
pJob
,
SCH_OP_FETCH
);
}
}
SCH_RET
(
code
);
}
int32_t
schInitJob
(
int64_t
*
pJobId
,
SSchedulerReq
*
pReq
)
{
int32_t
code
=
0
;
/*
int64_t
refId
=
-
1
;
SSchJob
*
pJob
=
taosMemoryCalloc
(
1
,
sizeof
(
SSchJob
));
if
(
NULL
==
pJob
)
{
qError
(
"QID:0x%"
PRIx64
" calloc %d failed"
,
pReq
->
pDag
->
queryId
,
(
int32_t
)
sizeof
(
SSchJob
));
code = TSDB_CODE_QRY_OUT_OF_MEMORY;
pReq->fp(NULL, pReq->cbParam, code);
SCH_ERR_RET(code);
SCH_ERR_JRET
(
TSDB_CODE_QRY_OUT_OF_MEMORY
);
}
pJob
->
attr
.
explainMode
=
pReq
->
pDag
->
explainInfo
.
mode
;
pJob
->
conn
=
*
pReq
->
pConn
;
pJob
->
sql
=
pReq
->
sql
;
pJob->reqKilled = pReq->reqKilled;
pJob
->
pDag
=
pReq
->
pDag
;
pJob->attr.queryJob = true;
pJob->attr.explainMode = pReq->pDag->explainInfo.mode;
pJob->queryId = pReq->pDag->queryId;
pJob->userRes.execFp = pReq->fp;
pJob->userRes.userParam = pReq->cbParam;
pJob
->
chkKillFp
=
pReq
->
chkKillFp
;
pJob
->
chkKillParam
=
pReq
->
chkKillParam
;
pJob
->
userRes
.
execFp
=
pReq
->
execFp
;
pJob
->
userRes
.
cbParam
=
pReq
->
cbParam
;
schUpdateJobStatus(pJob, JOB_TASK_STATUS_NOT_START);
if
(
pReq
->
pNodeList
==
NULL
||
taosArrayGetSize
(
pReq
->
pNodeList
)
<=
0
)
{
qDebug
(
"QID:0x%"
PRIx64
" input exec nodeList is empty"
,
pReq
->
pDag
->
queryId
);
}
else
{
pJob
->
nodeList
=
taosArrayDup
(
pReq
->
pNodeList
);
}
code = schBeginOperation(pJob, SCH_OP_EXEC, sync);
if (code) {
pReq->fp(NULL, pReq->cbParam, code);
schFreeJobImpl(pJob
);
SCH_ERR_
RET(code
);
pJob
->
taskList
=
taosHashInit
(
pReq
->
pDag
->
numOfSubplans
,
taosGetDefaultHashFunction
(
TSDB_DATA_TYPE_UBIGINT
),
false
,
HASH_ENTRY_LOCK
);
if
(
NULL
==
pJob
->
taskList
)
{
SCH_JOB_ELOG
(
"taosHashInit %d taskList failed"
,
pReq
->
pDag
->
numOfSubplans
);
SCH_ERR_
JRET
(
TSDB_CODE_QRY_OUT_OF_MEMORY
);
}
*/
SCH_ERR_JRET
(
qExecStaticExplain
(
pReq
->
pDag
,
(
SRetrieveTableRsp
**
)
&
pJob
->
resData
));
SCH_ERR_JRET
(
schValidateAndBuildJob
(
pReq
->
pDag
,
pJob
));
/*
int64_t refId = taosAddRef(schMgmt.jobRef, pJob);
if (refId < 0) {
SCH_JOB_ELOG("taosAddRef job failed, error:%s", tstrerror(terrno));
SCH_ERR_JRET(terrno);
if
(
SCH_IS_EXPLAIN_JOB
(
pJob
))
{
SCH_ERR_JRET
(
qExecExplainBegin
(
pReq
->
pDag
,
&
pJob
->
explainCtx
,
pReq
->
startTs
));
}
if (NULL == schAcquireJob(refId)) {
SCH_JOB_ELOG("schAcquireJob job failed, refId:0x%" PRIx64, refId);
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
pJob
->
execTasks
=
taosHashInit
(
pReq
->
pDag
->
numOfSubplans
,
taosGetDefaultHashFunction
(
TSDB_DATA_TYPE_UBIGINT
),
false
,
HASH_ENTRY_LOCK
);
if
(
NULL
==
pJob
->
execTasks
)
{
SCH_JOB_ELOG
(
"taosHashInit %d execTasks failed"
,
pReq
->
pDag
->
numOfSubplans
);
SCH_ERR_JRET
(
TSDB_CODE_QRY_OUT_OF_MEMORY
);
}
pJob->refId = refId;
SCH_JOB_DLOG("job refId:0x%" PRIx64, pJob->refId);
*/
tsem_init
(
&
pJob
->
rspSem
,
0
,
0
);
pJob
->
status
=
JOB_TASK_STATUS_PARTIAL_SUCCEED
;
pJob
->
refId
=
taosAddRef
(
schMgmt
.
jobRef
,
pJob
);
if
(
pJob
->
refId
<
0
)
{
SCH_JOB_ELOG
(
"taosAddRef job failed, error:%s"
,
tstrerror
(
terrno
));
SCH_ERR_JRET
(
terrno
);
}
SCH_JOB_DLOG
(
"job exec done, job status:%s"
,
SCH_GET_JOB_STATUS_STR
(
pJob
)
);
atomic_add_fetch_32
(
&
schMgmt
.
jobNum
,
1
);
if
(
!
sync
)
{
schPostJobRes
(
pJob
,
SCH_OP_EXEC
);
}
else
{
schEndOperation
(
pJob
);
}
*
pJobId
=
pJob
->
refId
;
// schReleaseJob(
pJob->refId);
SCH_JOB_DLOG
(
"job refId:0x%"
PRIx64
" created"
,
pJob
->
refId
);
SCH_RET
(
code
)
;
return
TSDB_CODE_SUCCESS
;
_return:
schEndOperation
(
pJob
);
if
(
!
sync
)
{
pReq
->
execFp
(
NULL
,
pReq
->
execParam
,
code
);
}
if
(
NULL
==
pJob
)
{
qDestroyQueryPlan
(
pReq
->
pDag
);
}
else
if
(
pJob
->
refId
<
0
)
{
schFreeJobImpl
(
pJob
);
SCH_RET
(
code
);
}
int32_t
schFetchRows
(
SSchJob
*
pJob
)
{
int32_t
code
=
0
;
if
(
!
(
pJob
->
attr
.
explainMode
==
EXPLAIN_MODE_STATIC
))
{
SCH_ERR_JRET
(
schFetchFromRemote
(
pJob
));
tsem_wait
(
&
pJob
->
rspSem
);
}
else
{
taosRemoveRef
(
schMgmt
.
jobRef
,
pJob
->
refId
);
}
SCH_ERR_JRET
(
schSetJobFetchRes
(
pJob
,
pJob
->
userRes
.
fetchRes
));
_return:
schEndOperation
(
pJob
);
SCH_RET
(
code
);
}
int32_t
sch
AsyncFetchRows
(
SSchJob
*
pJob
)
{
int32_t
sch
ExecJob
(
SSchJob
*
pJob
,
SSchedulerReq
*
pReq
)
{
int32_t
code
=
0
;
qDebug
(
"QID:0x%"
PRIx64
" sch job refId 0x%"
PRIx64
" started"
,
pReq
->
pDag
->
queryId
,
pJob
->
refId
);
if
(
pJob
->
attr
.
explainMode
==
EXPLAIN_MODE_STATIC
)
{
schPostJobRes
(
pJob
,
SCH_OP_FETCH
);
return
TSDB_CODE_SUCCESS
;
SCH_ERR_RET
(
schLaunchJob
(
pJob
));
if
(
pReq
->
syncReq
)
{
SCH_JOB_DLOG
(
"sync wait for rsp now, job status:%s"
,
SCH_GET_JOB_STATUS_STR
(
pJob
));
tsem_wait
(
&
pJob
->
rspSem
);
}
SCH_
ERR_RET
(
schFetchFromRemote
(
pJob
)
);
SCH_
JOB_DLOG
(
"job exec done, job status:%s, jobId:0x%"
PRIx64
,
SCH_GET_JOB_STATUS_STR
(
pJob
),
pJob
->
refId
);
return
TSDB_CODE_SUCCESS
;
}
int32_t
schExecJobImpl
(
SSchedulerReq
*
pReq
,
SSchJob
*
pJob
,
bool
sync
)
{
int32_t
code
=
0
;
qDebug
(
"QID:0x%"
PRIx64
" sch job refId 0x%"
PRIx64
" started"
,
pReq
->
pDag
->
queryId
,
pJob
->
refId
);
SCH_ERR_JRET
(
schBeginOperation
(
pJob
,
SCH_OP_EXEC
,
sync
));
void
schProcessOnOpEnd
(
SSchJob
*
pJob
,
SCH_OP_TYPE
type
,
SSchedulerReq
*
pReq
,
int32_t
errCode
)
{
int32_t
op
=
0
;
if
(
EXPLAIN_MODE_STATIC
==
pReq
->
pDag
->
explainInfo
.
mode
)
{
code
=
schLaunchStaticExplainJob
(
pReq
,
pJob
,
sync
);
}
else
{
code
=
schLaunchJob
(
pJob
);
if
(
sync
)
{
SCH_JOB_DLOG
(
"will wait for rsp now, job status:%s"
,
SCH_GET_JOB_STATUS_STR
(
pJob
));
tsem_wait
(
&
pJob
->
rspSem
);
schEndOperation
(
pJob
);
}
else
if
(
code
)
{
schPostJobRes
(
pJob
,
SCH_OP_EXEC
);
switch
(
type
)
{
case
SCH_OP_EXEC
:
if
(
pReq
&&
pReq
->
syncReq
)
{
op
=
atomic_val_compare_exchange_32
(
&
pJob
->
opStatus
.
op
,
type
,
SCH_OP_NULL
);
if
(
SCH_OP_NULL
==
op
||
op
!=
type
)
{
SCH_JOB_ELOG
(
"job not in %s operation, op:%s, status:%s"
,
schGetOpStr
(
type
),
schGetOpStr
(
op
),
jobTaskStatusStr
(
pJob
->
status
));
}
schDumpJobExecRes
(
pJob
,
pReq
->
pExecRes
);
}
break
;
case
SCH_OP_FETCH
:
if
(
pReq
&&
pReq
->
syncReq
)
{
op
=
atomic_val_compare_exchange_32
(
&
pJob
->
opStatus
.
op
,
type
,
SCH_OP_NULL
);
if
(
SCH_OP_NULL
==
op
||
op
!=
type
)
{
SCH_JOB_ELOG
(
"job not in %s operation, op:%s, status:%s"
,
schGetOpStr
(
type
),
schGetOpStr
(
op
),
jobTaskStatusStr
(
pJob
->
status
));
}
}
break
;
case
SCH_OP_GET_STATUS
:
errCode
=
TSDB_CODE_SUCCESS
;
break
;
default:
break
;
}
SCH_JOB_DLOG
(
"job exec done, job status:%s, jobId:0x%"
PRIx64
,
SCH_GET_JOB_STATUS_STR
(
pJob
),
pJob
->
refId
);
SCH_RET
(
code
);
_return:
if
(
!
sync
)
{
pReq
->
execFp
(
NULL
,
pReq
->
execParam
,
code
);
if
(
errCode
)
{
schSwitchJobStatus
(
pJob
,
JOB_TASK_STATUS_FAIL
,
(
void
*
)
&
errCode
);
}
SCH_
RET
(
code
);
SCH_
JOB_DLOG
(
"job end %s operation with code %s"
,
schGetOpStr
(
type
),
tstrerror
(
errCode
)
);
}
int32_t
sch
DoTaskRedirect
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
SDataBuf
*
pData
,
int32_t
rspCode
)
{
int32_t
sch
ProcessOnOpBegin
(
SSchJob
*
pJob
,
SCH_OP_TYPE
type
,
SSchedulerReq
*
pReq
)
{
int32_t
code
=
0
;
int8_t
status
=
0
;
if
((
pTask
->
execId
+
1
)
>=
pTask
->
maxExecTimes
)
{
SCH_TASK_DLOG
(
"task no more retry since reach max try times, execId:%d"
,
pTask
->
execId
);
schProcessOnJobFailure
(
pJob
,
rspCode
);
return
TSDB_CODE_SUCCESS
;
}
SCH_TASK_DLOG
(
"task will be redirected now, status:%s"
,
SCH_GET_TASK_STATUS_STR
(
pTask
));
schDropTaskOnExecNode
(
pJob
,
pTask
);
taosHashClear
(
pTask
->
execNodes
);
SCH_ERR_JRET
(
schRemoveTaskFromExecList
(
pJob
,
pTask
));
schDeregisterTaskHb
(
pJob
,
pTask
);
atomic_sub_fetch_32
(
&
pTask
->
level
->
taskLaunchedNum
,
1
);
taosMemoryFreeClear
(
pTask
->
msg
);
pTask
->
msgLen
=
0
;
pTask
->
lastMsgType
=
0
;
memset
(
&
pTask
->
succeedAddr
,
0
,
sizeof
(
pTask
->
succeedAddr
));
if
(
SCH_IS_DATA_SRC_QRY_TASK
(
pTask
))
{
if
(
pData
)
{
SCH_ERR_JRET
(
schUpdateTaskCandidateAddr
(
pJob
,
pTask
,
pData
->
pEpSet
));
if
(
schJobNeedToStop
(
pJob
,
&
status
))
{
SCH_JOB_ELOG
(
"abort op %s cause of job need to stop, status:%s"
,
schGetOpStr
(
type
),
jobTaskStatusStr
(
status
));
SCH_ERR_RET
(
TSDB_CODE_SCH_IGNORE_ERROR
);
}
if
(
SCH_TASK_NEED_FLOW_CTRL
(
pJob
,
pTask
))
{
if
(
JOB_TASK_STATUS_EXECUTING
==
SCH_GET_TASK_STATUS
(
pTask
))
{
SCH_ERR_JRET
(
schLaunchTasksInFlowCtrlList
(
pJob
,
pTask
));
}
switch
(
type
)
{
case
SCH_OP_EXEC
:
if
(
SCH_OP_NULL
!=
atomic_val_compare_exchange_32
(
&
pJob
->
opStatus
.
op
,
SCH_OP_NULL
,
type
))
{
SCH_JOB_ELOG
(
"job already in %s operation"
,
schGetOpStr
(
pJob
->
opStatus
.
op
));
SCH_ERR_RET
(
TSDB_CODE_TSC_APP_ERROR
);
}
SCH_SET_TASK_STATUS
(
pTask
,
JOB_TASK_STATUS_NOT_START
);
SCH_ERR_JRET
(
schLaunchTask
(
pJob
,
pTask
));
SCH_JOB_DLOG
(
"job start %s operation"
,
schGetOpStr
(
pJob
->
opStatus
.
op
));
return
TSDB_CODE_SUCCESS
;
pJob
->
opStatus
.
syncReq
=
pReq
->
syncReq
;
break
;
case
SCH_OP_FETCH
:
if
(
SCH_OP_NULL
!=
atomic_val_compare_exchange_32
(
&
pJob
->
opStatus
.
op
,
SCH_OP_NULL
,
type
))
{
SCH_JOB_ELOG
(
"job already in %s operation"
,
schGetOpStr
(
pJob
->
opStatus
.
op
));
SCH_ERR_RET
(
TSDB_CODE_TSC_APP_ERROR
);
}
SCH_JOB_DLOG
(
"job start %s operation"
,
schGetOpStr
(
pJob
->
opStatus
.
op
));
// merge plan
pJob
->
opStatus
.
syncReq
=
pReq
->
syncReq
;
pTask
->
childReady
=
0
;
if
(
!
SCH_JOB_NEED_FETCH
(
pJob
))
{
SCH_JOB_ELOG
(
"no need to fetch data, status:%s"
,
SCH_GET_JOB_STATUS_STR
(
pJob
));
SCH_ERR_RET
(
TSDB_CODE_QRY_APP_ERROR
);
}
qClearSubplanExecutionNode
(
pTask
->
plan
);
if
(
status
!=
JOB_TASK_STATUS_PART_SUCC
)
{
SCH_JOB_ELOG
(
"job status error for fetch, status:%s"
,
jobTaskStatusStr
(
status
));
SCH_ERR_RET
(
TSDB_CODE_SCH_STATUS_ERROR
);
}
SCH_SET_TASK_STATUS
(
pTask
,
JOB_TASK_STATUS_NOT_START
);
pJob
->
userRes
.
fetchRes
=
pReq
->
pFetchRes
;
pJob
->
userRes
.
fetchFp
=
pReq
->
fetchFp
;
pJob
->
userRes
.
cbParam
=
pReq
->
cbParam
;
int32_t
childrenNum
=
taosArrayGetSize
(
pTask
->
children
);
for
(
int32_t
i
=
0
;
i
<
childrenNum
;
++
i
)
{
SSchTask
*
pChild
=
taosArrayGetP
(
pTask
->
children
,
i
);
SCH_LOCK_TASK
(
pChild
);
schDoTaskRedirect
(
pJob
,
pChild
,
NULL
,
rspCode
);
SCH_UNLOCK_TASK
(
pChild
);
break
;
case
SCH_OP_GET_STATUS
:
if
(
pJob
->
status
<
JOB_TASK_STATUS_INIT
||
pJob
->
levelNum
<=
0
||
NULL
==
pJob
->
levels
)
{
qDebug
(
"job not initialized or not executable job, refId:0x%"
PRIx64
,
pJob
->
refId
);
SCH_ERR_RET
(
TSDB_CODE_SCH_STATUS_ERROR
);
}
break
;
default:
SCH_JOB_ELOG
(
"unknown operation type %d"
,
type
);
SCH_ERR_RET
(
TSDB_CODE_TSC_APP_ERROR
);
}
return
TSDB_CODE_SUCCESS
;
}
_return:
void
schProcessOnCbEnd
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
int32_t
errCode
)
{
if
(
pTask
)
{
SCH_UNLOCK_TASK
(
pTask
);
}
code
=
schProcessOnTaskFailure
(
pJob
,
pTask
,
code
);
if
(
errCode
)
{
schSwitchJobStatus
(
pJob
,
JOB_TASK_STATUS_FAIL
,
(
void
*
)
&
errCode
);
}
SCH_RET
(
code
);
if
(
pJob
)
{
schReleaseJob
(
pJob
->
refId
);
}
}
int32_t
sch
HandleRedirect
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
SDataBuf
*
pData
,
int32_t
rspCode
)
{
int32_t
sch
ProcessOnCbBegin
(
SSchJob
**
job
,
SSchTask
**
task
,
uint64_t
qId
,
int64_t
rId
,
uint64_t
tId
)
{
int32_t
code
=
0
;
int8_t
status
=
0
;
if
(
SCH_IS_DATA_SRC_QRY_TASK
(
pTask
))
{
if
(
NULL
==
pData
->
pEpSet
)
{
SCH_TASK_ELOG
(
"no epset updated while got error %s"
,
tstrerror
(
rspCode
));
SCH_ERR_JRET
(
rspCode
);
SSchTask
*
pTask
=
NULL
;
SSchJob
*
pJob
=
schAcquireJob
(
rId
);
if
(
NULL
==
pJob
)
{
qWarn
(
"QID:0x%"
PRIx64
",TID:0x%"
PRIx64
"job no exist, may be dropped, refId:0x%"
PRIx64
,
qId
,
tId
,
rId
);
SCH_ERR_RET
(
TSDB_CODE_QRY_JOB_NOT_EXIST
);
}
if
(
schJobNeedToStop
(
pJob
,
&
status
))
{
SCH_TASK_ELOG
(
"will not do further processing cause of job status %s"
,
jobTaskStatusStr
(
status
));
SCH_ERR_JRET
(
TSDB_CODE_SCH_IGNORE_ERROR
);
}
SCH_RET
(
schDoTaskRedirect
(
pJob
,
pTask
,
pData
,
rspCode
));
SCH_ERR_JRET
(
schGetTaskInJob
(
pJob
,
tId
,
&
pTask
));
SCH_LOCK_TASK
(
pTask
);
*
job
=
pJob
;
*
task
=
pTask
;
return
TSDB_CODE_SUCCESS
;
_return:
schProcessOnTaskFailure
(
pJob
,
pTask
,
code
);
if
(
pTask
)
{
SCH_UNLOCK_TASK
(
pTask
);
}
if
(
pJob
)
{
schReleaseJob
(
rId
);
}
SCH_RET
(
code
);
}
...
...
source/libs/scheduler/src/schRemote.c
浏览文件 @
29949a96
...
...
@@ -16,7 +16,7 @@
#include "catalog.h"
#include "command.h"
#include "query.h"
#include "sch
eduler
Int.h"
#include "schInt.h"
#include "tmsg.h"
#include "tref.h"
#include "trpc.h"
...
...
@@ -36,7 +36,7 @@ int32_t schValidateReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgTy
TMSG_INFO
(
msgType
));
}
if
(
taskStatus
!=
JOB_TASK_STATUS_EXEC
UTING
&&
taskStatus
!=
JOB_TASK_STATUS_PARTIAL_SUCCEED
)
{
if
(
taskStatus
!=
JOB_TASK_STATUS_EXEC
&&
taskStatus
!=
JOB_TASK_STATUS_PART_SUCC
)
{
SCH_TASK_DLOG
(
"rsp msg conflicted with task status, status:%s, rspType:%s"
,
jobTaskStatusStr
(
taskStatus
),
TMSG_INFO
(
msgType
));
}
...
...
@@ -50,7 +50,7 @@ int32_t schValidateReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgTy
SCH_ERR_RET
(
TSDB_CODE_SCH_STATUS_ERROR
);
}
if
(
taskStatus
!=
JOB_TASK_STATUS_EXEC
UTING
&&
taskStatus
!=
JOB_TASK_STATUS_PARTIAL_SUCCEED
)
{
if
(
taskStatus
!=
JOB_TASK_STATUS_EXEC
&&
taskStatus
!=
JOB_TASK_STATUS_PART_SUCC
)
{
SCH_TASK_ELOG
(
"rsp msg conflicted with task status, status:%s, rspType:%s"
,
jobTaskStatusStr
(
taskStatus
),
TMSG_INFO
(
msgType
));
SCH_ERR_RET
(
TSDB_CODE_SCH_STATUS_ERROR
);
...
...
@@ -76,7 +76,7 @@ int32_t schValidateReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgTy
SCH_ERR_RET
(
TSDB_CODE_SCH_STATUS_ERROR
);
}
if
(
taskStatus
!=
JOB_TASK_STATUS_EXEC
UTING
&&
taskStatus
!=
JOB_TASK_STATUS_PARTIAL_SUCCEED
)
{
if
(
taskStatus
!=
JOB_TASK_STATUS_EXEC
&&
taskStatus
!=
JOB_TASK_STATUS_PART_SUCC
)
{
SCH_TASK_ELOG
(
"rsp msg conflicted with task status, status:%s, rspType:%s"
,
jobTaskStatusStr
(
taskStatus
),
TMSG_INFO
(
msgType
));
SCH_ERR_RET
(
TSDB_CODE_SCH_STATUS_ERROR
);
...
...
@@ -88,9 +88,21 @@ int32_t schValidateReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgTy
}
// Note: no more task error processing, handled in function internal
int32_t
schHandleResponseMsg
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
int32_t
msgType
,
char
*
msg
,
int32_t
msgSize
,
int32_t
rspCode
)
{
int32_t
schHandleResponseMsg
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
int32_t
execId
,
SDataBuf
*
pMsg
,
int32_t
rspCode
)
{
int32_t
code
=
0
;
char
*
msg
=
pMsg
->
pData
;
int32_t
msgSize
=
pMsg
->
len
;
int32_t
msgType
=
pMsg
->
msgType
;
bool
dropExecNode
=
(
msgType
==
TDMT_SCH_LINK_BROKEN
||
SCH_NETWORK_ERR
(
rspCode
));
SCH_ERR_JRET
(
schUpdateTaskHandle
(
pJob
,
pTask
,
dropExecNode
,
pMsg
->
handle
,
execId
));
SCH_ERR_JRET
(
schValidateReceivedMsgType
(
pJob
,
pTask
,
msgType
));
int32_t
reqType
=
IsReq
(
pMsg
)
?
pMsg
->
msgType
:
(
pMsg
->
msgType
-
1
);
if
(
SCH_NEED_REDIRECT
(
reqType
,
rspCode
,
pMsg
->
len
))
{
SCH_RET
(
schHandleRedirect
(
pJob
,
pTask
,
(
SDataBuf
*
)
pMsg
,
rspCode
));
}
switch
(
msgType
)
{
case
TDMT_VND_COMMIT_RSP
:
{
...
...
@@ -313,7 +325,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
return
TSDB_CODE_SUCCESS
;
}
SCH_ERR_JRET
(
sch
FetchFromRemote
(
pJob
));
SCH_ERR_JRET
(
sch
LaunchFetchTask
(
pJob
));
taosMemoryFreeClear
(
msg
);
...
...
@@ -330,7 +342,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
atomic_add_fetch_32
(
&
pJob
->
resNumOfRows
,
htonl
(
rsp
->
numOfRows
));
if
(
rsp
->
completed
)
{
SCH_SET_TASK_STATUS
(
pTask
,
JOB_TASK_STATUS_SUCC
EED
);
SCH_SET_TASK_STATUS
(
pTask
,
JOB_TASK_STATUS_SUCC
);
}
SCH_TASK_DLOG
(
"got fetch rsp, rows:%d, complete:%d"
,
htonl
(
rsp
->
numOfRows
),
rsp
->
completed
);
...
...
@@ -366,65 +378,24 @@ _return:
int32_t
schHandleCallback
(
void
*
param
,
SDataBuf
*
pMsg
,
int32_t
rspCode
)
{
int32_t
code
=
0
;
int32_t
msgType
=
pMsg
->
msgType
;
SSchTaskCallbackParam
*
pParam
=
(
SSchTaskCallbackParam
*
)
param
;
SSchTask
*
pTask
=
NULL
;
SSchJob
*
pJob
=
NULL
;
SSchJob
*
pJob
=
schAcquireJob
(
pParam
->
refId
);
if
(
NULL
==
pJob
)
{
qWarn
(
"QID:0x%"
PRIx64
",TID:0x%"
PRIx64
"taosAcquireRef job failed, may be dropped, refId:0x%"
PRIx64
,
pParam
->
queryId
,
pParam
->
taskId
,
pParam
->
refId
);
SCH_ERR_JRET
(
TSDB_CODE_QRY_JOB_FREED
);
}
SCH_ERR_JRET
(
schGetTaskInJob
(
pJob
,
pParam
->
taskId
,
&
pTask
));
SCH_LOCK_TASK
(
pTask
);
qDebug
(
"begin to handle rsp msg, type:%s, handle:%p, code:%s"
,
TMSG_INFO
(
pMsg
->
msgType
),
pMsg
->
handle
,
tstrerror
(
rspCode
));
SCH_TASK_DLOG
(
"rsp msg received, type:%s, handle:%p, code:%s"
,
TMSG_INFO
(
msgType
),
pMsg
->
handle
,
tstrerror
(
rspCode
));
if
(
pParam
->
execId
!=
pTask
->
execId
)
{
SCH_TASK_DLOG
(
"execId %d mis-match current execId %d"
,
pParam
->
execId
,
pTask
->
execId
);
goto
_return
;
}
bool
dropExecNode
=
(
msgType
==
TDMT_SCH_LINK_BROKEN
||
SCH_NETWORK_ERR
(
rspCode
));
SCH_ERR_JRET
(
schUpdateTaskHandle
(
pJob
,
pTask
,
dropExecNode
,
pMsg
->
handle
,
pParam
->
execId
));
int8_t
status
=
0
;
if
(
schJobNeedToStop
(
pJob
,
&
status
))
{
SCH_TASK_ELOG
(
"rsp will not be processed cause of job status %s, rspCode:0x%x"
,
jobTaskStatusStr
(
status
),
rspCode
);
code
=
atomic_load_32
(
&
pJob
->
errCode
);
goto
_return
;
}
SCH_ERR_JRET
(
schValidateReceivedMsgType
(
pJob
,
pTask
,
msgType
));
int32_t
reqType
=
IsReq
(
pMsg
)
?
pMsg
->
msgType
:
(
pMsg
->
msgType
-
1
);
if
(
SCH_NEED_REDIRECT
(
reqType
,
rspCode
,
pMsg
->
len
))
{
code
=
schHandleRedirect
(
pJob
,
pTask
,
(
SDataBuf
*
)
pMsg
,
rspCode
);
goto
_return
;
}
SCH_ERR_RET
(
schProcessOnCbBegin
(
&
pJob
,
&
pTask
,
pParam
->
queryId
,
pParam
->
refId
,
pParam
->
taskId
));
schHandleResponseMsg
(
pJob
,
pTask
,
msgType
,
pMsg
->
pData
,
pMsg
->
len
,
rspCode
);
code
=
schHandleResponseMsg
(
pJob
,
pTask
,
pParam
->
execId
,
pMsg
,
rspCode
);
pMsg
->
pData
=
NULL
;
_return:
if
(
pTask
)
{
if
(
code
)
{
schProcessOnTaskFailure
(
pJob
,
pTask
,
code
);
}
SCH_UNLOCK_TASK
(
pTask
);
}
if
(
pJob
)
{
schReleaseJob
(
pParam
->
refId
);
}
schProcessOnCbEnd
(
pJob
,
pTask
,
code
);
taosMemoryFreeClear
(
pMsg
->
pData
);
taosMemoryFreeClear
(
param
);
qDebug
(
"end to handle rsp msg, type:%s, handle:%p, code:%s"
,
TMSG_INFO
(
pMsg
->
msgType
),
pMsg
->
handle
,
tstrerror
(
rspCode
));
SCH_RET
(
code
);
}
...
...
@@ -459,8 +430,38 @@ int32_t schHandleCommitCallback(void *param, SDataBuf *pMsg, int32_t code) {
return
schHandleCallback
(
param
,
pMsg
,
code
);
}
int32_t
schMakeCallbackParam
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
int32_t
msgType
,
bool
isHb
,
SSchTrans
*
trans
,
void
**
pParam
)
{
int32_t
schHandleHbCallback
(
void
*
param
,
SDataBuf
*
pMsg
,
int32_t
code
)
{
SSchedulerHbRsp
rsp
=
{
0
};
SSchTaskCallbackParam
*
pParam
=
(
SSchTaskCallbackParam
*
)
param
;
if
(
code
)
{
qError
(
"hb rsp error:%s"
,
tstrerror
(
code
));
SCH_ERR_JRET
(
code
);
}
if
(
tDeserializeSSchedulerHbRsp
(
pMsg
->
pData
,
pMsg
->
len
,
&
rsp
))
{
qError
(
"invalid hb rsp msg, size:%d"
,
pMsg
->
len
);
SCH_ERR_JRET
(
TSDB_CODE_QRY_INVALID_INPUT
);
}
SSchTrans
trans
=
{
0
};
trans
.
pTrans
=
pParam
->
pTrans
;
trans
.
pHandle
=
pMsg
->
handle
;
SCH_ERR_JRET
(
schUpdateHbConnection
(
&
rsp
.
epId
,
&
trans
));
SCH_ERR_JRET
(
schProcessOnTaskStatusRsp
(
&
rsp
.
epId
,
rsp
.
taskStatus
));
_return:
tFreeSSchedulerHbRsp
(
&
rsp
);
taosMemoryFree
(
param
);
SCH_RET
(
code
);
}
int32_t
schMakeCallbackParam
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
int32_t
msgType
,
bool
isHb
,
SSchTrans
*
trans
,
void
**
pParam
)
{
if
(
!
isHb
)
{
SSchTaskCallbackParam
*
param
=
taosMemoryCalloc
(
1
,
sizeof
(
SSchTaskCallbackParam
));
if
(
NULL
==
param
)
{
...
...
@@ -703,36 +704,6 @@ _return:
SCH_RET
(
code
);
}
// RPC callback for scheduler heartbeat responses. Deserializes the hb
// response, refreshes the hb connection for the responding endpoint, and
// forwards the reported task statuses. The response struct and the callback
// param are always freed before returning.
// 'param' is an SSchTaskCallbackParam allocated by the sender; 'code' is the
// transport-level result for the hb request.
int32_t schHandleHbCallback(void *param, SDataBuf *pMsg, int32_t code) {
  SSchedulerHbRsp rsp = {0};
  SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param;

  // Transport error: skip processing, still run the cleanup path.
  if (code) {
    qError("hb rsp error:%s", tstrerror(code));
    SCH_ERR_JRET(code);
  }

  if (tDeserializeSSchedulerHbRsp(pMsg->pData, pMsg->len, &rsp)) {
    qError("invalid hb rsp msg, size:%d", pMsg->len);
    SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
  }

  SSchTrans trans = {0};
  trans.pTrans = pParam->pTrans;
  trans.pHandle = pMsg->handle;

  SCH_ERR_JRET(schUpdateHbConnection(&rsp.epId, &trans));

  SCH_ERR_JRET(schProcessOnTaskStatusRsp(&rsp.epId, rsp.taskStatus));

_return:

  // Cleanup runs on both success and error paths.
  tFreeSSchedulerHbRsp(&rsp);
  taosMemoryFree(param);

  SCH_RET(code);
}
int32_t
schMakeBrokenLinkVal
(
SSchJob
*
pJob
,
SSchTask
*
pTask
,
SRpcBrokenlinkVal
*
brokenVal
,
bool
isHb
)
{
int32_t
code
=
0
;
int32_t
msgType
=
TDMT_SCH_LINK_BROKEN
;
...
...
source/libs/scheduler/src/schStatus.c
0 → 100644
浏览文件 @
29949a96
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "catalog.h"
#include "command.h"
#include "query.h"
#include "schInt.h"
#include "tmsg.h"
#include "tref.h"
#include "trpc.h"
// Central job state machine: move the job to 'status' and run the action
// associated with that state. 'param' is state-specific — an SSchedulerReq*
// for EXEC, an int32_t* error code for FAIL (optional) and DROP (required).
// Any error in the transition or its action routes the job to the
// failure handler.
int32_t schSwitchJobStatus(SSchJob *pJob, int32_t status, void *param) {
  int32_t code = 0;
  SCH_ERR_JRET(schUpdateJobStatus(pJob, status));

  switch (status) {
    case JOB_TASK_STATUS_INIT:
      break;
    case JOB_TASK_STATUS_EXEC:
      SCH_ERR_JRET(schExecJob(pJob, (SSchedulerReq *)param));
      break;
    case JOB_TASK_STATUS_PART_SUCC:
      SCH_ERR_JRET(schProcessOnJobPartialSuccess(pJob));
      break;
    case JOB_TASK_STATUS_SUCC:
      break;
    case JOB_TASK_STATUS_FAIL:
      // param may be NULL here; treat it as error code 0.
      SCH_RET(schProcessOnJobFailure(pJob, (param ? *(int32_t *)param : 0)));
      break;
    case JOB_TASK_STATUS_DROP:
      SCH_ERR_JRET(schProcessOnJobDropped(pJob, *(int32_t *)param));

      // Removing the ref releases the job from the global job list.
      if (taosRemoveRef(schMgmt.jobRef, pJob->refId)) {
        SCH_JOB_ELOG("remove job from job list failed, refId:0x%" PRIx64, pJob->refId);
      } else {
        SCH_JOB_DLOG("job removed from jobRef list, refId:0x%" PRIx64, pJob->refId);
      }
      break;
    default: {
      SCH_JOB_ELOG("unknown job status %d", status);
      SCH_RET(TSDB_CODE_SCH_STATUS_ERROR);
    }
  }

  return TSDB_CODE_SUCCESS;

_return:

  SCH_RET(schProcessOnJobFailure(pJob, code));
}
// Begin a user-level operation (exec/fetch/get-status) on a job.
// Acquires a reference on the job by id and hands it back through *job;
// the matching release happens in schHandleOpEndEvent. Fails if the job
// has already been dropped from the ref list.
int32_t schHandleOpBeginEvent(int64_t jobId, SSchJob **job, SCH_OP_TYPE type, SSchedulerReq *pReq) {
  SSchJob *pJob = schAcquireJob(jobId);
  if (NULL == pJob) {
    qError("Acquire sch job failed, may be dropped, jobId:0x%" PRIx64, jobId);
    SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
  }

  // Caller owns the acquired reference from here on.
  *job = pJob;

  SCH_RET(schProcessOnOpBegin(pJob, type, pReq));
}
// End a user-level operation on a job: run the op-end processing, translate
// the internal "ignore" error into the job's recorded error code, and
// release the job reference taken in schHandleOpBeginEvent.
// Safe to call with a NULL job (begin failed before acquiring one).
int32_t schHandleOpEndEvent(SSchJob *pJob, SCH_OP_TYPE type, SSchedulerReq *pReq, int32_t errCode) {
  if (NULL == pJob) {
    SCH_RET(errCode);
  }

  schProcessOnOpEnd(pJob, type, pReq, errCode);

  // IGNORE_ERROR is an internal marker; surface the job's real error instead.
  int32_t code = (TSDB_CODE_SCH_IGNORE_ERROR == errCode) ? pJob->errCode : errCode;

  schReleaseJob(pJob->refId);

  return code;
}
source/libs/scheduler/src/schTask.c
0 → 100644
浏览文件 @
29949a96
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "catalog.h"
#include "command.h"
#include "query.h"
#include "schInt.h"
#include "tmsg.h"
#include "tref.h"
#include "trpc.h"
// Release all resources owned by a task: deregister its heartbeat first
// (so no callback touches the task while it is being torn down), then free
// the candidate-address array, the serialized plan message, the child/parent
// task arrays, and the execNodes hash. The task struct itself is not freed
// here — presumably it is owned by the level's task array; confirm at caller.
void schFreeTask(SSchJob *pJob, SSchTask *pTask) {
  schDeregisterTaskHb(pJob, pTask);

  if (pTask->candidateAddrs) {
    taosArrayDestroy(pTask->candidateAddrs);
  }

  taosMemoryFreeClear(pTask->msg);

  if (pTask->children) {
    taosArrayDestroy(pTask->children);
  }

  if (pTask->parents) {
    taosArrayDestroy(pTask->parents);
  }

  if (pTask->execNodes) {
    taosHashCleanup(pTask->execNodes);
  }
}
// Initialize a freshly allocated task for the given subplan and level.
// execId starts at -1 so the first launch (which pre-increments) becomes 0.
// Allocates the execNodes hash (execId -> SSchNodeInfo); returns
// TSDB_CODE_QRY_OUT_OF_MEMORY if that allocation fails.
int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel) {
  pTask->plan = pPlan;
  pTask->level = pLevel;
  pTask->execId = -1;  // first schLaunchTaskImpl increments this to 0
  pTask->maxExecTimes = SCH_TASK_MAX_EXEC_TIMES;
  pTask->timeoutUsec = SCH_DEFAULT_TASK_TIMEOUT_USEC;
  SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_INIT);
  pTask->taskId = schGenTaskId();

  // Tracks every node the task has been dispatched to, keyed by execId.
  pTask->execNodes =
      taosHashInit(SCH_MAX_CANDIDATE_EP_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
  if (NULL == pTask->execNodes) {
    SCH_TASK_ELOG("taosHashInit %d execNodes failed", SCH_MAX_CANDIDATE_EP_NUM);
    SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
  }

  return TSDB_CODE_SUCCESS;
}
// Record which candidate node the task actually succeeded on by copying the
// currently selected candidate address (candidateIdx) into succeedAddr.
// Fails with SCH_INTERNAL_ERROR if candidateIdx is out of range.
int32_t schRecordTaskSucceedNode(SSchJob *pJob, SSchTask *pTask) {
  SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx);
  if (NULL == addr) {
    SCH_TASK_ELOG("taosArrayGet candidate addr failed, idx:%d, size:%d", pTask->candidateIdx,
                  (int32_t)taosArrayGetSize(pTask->candidateAddrs));
    SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR);
  }

  pTask->succeedAddr = *addr;  // struct copy

  return TSDB_CODE_SUCCESS;
}
// Register the node the task is being dispatched to for execution attempt
// `execId`. The RPC handle is unknown at dispatch time and filled in later
// (see schUpdateTaskExecNode). A hash-put failure is treated as OOM.
int32_t schAppendTaskExecNode(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, int32_t execId) {
  SSchNodeInfo nodeInfo = {.addr = *addr, .handle = NULL};

  if (taosHashPut(pTask->execNodes, &execId, sizeof(execId), &nodeInfo, sizeof(nodeInfo))) {
    SCH_TASK_ELOG("taosHashPut nodeInfo to execNodes failed, errno:%d", errno);
    SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
  }

  SCH_TASK_DLOG("task execNode added, execId:%d", execId);

  return TSDB_CODE_SUCCESS;
}
// Remove execution attempt `execId` from the task's execNodes map.
// If the dropped execId is not the task's current attempt, the response
// belongs to a stale attempt and SCH_IGNORE_ERROR is returned so callers
// skip further processing. A missing map entry is only logged.
int32_t schDropTaskExecNode(SSchJob *pJob, SSchTask *pTask, void *handle, int32_t execId) {
  if (NULL == pTask->execNodes) {
    return TSDB_CODE_SUCCESS;
  }

  if (taosHashRemove(pTask->execNodes, &execId, sizeof(execId))) {
    SCH_TASK_ELOG("fail to remove execId %d from execNodeList", execId);
  } else {
    SCH_TASK_DLOG("execId %d removed from execNodeList", execId);
  }

  if (execId != pTask->execId) {  // ignore it
    SCH_TASK_DLOG("execId %d is not current execId %d", execId, pTask->execId);
    SCH_ERR_RET(TSDB_CODE_SCH_IGNORE_ERROR);
  }

  return TSDB_CODE_SUCCESS;
}
// Attach the RPC connection handle received from the executing node to the
// execNodes entry for attempt `execId`. No-op when the map is empty.
//
// Fix: the original dereferenced the taosHashGet() result unconditionally.
// taosHashGet returns NULL when the key is absent (e.g. the entry for this
// execId was already dropped by schDropTaskExecNode), which crashed here.
// A missing entry is now treated as a benign no-op.
int32_t schUpdateTaskExecNode(SSchJob *pJob, SSchTask *pTask, void *handle, int32_t execId) {
  if (taosHashGetSize(pTask->execNodes) <= 0) {
    return TSDB_CODE_SUCCESS;
  }

  SSchNodeInfo *nodeInfo = taosHashGet(pTask->execNodes, &execId, sizeof(execId));
  if (NULL == nodeInfo) {
    // Entry already removed (stale response for a dropped attempt); ignore.
    SCH_TASK_DLOG("execId %d not in execNodeList, handle update ignored", execId);
    return TSDB_CODE_SUCCESS;
  }

  nodeInfo->handle = handle;

  SCH_TASK_DLOG("handle updated to %p for execId %d", handle, execId);

  return TSDB_CODE_SUCCESS;
}
// Update the task's cached RPC handle for attempt `execId`.
// When dropExecNode is true (presumably the connection is gone), the attempt
// is removed from execNodes instead and its result code is returned directly.
int32_t schUpdateTaskHandle(SSchJob *pJob, SSchTask *pTask, bool dropExecNode, void *handle, int32_t execId) {
  if (dropExecNode) {
    SCH_RET(schDropTaskExecNode(pJob, pTask, handle, execId));
  }

  SCH_SET_TASK_HANDLE(pTask, handle);

  schUpdateTaskExecNode(pJob, pTask, handle, execId);

  return TSDB_CODE_SUCCESS;
}
// Note: no more task error processing, handled in function internal
//
// Handle a failed execution attempt of pTask.
// - If the error is the IGNORE sentinel, or the job is already stopping,
//   nothing further is done.
// - The task must still be in EXEC status; otherwise this is a stale event.
// - schTaskCheckSetRetry decides whether another attempt is allowed:
//   if yes, the task is relaunched via schHandleTaskRetry; if no, the task
//   is marked FAIL and, for jobs that wait for a whole level, the level's
//   done-counter is advanced under the level lock. Only when the level is
//   complete does the job-wide error code propagate to the caller.
int32_t schProcessOnTaskFailure(SSchJob *pJob, SSchTask *pTask, int32_t errCode) {
  if (TSDB_CODE_SCH_IGNORE_ERROR == errCode) {
    return TSDB_CODE_SCH_IGNORE_ERROR;
  }

  int8_t status = 0;
  if (schJobNeedToStop(pJob, &status)) {
    SCH_TASK_DLOG("no more task failure processing cause of job status %s", jobTaskStatusStr(status));
    SCH_ERR_RET(TSDB_CODE_SCH_IGNORE_ERROR);
  }

  if (SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_EXEC) {
    SCH_TASK_ELOG("task already not in EXEC status, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
    SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
  }

  // Timeouts log wait time; other failures close out the attempt's timing.
  if (errCode == TSDB_CODE_SCH_TIMEOUT_ERROR) {
    SCH_LOG_TASK_WAIT_TS(pTask);
  } else {
    SCH_LOG_TASK_END_TS(pTask);
  }

  bool    needRetry = false;
  bool    moved = false;  // NOTE(review): unused in this body
  int32_t taskDone = 0;

  SCH_TASK_DLOG("taskOnFailure, code:%s", tstrerror(errCode));

  SCH_ERR_RET(schTaskCheckSetRetry(pJob, pTask, errCode, &needRetry));

  if (!needRetry) {
    SCH_TASK_ELOG("task failed and no more retry, code:%s", tstrerror(errCode));

    SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_FAIL);

    if (SCH_JOB_NEED_WAIT(pJob)) {
      // Advance the level's failure/done counters atomically w.r.t. siblings.
      SCH_LOCK(SCH_WRITE, &pTask->level->lock);
      pTask->level->taskFailed++;
      taskDone = pTask->level->taskSucceed + pTask->level->taskFailed;
      SCH_UNLOCK(SCH_WRITE, &pTask->level->lock);

      schUpdateJobErrCode(pJob, errCode);

      if (taskDone < pTask->level->taskNum) {
        SCH_TASK_DLOG("need to wait other tasks, doneNum:%d, allNum:%d", taskDone, pTask->level->taskNum);
        SCH_RET(TSDB_CODE_SCH_IGNORE_ERROR);
      }

      // Level complete: surface the job-wide error code.
      SCH_RET(atomic_load_32(&pJob->errCode));
    }
  } else {
    SCH_ERR_RET(schHandleTaskRetry(pJob, pTask));

    return TSDB_CODE_SUCCESS;
  }

  SCH_RET(errCode);
}
// Note: no more task error processing, handled in function internal
//
// Handle a successful execution attempt of pTask.
// Records the succeeding node, releases any flow-controlled siblings, then:
// - If the task has no parents it is a top-level task: update the level's
//   success counter under the level lock; once all tasks of the level are
//   done, switch the job to FAIL or PART_SUCC accordingly. For non-waiting
//   jobs the result node is recorded and the task becomes the fetch task.
// - Otherwise, notify each parent: bump its childReady counter and install
//   this task's succeed address as a downstream source in the parent's plan
//   (under the parent's lock); launch the parent once all its children are
//   ready. Finally kick off the next lower level of the job.
int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) {
  bool    moved = false;  // NOTE(review): unused in this body
  int32_t code = 0;

  SCH_TASK_DLOG("taskOnSuccess, status:%s", SCH_GET_TASK_STATUS_STR(pTask));

  SCH_LOG_TASK_END_TS(pTask);

  SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_PART_SUCC);

  SCH_ERR_RET(schRecordTaskSucceedNode(pJob, pTask));

  SCH_ERR_RET(schLaunchTasksInFlowCtrlList(pJob, pTask));

  int32_t parentNum = pTask->parents ? (int32_t)taosArrayGetSize(pTask->parents) : 0;
  if (parentNum == 0) {
    int32_t taskDone = 0;
    if (SCH_JOB_NEED_WAIT(pJob)) {
      SCH_LOCK(SCH_WRITE, &pTask->level->lock);
      pTask->level->taskSucceed++;
      taskDone = pTask->level->taskSucceed + pTask->level->taskFailed;
      SCH_UNLOCK(SCH_WRITE, &pTask->level->lock);

      if (taskDone < pTask->level->taskNum) {
        SCH_TASK_DLOG("wait all tasks, done:%d, all:%d", taskDone, pTask->level->taskNum);
        return TSDB_CODE_SUCCESS;
      } else if (taskDone > pTask->level->taskNum) {
        // Counter overshoot should be impossible; log and fall through.
        SCH_TASK_ELOG("taskDone number invalid, done:%d, total:%d", taskDone, pTask->level->taskNum);
      }

      if (pTask->level->taskFailed > 0) {
        SCH_RET(schSwitchJobStatus(pJob, JOB_TASK_STATUS_FAIL, NULL));
      } else {
        SCH_RET(schSwitchJobStatus(pJob, JOB_TASK_STATUS_PART_SUCC, NULL));
      }
    } else {
      pJob->resNode = pTask->succeedAddr;
    }

    pJob->fetchTask = pTask;

    SCH_RET(schSwitchJobStatus(pJob, JOB_TASK_STATUS_PART_SUCC, NULL));
  }

  /*
    if (SCH_IS_DATA_SRC_TASK(task) && job->dataSrcEps.numOfEps < SCH_MAX_CANDIDATE_EP_NUM) {
      strncpy(job->dataSrcEps.fqdn[job->dataSrcEps.numOfEps], task->execAddr.fqdn, sizeof(task->execAddr.fqdn));
      job->dataSrcEps.port[job->dataSrcEps.numOfEps] = task->execAddr.port;

      ++job->dataSrcEps.numOfEps;
    }
  */

  for (int32_t i = 0; i < parentNum; ++i) {
    SSchTask *parent = *(SSchTask **)taosArrayGet(pTask->parents, i);
    int32_t   readyNum = atomic_add_fetch_32(&parent->childReady, 1);

    // Install this task's result location into the parent plan under lock,
    // since several children may report concurrently.
    SCH_LOCK(SCH_WRITE, &parent->lock);
    SDownstreamSourceNode source = {.type = QUERY_NODE_DOWNSTREAM_SOURCE,
                                    .taskId = pTask->taskId,
                                    .schedId = schMgmt.sId,
                                    .execId = pTask->execId,
                                    .addr = pTask->succeedAddr};
    qSetSubplanExecutionNode(parent->plan, pTask->plan->id.groupId, &source);
    SCH_UNLOCK(SCH_WRITE, &parent->lock);

    if (SCH_TASK_READY_FOR_LAUNCH(readyNum, parent)) {
      SCH_TASK_DLOG("all %d children task done, start to launch parent task 0x%" PRIx64, readyNum, parent->taskId);
      SCH_ERR_RET(schLaunchTask(pJob, parent));
    }
  }

  SCH_ERR_RET(schLaunchJobLowerLevel(pJob, pTask));

  return TSDB_CODE_SUCCESS;
}
// Reschedule a (non-data-source) task that has timed out while executing.
// Only applies when the task is still in EXEC, is not the job's fetch task,
// and has an alternative candidate node to move to. The task is dropped on
// its current exec nodes and then routed through the normal failure path
// with a TIMEOUT error, which triggers the retry machinery.
int32_t schRescheduleTask(SSchJob *pJob, SSchTask *pTask) {
  if (SCH_IS_DATA_SRC_QRY_TASK(pTask)) {
    // Data-source tasks are pinned to their vnode; never reschedule.
    return TSDB_CODE_SUCCESS;
  }

  if (SCH_TASK_TIMEOUT(pTask) && JOB_TASK_STATUS_EXEC == pTask->status && pJob->fetchTask != pTask &&
      taosArrayGetSize(pTask->candidateAddrs) > 1) {
    SCH_TASK_DLOG("task execId %d will be rescheduled now", pTask->execId);
    schDropTaskOnExecNode(pJob, pTask);
    taosHashClear(pTask->execNodes);

    SCH_ERR_RET(schProcessOnTaskFailure(pJob, pTask, TSDB_CODE_SCH_TIMEOUT_ERROR));
  }

  return TSDB_CODE_SUCCESS;
}
// Redirect a task to another node after the server reported rspCode
// (typically an epset change). Gives up and fails the whole job when the
// task has exhausted maxExecTimes. Otherwise the task is reset: dropped on
// its exec nodes, removed from the exec list, its message/addr state cleared.
// - Data-source tasks: optionally absorb the new epset from pData, rejoin
//   flow control if needed, and relaunch immediately.
// - Merge tasks: clear child-readiness and downstream sources, then
//   recursively redirect every child (each under its own task lock).
int32_t schDoTaskRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32_t rspCode) {
  int32_t code = 0;

  if ((pTask->execId + 1) >= pTask->maxExecTimes) {
    SCH_TASK_DLOG("task no more retry since reach max try times, execId:%d", pTask->execId);
    schSwitchJobStatus(pJob, JOB_TASK_STATUS_FAIL, (void *)&rspCode);
    return TSDB_CODE_SUCCESS;
  }

  SCH_TASK_DLOG("task will be redirected now, status:%s", SCH_GET_TASK_STATUS_STR(pTask));

  // Tear down the current attempt.
  schDropTaskOnExecNode(pJob, pTask);
  taosHashClear(pTask->execNodes);
  SCH_ERR_JRET(schRemoveTaskFromExecList(pJob, pTask));
  schDeregisterTaskHb(pJob, pTask);
  atomic_sub_fetch_32(&pTask->level->taskLaunchedNum, 1);
  taosMemoryFreeClear(pTask->msg);
  pTask->msgLen = 0;
  pTask->lastMsgType = 0;
  memset(&pTask->succeedAddr, 0, sizeof(pTask->succeedAddr));

  if (SCH_IS_DATA_SRC_QRY_TASK(pTask)) {
    if (pData) {
      SCH_ERR_JRET(schUpdateTaskCandidateAddr(pJob, pTask, pData->pEpSet));
    }

    if (SCH_TASK_NEED_FLOW_CTRL(pJob, pTask)) {
      if (JOB_TASK_STATUS_EXEC == SCH_GET_TASK_STATUS(pTask)) {
        SCH_ERR_JRET(schLaunchTasksInFlowCtrlList(pJob, pTask));
      }
    }

    SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_INIT);

    SCH_ERR_JRET(schLaunchTask(pJob, pTask));

    return TSDB_CODE_SUCCESS;
  }

  // merge plan

  pTask->childReady = 0;

  qClearSubplanExecutionNode(pTask->plan);

  SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_INIT);

  int32_t childrenNum = taosArrayGetSize(pTask->children);
  for (int32_t i = 0; i < childrenNum; ++i) {
    SSchTask *pChild = taosArrayGetP(pTask->children, i);
    SCH_LOCK_TASK(pChild);
    schDoTaskRedirect(pJob, pChild, NULL, rspCode);
    SCH_UNLOCK_TASK(pChild);
  }

  return TSDB_CODE_SUCCESS;

_return:

  SCH_RET(schProcessOnTaskFailure(pJob, pTask, code));
}
// Entry point for redirect handling. A data-source task can only be
// redirected when the response actually carries an updated epset; without
// one, the original rspCode is treated as a plain failure (SCH_ERR_JRET
// presumably stores it into `code` before jumping — confirm macro).
int32_t schHandleRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32_t rspCode) {
  int32_t code = 0;

  if (SCH_IS_DATA_SRC_QRY_TASK(pTask)) {
    if (NULL == pData->pEpSet) {
      SCH_TASK_ELOG("no epset updated while got error %s", tstrerror(rspCode));
      SCH_ERR_JRET(rspCode);
    }
  }

  SCH_RET(schDoTaskRedirect(pJob, pTask, pData, rspCode));

_return:

  SCH_RET(schProcessOnTaskFailure(pJob, pTask, code));
}
// Add the task to the job's execTasks hash (taskId -> SSchTask*).
// A duplicate entry is an internal error; any other put failure is treated
// as out-of-memory.
int32_t schPushTaskToExecList(SSchJob *pJob, SSchTask *pTask) {
  int32_t code = taosHashPut(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES);
  if (0 != code) {
    if (HASH_NODE_EXIST(code)) {
      SCH_TASK_ELOG("task already in execTask list, code:%x", code);
      SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR);
    }

    SCH_TASK_ELOG("taosHashPut task to execTask list failed, errno:%d", errno);
    SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
  }

  SCH_TASK_DLOG("task added to execTask list, numOfTasks:%d", taosHashGetSize(pJob->execTasks));

  return TSDB_CODE_SUCCESS;
}
/*
int32_t schMoveTaskToSuccList(SSchJob *pJob, SSchTask *pTask, bool *moved) {
if (0 != taosHashRemove(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId))) {
SCH_TASK_WLOG("remove task from execTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
} else {
SCH_TASK_DLOG("task removed from execTask list, numOfTasks:%d", taosHashGetSize(pJob->execTasks));
}
int32_t code = taosHashPut(pJob->succTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES);
if (0 != code) {
if (HASH_NODE_EXIST(code)) {
*moved = true;
SCH_TASK_ELOG("task already in succTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
SCH_TASK_ELOG("taosHashPut task to succTask list failed, errno:%d", errno);
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
*moved = true;
SCH_TASK_DLOG("task moved to succTask list, numOfTasks:%d", taosHashGetSize(pJob->succTasks));
return TSDB_CODE_SUCCESS;
}
int32_t schMoveTaskToFailList(SSchJob *pJob, SSchTask *pTask, bool *moved) {
*moved = false;
if (0 != taosHashRemove(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId))) {
SCH_TASK_WLOG("remove task from execTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
}
int32_t code = taosHashPut(pJob->failTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES);
if (0 != code) {
if (HASH_NODE_EXIST(code)) {
*moved = true;
SCH_TASK_WLOG("task already in failTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
SCH_TASK_ELOG("taosHashPut task to failTask list failed, errno:%d", errno);
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
*moved = true;
SCH_TASK_DLOG("task moved to failTask list, numOfTasks:%d", taosHashGetSize(pJob->failTasks));
return TSDB_CODE_SUCCESS;
}
int32_t schMoveTaskToExecList(SSchJob *pJob, SSchTask *pTask, bool *moved) {
if (0 != taosHashRemove(pJob->succTasks, &pTask->taskId, sizeof(pTask->taskId))) {
SCH_TASK_WLOG("remove task from succTask list failed, may not exist, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
}
int32_t code = taosHashPut(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId), &pTask, POINTER_BYTES);
if (0 != code) {
if (HASH_NODE_EXIST(code)) {
*moved = true;
SCH_TASK_ELOG("task already in execTask list, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
SCH_TASK_ELOG("taosHashPut task to execTask list failed, errno:%d", errno);
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
*moved = true;
SCH_TASK_DLOG("task moved to execTask list, numOfTasks:%d", taosHashGetSize(pJob->execTasks));
return TSDB_CODE_SUCCESS;
}
*/
// Decide whether a failed attempt may be retried; result in *needRetry.
// A timeout grants one extra attempt and doubles the task timeout (capped
// at SCH_MAX_TASK_TIMEOUT_USEC). Retry is denied when: max attempts are
// reached; the error/last-message combination is non-retriable; a
// data-source task has tried every ep of its fixed execNode; or a
// non-data-source task has tried every candidate (unless it timed out,
// in which case candidates may be cycled again).
int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bool *needRetry) {
  if (TSDB_CODE_SCH_TIMEOUT_ERROR == errCode) {
    pTask->maxExecTimes++;
    if (pTask->timeoutUsec < SCH_MAX_TASK_TIMEOUT_USEC) {
      pTask->timeoutUsec *= 2;  // exponential backoff, capped below
      if (pTask->timeoutUsec > SCH_MAX_TASK_TIMEOUT_USEC) {
        pTask->timeoutUsec = SCH_MAX_TASK_TIMEOUT_USEC;
      }
    }
  }

  if ((pTask->execId + 1) >= pTask->maxExecTimes) {
    *needRetry = false;
    SCH_TASK_DLOG("task no more retry since reach max try times, execId:%d", pTask->execId);
    return TSDB_CODE_SUCCESS;
  }

  if (!SCH_NEED_RETRY(pTask->lastMsgType, errCode)) {
    *needRetry = false;
    SCH_TASK_DLOG("task no more retry cause of errCode, errCode:%x - %s", errCode, tstrerror(errCode));
    return TSDB_CODE_SUCCESS;
  }

  if (SCH_IS_DATA_SRC_TASK(pTask)) {
    if ((pTask->execId + 1) >= SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode)) {
      *needRetry = false;
      SCH_TASK_DLOG("task no more retry since all ep tried, execId:%d, epNum:%d", pTask->execId,
                    SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode));
      return TSDB_CODE_SUCCESS;
    }
  } else {
    int32_t candidateNum = taosArrayGetSize(pTask->candidateAddrs);

    if ((pTask->candidateIdx + 1) >= candidateNum && (TSDB_CODE_SCH_TIMEOUT_ERROR != errCode)) {
      *needRetry = false;
      SCH_TASK_DLOG("task no more retry since all candiates tried, candidateIdx:%d, candidateNum:%d",
                    pTask->candidateIdx, candidateNum);
      return TSDB_CODE_SUCCESS;
    }
  }

  *needRetry = true;
  SCH_TASK_DLOG("task need the %dth retry, errCode:%x - %s", pTask->execId + 1, errCode, tstrerror(errCode));

  return TSDB_CODE_SUCCESS;
}
// Relaunch a task after schTaskCheckSetRetry approved a retry.
// Resets bookkeeping (launched counter, exec list, status), releases any
// flow-controlled siblings, deregisters the heartbeat, then advances to the
// next endpoint: data-source tasks rotate the plan's epset, other tasks
// step candidateIdx (wrapping to 0). Finally launches the task again.
int32_t schHandleTaskRetry(SSchJob *pJob, SSchTask *pTask) {
  atomic_sub_fetch_32(&pTask->level->taskLaunchedNum, 1);

  SCH_ERR_RET(schRemoveTaskFromExecList(pJob, pTask));
  SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_INIT);

  if (SCH_TASK_NEED_FLOW_CTRL(pJob, pTask)) {
    SCH_ERR_RET(schLaunchTasksInFlowCtrlList(pJob, pTask));
  }

  schDeregisterTaskHb(pJob, pTask);

  if (SCH_IS_DATA_SRC_TASK(pTask)) {
    SCH_SWITCH_EPSET(&pTask->plan->execNode);
  } else {
    int32_t candidateNum = taosArrayGetSize(pTask->candidateAddrs);
    if (++pTask->candidateIdx >= candidateNum) {
      pTask->candidateIdx = 0;  // wrap around the candidate list
    }
  }

  SCH_ERR_RET(schLaunchTask(pJob, pTask));

  return TSDB_CODE_SUCCESS;
}
// Fill the task's candidateAddrs from the job's node list, taking at most
// SCH_MAX_CANDIDATE_EP_NUM entries. Fails with TSC_NO_EXEC_NODE when no
// node could be added (empty or missing node list).
int32_t schSetAddrsFromNodeList(SSchJob *pJob, SSchTask *pTask) {
  int32_t addNum = 0;
  int32_t nodeNum = 0;

  if (pJob->nodeList) {
    nodeNum = taosArrayGetSize(pJob->nodeList);

    for (int32_t i = 0; i < nodeNum && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) {
      SQueryNodeLoad *nload = taosArrayGet(pJob->nodeList, i);
      SQueryNodeAddr *naddr = &nload->addr;

      if (NULL == taosArrayPush(pTask->candidateAddrs, naddr)) {
        SCH_TASK_ELOG("taosArrayPush execNode to candidate addrs failed, addNum:%d, errno:%d", addNum, errno);
        SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
      }

      SCH_TASK_DLOG("set %dth candidate addr, id %d, fqdn:%s, port:%d", i, naddr->nodeId, SCH_GET_CUR_EP(naddr)->fqdn,
                    SCH_GET_CUR_EP(naddr)->port);

      ++addNum;
    }
  }

  if (addNum <= 0) {
    SCH_TASK_ELOG("no available execNode as candidates, nodeNum:%d", nodeNum);
    SCH_ERR_RET(TSDB_CODE_TSC_NO_EXEC_NODE);
  }

  return TSDB_CODE_SUCCESS;
}
// Build the task's candidate execution addresses (idempotent: returns early
// if already built). Preference order: the execNode pinned in the subplan
// (if it carries any ep) first; otherwise, for non-data-source tasks, the
// job's node list. A data-source task without a pinned execNode is an error.
int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) {
  if (NULL != pTask->candidateAddrs) {
    return TSDB_CODE_SUCCESS;
  }

  pTask->candidateIdx = 0;
  pTask->candidateAddrs = taosArrayInit(SCH_MAX_CANDIDATE_EP_NUM, sizeof(SQueryNodeAddr));
  if (NULL == pTask->candidateAddrs) {
    SCH_TASK_ELOG("taosArrayInit %d condidate addrs failed", SCH_MAX_CANDIDATE_EP_NUM);
    SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
  }

  if (pTask->plan->execNode.epSet.numOfEps > 0) {
    if (NULL == taosArrayPush(pTask->candidateAddrs, &pTask->plan->execNode)) {
      SCH_TASK_ELOG("taosArrayPush execNode to candidate addrs failed, errno:%d", errno);
      SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
    }

    SCH_TASK_DLOG("use execNode in plan as candidate addr, numOfEps:%d", pTask->plan->execNode.epSet.numOfEps);

    return TSDB_CODE_SUCCESS;
  }

  if (SCH_IS_DATA_SRC_QRY_TASK(pTask)) {
    SCH_TASK_ELOG("no execNode specifed for data src task, numOfEps:%d", pTask->plan->execNode.epSet.numOfEps);
    SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
  }

  SCH_ERR_RET(schSetAddrsFromNodeList(pJob, pTask));

  /*
    for (int32_t i = 0; i < job->dataSrcEps.numOfEps && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) {
      strncpy(epSet->fqdn[epSet->numOfEps], job->dataSrcEps.fqdn[i], sizeof(job->dataSrcEps.fqdn[i]));
      epSet->port[epSet->numOfEps] = job->dataSrcEps.port[i];

      ++epSet->numOfEps;
    }
  */

  return TSDB_CODE_SUCCESS;
}
// Replace the epset of the task's single candidate address with the epset
// the server sent back (redirect case). Only valid when the task has exactly
// one candidate — otherwise the state is inconsistent and APP_ERROR is
// returned.
int32_t schUpdateTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask, SEpSet *pEpSet) {
  if (NULL == pTask->candidateAddrs || 1 != taosArrayGetSize(pTask->candidateAddrs)) {
    SCH_TASK_ELOG("not able to update cndidate addr, addr num %d",
                  (int32_t)(pTask->candidateAddrs ? taosArrayGetSize(pTask->candidateAddrs) : 0));
    SCH_ERR_RET(TSDB_CODE_APP_ERROR);
  }

  SQueryNodeAddr *pAddr = taosArrayGet(pTask->candidateAddrs, 0);

  // Log the in-use ep transition before overwriting the whole epset.
  SEp *pOld = &pAddr->epSet.eps[pAddr->epSet.inUse];
  SEp *pNew = &pEpSet->eps[pEpSet->inUse];

  SCH_TASK_DLOG("update task ep from %s:%d to %s:%d", pOld->fqdn, pOld->port, pNew->fqdn, pNew->port);

  memcpy(&pAddr->epSet, pEpSet, sizeof(pAddr->epSet));

  return TSDB_CODE_SUCCESS;
}
// Remove the task from the job's execTasks hash; a missing entry is treated
// as an internal error (the task was expected to be listed).
int32_t schRemoveTaskFromExecList(SSchJob *pJob, SSchTask *pTask) {
  int32_t code = taosHashRemove(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId));
  if (code) {
    SCH_TASK_ELOG("task failed to rm from execTask list, code:%x", code);
    SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR);
  }

  return TSDB_CODE_SUCCESS;
}
// Best-effort: send a DROP_TASK message to every node this task was
// dispatched to. No-op when the task has no recorded exec nodes. Errors
// from schBuildAndSendMsg are ignored (fire-and-forget cleanup).
void schDropTaskOnExecNode(SSchJob *pJob, SSchTask *pTask) {
  if (NULL == pTask->execNodes) {
    SCH_TASK_DLOG("no exec address, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
    return;
  }

  int32_t size = (int32_t)taosHashGetSize(pTask->execNodes);
  if (size <= 0) {
    SCH_TASK_DLOG("task has no execNodes, no need to drop it, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
    return;
  }

  SSchNodeInfo *nodeInfo = taosHashIterate(pTask->execNodes, NULL);
  while (nodeInfo) {
    // Reuse each attempt's own connection handle for the drop message.
    SCH_SET_TASK_HANDLE(pTask, nodeInfo->handle);

    schBuildAndSendMsg(pJob, pTask, &nodeInfo->addr, TDMT_SCH_DROP_TASK);

    nodeInfo = taosHashIterate(pTask->execNodes, nodeInfo);
  }

  SCH_TASK_DLOG("task has been dropped on %d exec nodes", size);
}
// Process the batch of task statuses carried by a heartbeat response from
// node pEpId. For each status: locate the job/task (schProcessOnCbBegin
// also takes the references that schProcessOnCbEnd releases), skip stale
// execIds and already-failed tasks, and reschedule tasks the server still
// reports as INIT (i.e. never actually started there).
int32_t schProcessOnTaskStatusRsp(SQueryNodeEpId *pEpId, SArray *pStatusList) {
  int32_t   taskNum = (int32_t)taosArrayGetSize(pStatusList);
  SSchTask *pTask = NULL;
  SSchJob  *pJob = NULL;

  qDebug("%d task status in hb rsp from nodeId:%d, fqdn:%s, port:%d", taskNum, pEpId->nodeId, pEpId->ep.fqdn,
         pEpId->ep.port);

  for (int32_t i = 0; i < taskNum; ++i) {
    STaskStatus *pStatus = taosArrayGet(pStatusList, i);
    int32_t      code = 0;

    qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d task status in server: %s", pStatus->queryId, pStatus->taskId,
           pStatus->execId, jobTaskStatusStr(pStatus->status));

    if (schProcessOnCbBegin(&pJob, &pTask, pStatus->queryId, pStatus->refId, pStatus->taskId)) {
      continue;
    }

    if (pStatus->execId != pTask->execId) {
      // TODO
      SCH_TASK_DLOG("execId %d mis-match current execId %d", pStatus->execId, pTask->execId);
      schProcessOnCbEnd(pJob, pTask, 0);
      continue;
    }

    if (pStatus->status == JOB_TASK_STATUS_FAIL) {
      // RECORD AND HANDLE ERROR!!!!
      schProcessOnCbEnd(pJob, pTask, 0);
      continue;
    }

    if (pStatus->status == JOB_TASK_STATUS_INIT) {
      code = schRescheduleTask(pJob, pTask);
    }

    schProcessOnCbEnd(pJob, pTask, code);
  }

  return TSDB_CODE_SUCCESS;
}
// Perform one execution attempt of a task: bump the level's launched
// counter and the task's execId, verify the job is not stopping, register
// the task in the exec list BEFORE sending (see race note below), lazily
// serialize the subplan into pTask->msg, resolve candidate addresses,
// ensure a heartbeat connection for query jobs, and finally send the
// launch message to the selected node.
int32_t schLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) {
  int8_t  status = 0;
  int32_t code = 0;

  atomic_add_fetch_32(&pTask->level->taskLaunchedNum, 1);
  pTask->execId++;

  SCH_TASK_DLOG("start to launch task's %dth exec", pTask->execId);

  SCH_LOG_TASK_START_TS(pTask);

  if (schJobNeedToStop(pJob, &status)) {
    SCH_TASK_DLOG("no need to launch task cause of job status %s", jobTaskStatusStr(status));
    SCH_ERR_RET(TSDB_CODE_SCH_IGNORE_ERROR);
  }

  // NOTE: race condition: the task should be put into the hash table before send msg to server
  if (SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_EXEC) {
    SCH_ERR_RET(schPushTaskToExecList(pJob, pTask));
    SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_EXEC);
  }

  SSubplan *plan = pTask->plan;

  if (NULL == pTask->msg) {  // TODO add more detailed reason for failure
    code = qSubPlanToString(plan, &pTask->msg, &pTask->msgLen);
    if (TSDB_CODE_SUCCESS != code) {
      SCH_TASK_ELOG("failed to create physical plan, code:%s, msg:%p, len:%d", tstrerror(code), pTask->msg,
                    pTask->msgLen);
      SCH_ERR_RET(code);
    } else {
      SCH_TASK_DLOGL("physical plan len:%d, %s", pTask->msgLen, pTask->msg);
    }
  }

  SCH_ERR_RET(schSetTaskCandidateAddrs(pJob, pTask));

  if (SCH_IS_QUERY_JOB(pJob)) {
    SCH_ERR_RET(schEnsureHbConnection(pJob, pTask));
  }

  SCH_ERR_RET(schBuildAndSendMsg(pJob, pTask, NULL, plan->msgType));

  return TSDB_CODE_SUCCESS;
}
// Note: no more error processing, handled in function internal
//
// Launch a task, honoring flow control: when the task is flow-controlled,
// it is only launched if schCheckIncTaskFlowQuota grants quota (otherwise it
// stays queued in the flow-control list). Any launch error is funneled into
// schProcessOnTaskFailure.
int32_t schLaunchTask(SSchJob *pJob, SSchTask *pTask) {
  bool    enough = false;
  int32_t code = 0;

  SCH_SET_TASK_HANDLE(pTask, NULL);

  if (SCH_TASK_NEED_FLOW_CTRL(pJob, pTask)) {
    SCH_ERR_JRET(schCheckIncTaskFlowQuota(pJob, pTask, &enough));

    if (enough) {
      SCH_ERR_JRET(schLaunchTaskImpl(pJob, pTask));
    }
  } else {
    SCH_ERR_JRET(schLaunchTaskImpl(pJob, pTask));
  }

  return TSDB_CODE_SUCCESS;

_return:

  SCH_RET(schProcessOnTaskFailure(pJob, pTask, code));
}
// Launch every task of one job level, after deciding whether the job needs
// flow control for this level.
int32_t schLaunchLevelTasks(SSchJob *pJob, SSchLevel *level) {
  SCH_ERR_RET(schChkJobNeedFlowCtrl(pJob, level));

  for (int32_t i = 0; i < level->taskNum; ++i) {
    SSchTask *pTask = taosArrayGet(level->subTasks, i);

    SCH_ERR_RET(schLaunchTask(pJob, pTask));
  }

  return TSDB_CODE_SUCCESS;
}
// Iterate a hash of SSchTask* (e.g. the job's execTasks) and drop each task
// on its execution nodes. Only runs when the job is actually being dropped.
void schDropTaskInHashList(SSchJob *pJob, SHashObj *list) {
  if (!SCH_JOB_NEED_DROP(pJob)) {
    return;
  }

  void *pIter = taosHashIterate(list, NULL);
  while (pIter) {
    SSchTask *pTask = *(SSchTask **)pIter;  // values are task pointers

    schDropTaskOnExecNode(pJob, pTask);

    pIter = taosHashIterate(list, pIter);
  }
}
// Note: no more error processing, handled in function internal
//
// Ask the job's fetch task (on resNode) for result data. Skips sending when
// the result has already been fetched (pJob->resData set). Send failures go
// through schProcessOnTaskFailure for the fetch task.
int32_t schLaunchFetchTask(SSchJob *pJob) {
  int32_t code = 0;

  void *resData = atomic_load_ptr(&pJob->resData);
  if (resData) {
    SCH_JOB_DLOG("res already fetched, res:%p", resData);
    return TSDB_CODE_SUCCESS;
  }

  SCH_ERR_JRET(schBuildAndSendMsg(pJob, pJob->fetchTask, &pJob->resNode, TDMT_SCH_FETCH));

  return TSDB_CODE_SUCCESS;

_return:

  SCH_RET(schProcessOnTaskFailure(pJob, pJob->fetchTask, code));
}
source/libs/scheduler/src/schUtil.c
Browse file @
29949a96
...
...
@@ -16,11 +16,25 @@
#include "catalog.h"
#include "command.h"
#include "query.h"
#include "sch
eduler
Int.h"
#include "schInt.h"
#include "tmsg.h"
#include "tref.h"
#include "trpc.h"
// Take a reference on the job stored under refId in the global job ref
// table; returns NULL if the job no longer exists. Pair with schReleaseJob.
FORCE_INLINE SSchJob *schAcquireJob(int64_t refId) {
  qDebug("sch acquire jobId:0x%" PRIx64, refId);
  return (SSchJob *)taosAcquireRef(schMgmt.jobRef, refId);
}
// Release a reference taken by schAcquireJob. refId 0 is the "no job"
// sentinel and is silently accepted.
FORCE_INLINE int32_t schReleaseJob(int64_t refId) {
  if (0 == refId) {
    return TSDB_CODE_SUCCESS;
  }

  qDebug("sch release jobId:0x%" PRIx64, refId);
  return taosReleaseRef(schMgmt.jobRef, refId);
}
char
*
schGetOpStr
(
SCH_OP_TYPE
type
)
{
switch
(
type
)
{
case
SCH_OP_NULL
:
...
...
@@ -29,6 +43,8 @@ char* schGetOpStr(SCH_OP_TYPE type) {
return
"EXEC"
;
case
SCH_OP_FETCH
:
return
"FETCH"
;
case
SCH_OP_GET_STATUS
:
return
"GET STATUS"
;
default:
return
"UNKNOWN"
;
}
...
...
@@ -283,3 +299,20 @@ void schFreeSMsgSendInfo(SMsgSendInfo *msgSendInfo) {
taosMemoryFree
(
msgSendInfo
);
}
// Look up a task by taskId in a hash of SSchTask*.
// On a hit, *pTask receives the task pointer; on a miss (empty list, key
// absent, or a NULL value stored) *pTask is left untouched. Always returns
// TSDB_CODE_SUCCESS — absence is not an error.
int32_t schGetTaskFromList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask) {
  if (taosHashGetSize(pTaskList) <= 0) {
    return TSDB_CODE_SUCCESS;
  }

  SSchTask **ppFound = taosHashGet(pTaskList, &taskId, sizeof(taskId));
  if ((NULL == ppFound) || (NULL == *ppFound)) {
    return TSDB_CODE_SUCCESS;
  }

  *pTask = *ppFound;
  return TSDB_CODE_SUCCESS;
}
source/libs/scheduler/src/scheduler.c
浏览文件 @
29949a96
...
...
@@ -16,7 +16,7 @@
#include "catalog.h"
#include "command.h"
#include "query.h"
#include "sch
eduler
Int.h"
#include "schInt.h"
#include "tmsg.h"
#include "tref.h"
#include "trpc.h"
...
...
@@ -67,121 +67,45 @@ int32_t schedulerInit(SSchedulerCfg *cfg) {
return
TSDB_CODE_SUCCESS
;
}
int32_t
schedulerExecJob
(
SSchedulerReq
*
pReq
,
int64_t
*
pJobId
,
SQueryResult
*
pRes
)
{
qDebug
(
"scheduler
sync exec job start
"
);
int32_t
schedulerExecJob
(
SSchedulerReq
*
pReq
,
int64_t
*
pJobId
)
{
qDebug
(
"scheduler
%s exec job start"
,
pReq
->
syncReq
?
"SYNC"
:
"ASYNC
"
);
int32_t
code
=
0
;
SSchJob
*
pJob
=
NULL
;
SCH_ERR_JRET
(
schInitJob
(
pReq
,
&
pJob
));
*
pJobId
=
pJob
->
refId
;
SCH_ERR_JRET
(
schInitJob
(
pJobId
,
pReq
))
;
SCH_ERR_JRET
(
sch
ExecJobImpl
(
pReq
,
pJob
,
true
));
SCH_ERR_JRET
(
sch
HandleOpBeginEvent
(
*
pJobId
,
&
pJob
,
SCH_OP_EXEC
,
pReq
));
_return:
if
(
code
&&
NULL
==
pJob
)
{
qDestroyQueryPlan
(
pReq
->
pDag
);
}
if
(
pJob
)
{
schSetJobQueryRes
(
pJob
,
pRes
);
schReleaseJob
(
pJob
->
refId
);
}
return
code
;
}
SCH_ERR_JRET
(
schSwitchJobStatus
(
pJob
,
JOB_TASK_STATUS_INIT
,
pReq
));
int32_t
schedulerAsyncExecJob
(
SSchedulerReq
*
pReq
,
int64_t
*
pJobId
)
{
qDebug
(
"scheduler async exec job start"
);
int32_t
code
=
0
;
SSchJob
*
pJob
=
NULL
;
SCH_ERR_JRET
(
schInitJob
(
pReq
,
&
pJob
));
*
pJobId
=
pJob
->
refId
;
SCH_ERR_JRET
(
schExecJobImpl
(
pReq
,
pJob
,
false
));
SCH_ERR_JRET
(
schSwitchJobStatus
(
pJob
,
JOB_TASK_STATUS_EXEC
,
pReq
));
_return:
if
(
code
&&
NULL
==
pJob
)
{
qDestroyQueryPlan
(
pReq
->
pDag
);
}
if
(
pJob
)
{
schReleaseJob
(
pJob
->
refId
);
}
return
code
;
SCH_RET
(
schHandleOpEndEvent
(
pJob
,
SCH_OP_EXEC
,
pReq
,
code
));
}
int32_t
schedulerFetchRows
(
int64_t
job
,
void
**
pData
)
{
qDebug
(
"scheduler sync fetch rows start"
);
if
(
NULL
==
pData
)
{
SCH_ERR_RET
(
TSDB_CODE_QRY_INVALID_INPUT
);
}
int32_t
schedulerFetchRows
(
int64_t
jobId
,
SSchedulerReq
*
pReq
)
{
qDebug
(
"scheduler %s fetch rows start"
,
pReq
->
syncReq
?
"SYNC"
:
"ASYNC"
);
int32_t
code
=
0
;
SSchJob
*
pJob
=
schAcquireJob
(
job
);
if
(
NULL
==
pJob
)
{
qError
(
"acquire job from jobRef list failed, may be dropped, jobId:0x%"
PRIx64
,
job
);
SCH_ERR_RET
(
TSDB_CODE_SCH_STATUS_ERROR
);
}
SCH_ERR_RET
(
schBeginOperation
(
pJob
,
SCH_OP_FETCH
,
true
));
pJob
->
userRes
.
fetchRes
=
pData
;
code
=
schFetchRows
(
pJob
);
schReleaseJob
(
job
);
SCH_RET
(
code
);
}
void
schedulerAsyncFetchRows
(
int64_t
job
,
schedulerFetchFp
fp
,
void
*
param
)
{
qDebug
(
"scheduler async fetch rows start"
);
int32_t
code
=
0
;
if
(
NULL
==
fp
||
NULL
==
param
)
{
SCH_ERR_JRET
(
TSDB_CODE_QRY_INVALID_INPUT
);
}
SSchJob
*
pJob
=
schAcquireJob
(
job
);
if
(
NULL
==
pJob
)
{
qError
(
"acquire sch job from job list failed, may be dropped, jobId:0x%"
PRIx64
,
job
);
SCH_ERR_JRET
(
TSDB_CODE_SCH_STATUS_ERROR
);
}
SCH_ERR_JRET
(
schBeginOperation
(
pJob
,
SCH_OP_FETCH
,
false
));
SSchJob
*
pJob
=
NULL
;
pJob
->
userRes
.
fetchFp
=
fp
;
pJob
->
userRes
.
userParam
=
param
;
SCH_ERR_JRET
(
schHandleOpBeginEvent
(
jobId
,
&
pJob
,
SCH_OP_FETCH
,
pReq
));
SCH_ERR_JRET
(
sch
Async
FetchRows
(
pJob
));
SCH_ERR_JRET
(
sch
Job
FetchRows
(
pJob
));
_return:
if
(
code
)
{
fp
(
NULL
,
param
,
code
);
}
schReleaseJob
(
job
);
SCH_RET
(
schHandleOpEndEvent
(
pJob
,
SCH_OP_FETCH
,
pReq
,
code
));
}
int32_t
schedulerGetTasksStatus
(
int64_t
job
,
SArray
*
pSub
)
{
int32_t
schedulerGetTasksStatus
(
int64_t
job
Id
,
SArray
*
pSub
)
{
int32_t
code
=
0
;
SSchJob
*
pJob
=
schAcquireJob
(
job
);
if
(
NULL
==
pJob
)
{
qDebug
(
"acquire job from jobRef list failed, may not started or dropped, refId:0x%"
PRIx64
,
job
);
SCH_ERR_RET
(
TSDB_CODE_SCH_STATUS_ERROR
);
}
SSchJob
*
pJob
=
NULL
;
if
(
pJob
->
status
<
JOB_TASK_STATUS_NOT_START
||
pJob
->
levelNum
<=
0
||
NULL
==
pJob
->
levels
)
{
qDebug
(
"job not initialized or not executable job, refId:0x%"
PRIx64
,
job
);
SCH_ERR_JRET
(
TSDB_CODE_SCH_STATUS_ERROR
);
}
SCH_ERR_JRET
(
schHandleOpBeginEvent
(
jobId
,
&
pJob
,
SCH_OP_GET_STATUS
,
NULL
));
for
(
int32_t
i
=
pJob
->
levelNum
-
1
;
i
>=
0
;
--
i
)
{
SSchLevel
*
pLevel
=
taosArrayGet
(
pJob
->
levels
,
i
);
...
...
@@ -198,23 +122,7 @@ int32_t schedulerGetTasksStatus(int64_t job, SArray *pSub) {
_return:
schReleaseJob
(
job
);
SCH_RET
(
code
);
}
int32_t
scheduleCancelJob
(
int64_t
job
)
{
SSchJob
*
pJob
=
schAcquireJob
(
job
);
if
(
NULL
==
pJob
)
{
qError
(
"acquire job from jobRef list failed, may be dropped, jobId:0x%"
PRIx64
,
job
);
SCH_ERR_RET
(
TSDB_CODE_SCH_STATUS_ERROR
);
}
int32_t
code
=
schCancelJob
(
pJob
);
schReleaseJob
(
job
);
SCH_RET
(
code
);
SCH_RET
(
schHandleOpEndEvent
(
pJob
,
SCH_OP_GET_STATUS
,
NULL
,
code
));
}
void
schedulerStopQueryHb
(
void
*
pTrans
)
{
...
...
@@ -225,33 +133,23 @@ void schedulerStopQueryHb(void *pTrans) {
schCleanClusterHb
(
pTrans
);
}
void
schedulerFreeJob
(
int64_t
*
job
,
int32_t
errCode
)
{
if
(
0
==
*
job
)
{
void
schedulerFreeJob
(
int64_t
*
job
Id
,
int32_t
errCode
)
{
if
(
0
==
*
job
Id
)
{
return
;
}
SSchJob
*
pJob
=
schAcquireJob
(
*
job
);
SSchJob
*
pJob
=
schAcquireJob
(
*
job
Id
);
if
(
NULL
==
pJob
)
{
qError
(
"acquire sch job failed, may be dropped, jobId:0x%"
PRIx64
,
*
job
);
*
job
=
0
;
qError
(
"Acquire sch job failed, may be dropped, jobId:0x%"
PRIx64
,
*
jobId
);
return
;
}
int32_t
code
=
schProcessOnJobDropped
(
pJob
,
errCode
);
if
(
TSDB_CODE_SCH_JOB_IS_DROPPING
==
code
)
{
SCH_JOB_DLOG
(
"sch job is already dropping, refId:0x%"
PRIx64
,
*
job
);
*
job
=
0
;
if
(
schJobDone
(
pJob
))
{
return
;
}
SCH_JOB_DLOG
(
"start to remove job from jobRef list, refId:0x%"
PRIx64
,
*
job
);
if
(
taosRemoveRef
(
schMgmt
.
jobRef
,
*
job
))
{
SCH_JOB_ELOG
(
"remove job from job list failed, refId:0x%"
PRIx64
,
*
job
);
}
schReleaseJob
(
*
job
);
*
job
=
0
;
schSwitchJobStatus
(
pJob
,
JOB_TASK_STATUS_DROP
,
(
void
*
)
&
errCode
);
*
jobId
=
0
;
}
void
schedulerDestroy
(
void
)
{
...
...
source/libs/scheduler/test/schedulerTests.cpp
浏览文件 @
29949a96
...
...
@@ -50,7 +50,7 @@
#pragma GCC diagnostic ignored "-Wreturn-type"
#pragma GCC diagnostic ignored "-Wformat"
#include "sch
eduler
Int.h"
#include "schInt.h"
#include "stub.h"
#include "tref.h"
...
...
@@ -87,7 +87,7 @@ void schtInitLogFile() {
}
void
schtQueryCb
(
S
Query
Result
*
pResult
,
void
*
param
,
int32_t
code
)
{
void
schtQueryCb
(
S
Exec
Result
*
pResult
,
void
*
param
,
int32_t
code
)
{
assert
(
TSDB_CODE_SUCCESS
==
code
);
*
(
int32_t
*
)
param
=
1
;
}
...
...
@@ -507,14 +507,15 @@ void* schtRunJobThread(void *aa) {
SRequestConnInfo
conn
=
{
0
};
conn
.
pTrans
=
mockPointer
;
SSchedulerReq
req
=
{
0
};
req
.
syncReq
=
false
;
req
.
pConn
=
&
conn
;
req
.
pNodeList
=
qnodeList
;
req
.
pDag
=
&
dag
;
req
.
sql
=
"select * from tb"
;
req
.
execFp
=
schtQueryCb
;
req
.
exec
Param
=
&
queryDone
;
req
.
cb
Param
=
&
queryDone
;
code
=
scheduler
Async
ExecJob
(
&
req
,
&
queryJobRefId
);
code
=
schedulerExecJob
(
&
req
,
&
queryJobRefId
);
assert
(
code
==
0
);
pJob
=
schAcquireJob
(
queryJobRefId
);
...
...
@@ -584,7 +585,10 @@ void* schtRunJobThread(void *aa) {
atomic_store_32
(
&
schtStartFetch
,
1
);
void
*
data
=
NULL
;
code
=
schedulerFetchRows
(
queryJobRefId
,
&
data
);
req
.
syncReq
=
true
;
req
.
pFetchRes
=
&
data
;
code
=
schedulerFetchRows
(
queryJobRefId
,
&
req
);
assert
(
code
==
0
||
code
);
if
(
0
==
code
)
{
...
...
@@ -594,7 +598,7 @@ void* schtRunJobThread(void *aa) {
}
data
=
NULL
;
code
=
schedulerFetchRows
(
queryJobRefId
,
&
data
);
code
=
schedulerFetchRows
(
queryJobRefId
,
&
req
);
assert
(
code
==
0
||
code
);
schtFreeQueryJob
(
0
);
...
...
@@ -664,9 +668,9 @@ TEST(queryTest, normalCase) {
req
.
pDag
=
&
dag
;
req
.
sql
=
"select * from tb"
;
req
.
execFp
=
schtQueryCb
;
req
.
exec
Param
=
&
queryDone
;
req
.
cb
Param
=
&
queryDone
;
code
=
scheduler
Async
ExecJob
(
&
req
,
&
job
);
code
=
schedulerExecJob
(
&
req
,
&
job
);
ASSERT_EQ
(
code
,
0
);
...
...
@@ -709,7 +713,10 @@ TEST(queryTest, normalCase) {
taosThreadCreate
(
&
(
thread1
),
&
thattr
,
schtCreateFetchRspThread
,
&
job
);
void
*
data
=
NULL
;
code
=
schedulerFetchRows
(
job
,
&
data
);
req
.
syncReq
=
true
;
req
.
pFetchRes
=
&
data
;
code
=
schedulerFetchRows
(
job
,
&
req
);
ASSERT_EQ
(
code
,
0
);
SRetrieveTableRsp
*
pRsp
=
(
SRetrieveTableRsp
*
)
data
;
...
...
@@ -718,7 +725,7 @@ TEST(queryTest, normalCase) {
taosMemoryFreeClear
(
data
);
data
=
NULL
;
code
=
schedulerFetchRows
(
job
,
&
data
);
code
=
schedulerFetchRows
(
job
,
&
req
);
ASSERT_EQ
(
code
,
0
);
ASSERT_TRUE
(
data
==
NULL
);
...
...
@@ -768,8 +775,8 @@ TEST(queryTest, readyFirstCase) {
req
.
pDag
=
&
dag
;
req
.
sql
=
"select * from tb"
;
req
.
execFp
=
schtQueryCb
;
req
.
exec
Param
=
&
queryDone
;
code
=
scheduler
Async
ExecJob
(
&
req
,
&
job
);
req
.
cb
Param
=
&
queryDone
;
code
=
schedulerExecJob
(
&
req
,
&
job
);
ASSERT_EQ
(
code
,
0
);
...
...
@@ -813,7 +820,9 @@ TEST(queryTest, readyFirstCase) {
taosThreadCreate
(
&
(
thread1
),
&
thattr
,
schtCreateFetchRspThread
,
&
job
);
void
*
data
=
NULL
;
code
=
schedulerFetchRows
(
job
,
&
data
);
req
.
syncReq
=
true
;
req
.
pFetchRes
=
&
data
;
code
=
schedulerFetchRows
(
job
,
&
req
);
ASSERT_EQ
(
code
,
0
);
SRetrieveTableRsp
*
pRsp
=
(
SRetrieveTableRsp
*
)
data
;
...
...
@@ -822,7 +831,7 @@ TEST(queryTest, readyFirstCase) {
taosMemoryFreeClear
(
data
);
data
=
NULL
;
code
=
schedulerFetchRows
(
job
,
&
data
);
code
=
schedulerFetchRows
(
job
,
&
req
);
ASSERT_EQ
(
code
,
0
);
ASSERT_TRUE
(
data
==
NULL
);
...
...
@@ -875,9 +884,9 @@ TEST(queryTest, flowCtrlCase) {
req
.
pDag
=
&
dag
;
req
.
sql
=
"select * from tb"
;
req
.
execFp
=
schtQueryCb
;
req
.
exec
Param
=
&
queryDone
;
req
.
cb
Param
=
&
queryDone
;
code
=
scheduler
Async
ExecJob
(
&
req
,
&
job
);
code
=
schedulerExecJob
(
&
req
,
&
job
);
ASSERT_EQ
(
code
,
0
);
...
...
@@ -925,7 +934,9 @@ TEST(queryTest, flowCtrlCase) {
taosThreadCreate
(
&
(
thread1
),
&
thattr
,
schtCreateFetchRspThread
,
&
job
);
void
*
data
=
NULL
;
code
=
schedulerFetchRows
(
job
,
&
data
);
req
.
syncReq
=
true
;
req
.
pFetchRes
=
&
data
;
code
=
schedulerFetchRows
(
job
,
&
req
);
ASSERT_EQ
(
code
,
0
);
SRetrieveTableRsp
*
pRsp
=
(
SRetrieveTableRsp
*
)
data
;
...
...
@@ -934,7 +945,7 @@ TEST(queryTest, flowCtrlCase) {
taosMemoryFreeClear
(
data
);
data
=
NULL
;
code
=
schedulerFetchRows
(
job
,
&
data
);
code
=
schedulerFetchRows
(
job
,
&
req
);
ASSERT_EQ
(
code
,
0
);
ASSERT_TRUE
(
data
==
NULL
);
...
...
@@ -978,7 +989,7 @@ TEST(insertTest, normalCase) {
TdThread
thread1
;
taosThreadCreate
(
&
(
thread1
),
&
thattr
,
schtSendRsp
,
&
insertJobRefId
);
S
Query
Result
res
=
{
0
};
S
Exec
Result
res
=
{
0
};
SRequestConnInfo
conn
=
{
0
};
conn
.
pTrans
=
mockPointer
;
...
...
@@ -988,9 +999,9 @@ TEST(insertTest, normalCase) {
req
.
pDag
=
&
dag
;
req
.
sql
=
"insert into tb values(now,1)"
;
req
.
execFp
=
schtQueryCb
;
req
.
exec
Param
=
NULL
;
req
.
cb
Param
=
NULL
;
code
=
schedulerExecJob
(
&
req
,
&
insertJobRefId
,
&
res
);
code
=
schedulerExecJob
(
&
req
,
&
insertJobRefId
);
ASSERT_EQ
(
code
,
0
);
ASSERT_EQ
(
res
.
numOfRows
,
20
);
...
...
source/libs/sync/inc/syncRaftCfg.h
浏览文件 @
29949a96
...
...
@@ -36,7 +36,7 @@ typedef struct SRaftCfg {
TdFilePtr
pFile
;
char
path
[
TSDB_FILENAME_LEN
*
2
];
int8_t
isStandBy
;
int8_t
snapshot
Enable
;
int8_t
snapshot
Strategy
;
SyncIndex
lastConfigIndex
;
SyncIndex
configIndexArr
[
MAX_CONFIG_INDEX_COUNT
];
...
...
@@ -49,20 +49,20 @@ int32_t raftCfgClose(SRaftCfg *pRaftCfg);
int32_t
raftCfgPersist
(
SRaftCfg
*
pRaftCfg
);
int32_t
raftCfgAddConfigIndex
(
SRaftCfg
*
pRaftCfg
,
SyncIndex
configIndex
);
cJSON
*
syncCfg2Json
(
SSyncCfg
*
pSyncCfg
);
char
*
syncCfg2Str
(
SSyncCfg
*
pSyncCfg
);
char
*
syncCfg2SimpleStr
(
SSyncCfg
*
pSyncCfg
);
cJSON
*
syncCfg2Json
(
SSyncCfg
*
pSyncCfg
);
char
*
syncCfg2Str
(
SSyncCfg
*
pSyncCfg
);
char
*
syncCfg2SimpleStr
(
SSyncCfg
*
pSyncCfg
);
int32_t
syncCfgFromJson
(
const
cJSON
*
pRoot
,
SSyncCfg
*
pSyncCfg
);
int32_t
syncCfgFromStr
(
const
char
*
s
,
SSyncCfg
*
pSyncCfg
);
cJSON
*
raftCfg2Json
(
SRaftCfg
*
pRaftCfg
);
char
*
raftCfg2Str
(
SRaftCfg
*
pRaftCfg
);
cJSON
*
raftCfg2Json
(
SRaftCfg
*
pRaftCfg
);
char
*
raftCfg2Str
(
SRaftCfg
*
pRaftCfg
);
int32_t
raftCfgFromJson
(
const
cJSON
*
pRoot
,
SRaftCfg
*
pRaftCfg
);
int32_t
raftCfgFromStr
(
const
char
*
s
,
SRaftCfg
*
pRaftCfg
);
typedef
struct
SRaftCfgMeta
{
int8_t
isStandBy
;
int8_t
snapshot
Enable
;
int8_t
snapshot
Strategy
;
SyncIndex
lastConfigIndex
;
}
SRaftCfgMeta
;
...
...
source/libs/sync/src/syncElection.c
浏览文件 @
29949a96
...
...
@@ -96,12 +96,20 @@ int32_t syncNodeElect(SSyncNode* pSyncNode) {
return
ret
;
}
if
(
pSyncNode
->
pRaftCfg
->
snapshotEnable
)
{
switch
(
pSyncNode
->
pRaftCfg
->
snapshotStrategy
)
{
case
SYNC_STRATEGY_NO_SNAPSHOT
:
ret
=
syncNodeRequestVotePeers
(
pSyncNode
);
break
;
case
SYNC_STRATEGY_STANDARD_SNAPSHOT
:
case
SYNC_STRATEGY_WAL_FIRST
:
ret
=
syncNodeRequestVotePeersSnapshot
(
pSyncNode
);
}
else
{
break
;
default:
ret
=
syncNodeRequestVotePeers
(
pSyncNode
);
break
;
}
ASSERT
(
ret
==
0
);
syncNodeResetElectTimer
(
pSyncNode
);
...
...
source/libs/sync/src/syncMain.c
浏览文件 @
29949a96
...
...
@@ -672,12 +672,12 @@ int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWe
}
if
(
arrSize
>
SYNC_MAX_BATCH_SIZE
)
{
syncNodeErrorLog
(
pSyncNode
,
"sync propose
match
batch error"
);
syncNodeErrorLog
(
pSyncNode
,
"sync propose batch error"
);
terrno
=
TSDB_CODE_SYN_BATCH_ERROR
;
return
-
1
;
}
if
(
pSyncNode
->
state
=
=
TAOS_SYNC_STATE_LEADER
)
{
if
(
pSyncNode
->
state
!
=
TAOS_SYNC_STATE_LEADER
)
{
syncNodeErrorLog
(
pSyncNode
,
"sync propose not leader"
);
terrno
=
TSDB_CODE_SYN_NOT_LEADER
;
return
-
1
;
...
...
@@ -711,7 +711,7 @@ int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWe
// enqueue msg ok
}
else
{
sError
(
"
enqueue msg error, FpEqMsg is NULL"
);
sError
(
"
vgId:%d, enqueue msg error, FpEqMsg is NULL"
,
pSyncNode
->
vgId
);
terrno
=
TSDB_CODE_SYN_INTERNAL_ERROR
;
return
-
1
;
}
...
...
@@ -730,7 +730,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) {
if
(
pSyncNode
->
changing
&&
pMsg
->
msgType
!=
TDMT_SYNC_CONFIG_CHANGE_FINISH
)
{
ret
=
-
1
;
terrno
=
TSDB_CODE_SYN_PROPOSE_NOT_READY
;
sError
(
"
sync propose not ready, type:%s,%d"
,
TMSG_INFO
(
pMsg
->
msgType
),
pMsg
->
msgType
);
sError
(
"
vgId:%d, sync propose not ready, type:%s,%d"
,
pSyncNode
->
vgId
,
TMSG_INFO
(
pMsg
->
msgType
),
pMsg
->
msgType
);
goto
_END
;
}
...
...
@@ -739,7 +739,8 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) {
if
(
!
syncNodeCanChange
(
pSyncNode
))
{
ret
=
-
1
;
terrno
=
TSDB_CODE_SYN_RECONFIG_NOT_READY
;
sError
(
"sync reconfig not ready, type:%s,%d"
,
TMSG_INFO
(
pMsg
->
msgType
),
pMsg
->
msgType
);
sError
(
"vgId:%d, sync reconfig not ready, type:%s,%d"
,
pSyncNode
->
vgId
,
TMSG_INFO
(
pMsg
->
msgType
),
pMsg
->
msgType
);
goto
_END
;
}
...
...
@@ -780,7 +781,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) {
}
else
{
ret
=
-
1
;
terrno
=
TSDB_CODE_SYN_INTERNAL_ERROR
;
sError
(
"
enqueue msg error, FpEqMsg is NULL"
);
sError
(
"
vgId:%d, enqueue msg error, FpEqMsg is NULL"
,
pSyncNode
->
vgId
);
}
}
...
...
@@ -790,7 +791,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) {
}
else
{
ret
=
-
1
;
terrno
=
TSDB_CODE_SYN_NOT_LEADER
;
sError
(
"
sync propose not leader, %s"
,
syncUtilState2String
(
pSyncNode
->
state
));
sError
(
"
vgId:%d, sync propose not leader, %s"
,
pSyncNode
->
vgId
,
syncUtilState2String
(
pSyncNode
->
state
));
goto
_END
;
}
...
...
@@ -820,7 +821,7 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
// create a new raft config file
SRaftCfgMeta
meta
;
meta
.
isStandBy
=
pSyncInfo
->
isStandBy
;
meta
.
snapshot
Enable
=
pSyncInfo
->
snapshotStrategy
;
meta
.
snapshot
Strategy
=
pSyncInfo
->
snapshotStrategy
;
meta
.
lastConfigIndex
=
SYNC_INDEX_INVALID
;
ret
=
raftCfgCreateFile
((
SSyncCfg
*
)
&
(
pSyncInfo
->
syncCfg
),
meta
,
pSyncNode
->
configPath
);
ASSERT
(
ret
==
0
);
...
...
@@ -969,7 +970,7 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
pSyncNode
->
FpOnSnapshotSend
=
syncNodeOnSnapshotSendCb
;
pSyncNode
->
FpOnSnapshotRsp
=
syncNodeOnSnapshotRspCb
;
if
(
pSyncNode
->
pRaftCfg
->
snapshot
Enable
)
{
if
(
pSyncNode
->
pRaftCfg
->
snapshot
Strategy
)
{
sInfo
(
"sync node use snapshot"
);
pSyncNode
->
FpOnRequestVote
=
syncNodeOnRequestVoteSnapshotCb
;
pSyncNode
->
FpOnRequestVoteReply
=
syncNodeOnRequestVoteReplySnapshotCb
;
...
...
@@ -1107,7 +1108,7 @@ void syncNodeClose(SSyncNode* pSyncNode) {
// option
// bool syncNodeSnapshotEnable(SSyncNode* pSyncNode) { return pSyncNode->pRaftCfg->snapshotEnable; }
ESyncStrategy
syncNodeStrategy
(
SSyncNode
*
pSyncNode
)
{
return
pSyncNode
->
pRaftCfg
->
snapshot
Enable
;
}
ESyncStrategy
syncNodeStrategy
(
SSyncNode
*
pSyncNode
)
{
return
pSyncNode
->
pRaftCfg
->
snapshot
Strategy
;
}
// ping --------------
int32_t
syncNodePing
(
SSyncNode
*
pSyncNode
,
const
SRaftId
*
destRaftId
,
SyncPing
*
pMsg
)
{
...
...
@@ -2496,6 +2497,15 @@ int32_t syncNodeOnClientRequestBatchCb(SSyncNode* ths, SyncClientRequestBatch* p
SWal
*
pWal
=
pData
->
pWal
;
walFsync
(
pWal
,
true
);
if
(
ths
->
replicaNum
>
1
)
{
// if mulit replica, start replicate right now
syncNodeReplicate
(
ths
);
}
else
if
(
ths
->
replicaNum
==
1
)
{
// one replica
syncMaybeAdvanceCommitIndex
(
ths
);
}
return
0
;
}
...
...
source/libs/sync/src/syncRaftCfg.c
浏览文件 @
29949a96
...
...
@@ -101,7 +101,7 @@ cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) {
char
*
syncCfg2Str
(
SSyncCfg
*
pSyncCfg
)
{
cJSON
*
pJson
=
syncCfg2Json
(
pSyncCfg
);
char
*
serialized
=
cJSON_Print
(
pJson
);
char
*
serialized
=
cJSON_Print
(
pJson
);
cJSON_Delete
(
pJson
);
return
serialized
;
}
...
...
@@ -109,7 +109,7 @@ char *syncCfg2Str(SSyncCfg *pSyncCfg) {
char
*
syncCfg2SimpleStr
(
SSyncCfg
*
pSyncCfg
)
{
if
(
pSyncCfg
!=
NULL
)
{
int32_t
len
=
512
;
char
*
s
=
taosMemoryMalloc
(
len
);
char
*
s
=
taosMemoryMalloc
(
len
);
memset
(
s
,
0
,
len
);
snprintf
(
s
,
len
,
"{replica-num:%d, my-index:%d, "
,
pSyncCfg
->
replicaNum
,
pSyncCfg
->
myIndex
);
...
...
@@ -182,7 +182,7 @@ cJSON *raftCfg2Json(SRaftCfg *pRaftCfg) {
cJSON
*
pRoot
=
cJSON_CreateObject
();
cJSON_AddItemToObject
(
pRoot
,
"SSyncCfg"
,
syncCfg2Json
(
&
(
pRaftCfg
->
cfg
)));
cJSON_AddNumberToObject
(
pRoot
,
"isStandBy"
,
pRaftCfg
->
isStandBy
);
cJSON_AddNumberToObject
(
pRoot
,
"snapshot
Enable"
,
pRaftCfg
->
snapshotEnable
);
cJSON_AddNumberToObject
(
pRoot
,
"snapshot
Strategy"
,
pRaftCfg
->
snapshotStrategy
);
char
buf64
[
128
];
snprintf
(
buf64
,
sizeof
(
buf64
),
"%ld"
,
pRaftCfg
->
lastConfigIndex
);
...
...
@@ -205,7 +205,7 @@ cJSON *raftCfg2Json(SRaftCfg *pRaftCfg) {
char
*
raftCfg2Str
(
SRaftCfg
*
pRaftCfg
)
{
cJSON
*
pJson
=
raftCfg2Json
(
pRaftCfg
);
char
*
serialized
=
cJSON_Print
(
pJson
);
char
*
serialized
=
cJSON_Print
(
pJson
);
cJSON_Delete
(
pJson
);
return
serialized
;
}
...
...
@@ -228,7 +228,7 @@ int32_t raftCfgCreateFile(SSyncCfg *pCfg, SRaftCfgMeta meta, const char *path) {
SRaftCfg
raftCfg
;
raftCfg
.
cfg
=
*
pCfg
;
raftCfg
.
isStandBy
=
meta
.
isStandBy
;
raftCfg
.
snapshot
Enable
=
meta
.
snapshotEnable
;
raftCfg
.
snapshot
Strategy
=
meta
.
snapshotStrategy
;
raftCfg
.
lastConfigIndex
=
meta
.
lastConfigIndex
;
raftCfg
.
configIndexCount
=
1
;
memset
(
raftCfg
.
configIndexArr
,
0
,
sizeof
(
raftCfg
.
configIndexArr
));
...
...
@@ -257,8 +257,8 @@ int32_t raftCfgFromJson(const cJSON *pRoot, SRaftCfg *pRaftCfg) {
cJSON
*
pJsonIsStandBy
=
cJSON_GetObjectItem
(
pJson
,
"isStandBy"
);
pRaftCfg
->
isStandBy
=
cJSON_GetNumberValue
(
pJsonIsStandBy
);
cJSON
*
pJsonSnapshot
Enable
=
cJSON_GetObjectItem
(
pJson
,
"snapshotEnable
"
);
pRaftCfg
->
snapshot
Enable
=
cJSON_GetNumberValue
(
pJsonSnapshotEnable
);
cJSON
*
pJsonSnapshot
Strategy
=
cJSON_GetObjectItem
(
pJson
,
"snapshotStrategy
"
);
pRaftCfg
->
snapshot
Strategy
=
cJSON_GetNumberValue
(
pJsonSnapshotStrategy
);
cJSON
*
pJsonLastConfigIndex
=
cJSON_GetObjectItem
(
pJson
,
"lastConfigIndex"
);
pRaftCfg
->
lastConfigIndex
=
atoll
(
cJSON_GetStringValue
(
pJsonLastConfigIndex
));
...
...
@@ -280,7 +280,7 @@ int32_t raftCfgFromJson(const cJSON *pRoot, SRaftCfg *pRaftCfg) {
(
pRaftCfg
->
configIndexArr
)[
i
]
=
atoll
(
pIndex
->
valuestring
);
}
cJSON
*
pJsonSyncCfg
=
cJSON_GetObjectItem
(
pJson
,
"SSyncCfg"
);
cJSON
*
pJsonSyncCfg
=
cJSON_GetObjectItem
(
pJson
,
"SSyncCfg"
);
int32_t
code
=
syncCfgFromJson
(
pJsonSyncCfg
,
&
(
pRaftCfg
->
cfg
));
ASSERT
(
code
==
0
);
...
...
source/libs/sync/src/syncReplication.c
浏览文件 @
29949a96
...
...
@@ -132,10 +132,6 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
SyncIndex
preLogIndex
=
syncNodeGetPreIndex
(
pSyncNode
,
nextIndex
);
SyncTerm
preLogTerm
=
syncNodeGetPreTerm
(
pSyncNode
,
nextIndex
);
if
(
preLogTerm
==
SYNC_TERM_INVALID
)
{
SSyncSnapshotSender
*
pSender
=
syncNodeGetSnapshotSender
(
pSyncNode
,
pDestId
);
ASSERT
(
pSender
!=
NULL
);
ASSERT
(
!
snapshotSenderIsStart
(
pSender
));
SyncIndex
newNextIndex
=
syncNodeGetLastIndex
(
pSyncNode
)
+
1
;
syncIndexMgrSetIndex
(
pSyncNode
->
pNextIndex
,
pDestId
,
newNextIndex
);
syncIndexMgrSetIndex
(
pSyncNode
->
pMatchIndex
,
pDestId
,
SYNC_INDEX_INVALID
);
...
...
@@ -145,26 +141,32 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
return
-
1
;
}
// entry pointer array
SSyncRaftEntry
*
entryPArr
[
SYNC_MAX_BATCH_SIZE
];
memset
(
entryPArr
,
0
,
sizeof
(
entryPArr
));
// get entry batch
int32_t
getCount
=
0
;
SyncIndex
getEntryIndex
=
nextIndex
;
for
(
int32_t
i
=
0
;
i
<
pSyncNode
->
batchSize
;
++
i
)
{
SSyncRaftEntry
*
pEntry
;
SSyncRaftEntry
*
pEntry
=
NULL
;
int32_t
code
=
pSyncNode
->
pLogStore
->
syncLogGetEntry
(
pSyncNode
->
pLogStore
,
getEntryIndex
,
&
pEntry
);
if
(
code
==
0
)
{
ASSERT
(
pEntry
!=
NULL
);
entryPArr
[
i
]
=
pEntry
;
getCount
++
;
getEntryIndex
++
;
}
else
{
break
;
}
}
// build msg
SyncAppendEntriesBatch
*
pMsg
=
syncAppendEntriesBatchBuild
(
entryPArr
,
getCount
,
pSyncNode
->
vgId
);
ASSERT
(
pMsg
!=
NULL
);
// free entries
for
(
int32_t
i
=
0
;
i
<
pSyncNode
->
batchSize
;
++
i
)
{
SSyncRaftEntry
*
pEntry
=
entryPArr
[
i
];
if
(
pEntry
!=
NULL
)
{
...
...
@@ -197,12 +199,6 @@ int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode) {
syncIndexMgrLog2
(
"begin append entries peers pNextIndex:"
,
pSyncNode
->
pNextIndex
);
syncIndexMgrLog2
(
"begin append entries peers pMatchIndex:"
,
pSyncNode
->
pMatchIndex
);
logStoreSimpleLog2
(
"begin append entries peers LogStore:"
,
pSyncNode
->
pLogStore
);
if
(
gRaftDetailLog
)
{
SSnapshot
snapshot
;
pSyncNode
->
pFsm
->
FpGetSnapshotInfo
(
pSyncNode
->
pFsm
,
&
snapshot
);
sTrace
(
"begin append entries peers, snapshot.lastApplyIndex:%ld, snapshot.lastApplyTerm:%lu"
,
snapshot
.
lastApplyIndex
,
snapshot
.
lastApplyTerm
);
}
int32_t
ret
=
0
;
for
(
int
i
=
0
;
i
<
pSyncNode
->
peersNum
;
++
i
)
{
...
...
@@ -224,9 +220,6 @@ int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode) {
return
-
1
;
}
// batch optimized
// SyncIndex lastIndex = syncUtilMinIndex(pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore), nextIndex);
// prepare entry
SyncAppendEntries
*
pMsg
=
NULL
;
...
...
@@ -283,11 +276,24 @@ int32_t syncNodeReplicate(SSyncNode* pSyncNode) {
// start replicate
int32_t
ret
=
0
;
if
(
pSyncNode
->
pRaftCfg
->
snapshotEnable
)
{
switch
(
pSyncNode
->
pRaftCfg
->
snapshotStrategy
)
{
case
SYNC_STRATEGY_NO_SNAPSHOT
:
ret
=
syncNodeAppendEntriesPeers
(
pSyncNode
);
break
;
case
SYNC_STRATEGY_STANDARD_SNAPSHOT
:
ret
=
syncNodeAppendEntriesPeersSnapshot
(
pSyncNode
);
}
else
{
break
;
case
SYNC_STRATEGY_WAL_FIRST
:
ret
=
syncNodeAppendEntriesPeersSnapshot2
(
pSyncNode
);
break
;
default:
ret
=
syncNodeAppendEntriesPeers
(
pSyncNode
);
break
;
}
return
ret
;
}
...
...
source/libs/sync/test/syncRaftCfgTest.cpp
浏览文件 @
29949a96
...
...
@@ -83,7 +83,7 @@ void test3() {
}
else
{
SRaftCfgMeta
meta
;
meta
.
isStandBy
=
7
;
meta
.
snapshot
Enable
=
9
;
meta
.
snapshot
Strategy
=
9
;
meta
.
lastConfigIndex
=
789
;
raftCfgCreateFile
(
pCfg
,
meta
,
s
);
printf
(
"%s create json file: %s
\n
"
,
(
char
*
)
__FUNCTION__
,
s
);
...
...
@@ -108,7 +108,7 @@ void test5() {
pCfg
->
cfg
.
myIndex
=
taosGetTimestampSec
();
pCfg
->
isStandBy
+=
2
;
pCfg
->
snapshot
Enable
+=
3
;
pCfg
->
snapshot
Strategy
+=
3
;
pCfg
->
lastConfigIndex
+=
1000
;
pCfg
->
configIndexCount
=
5
;
...
...
source/os/src/osFile.c
浏览文件 @
29949a96
...
...
@@ -58,6 +58,15 @@ typedef struct TdFile {
#define FILE_WITH_LOCK 1
typedef
struct
AutoDelFile
*
AutoDelFilePtr
;
typedef
struct
AutoDelFile
{
char
*
name
;
AutoDelFilePtr
lastAutoDelFilePtr
;
}
AutoDelFile
;
static
TdThreadMutex
autoDelFileLock
;
static
AutoDelFilePtr
nowAutoDelFilePtr
=
NULL
;
static
TdThreadOnce
autoDelFileInit
=
PTHREAD_ONCE_INIT
;
void
taosGetTmpfilePath
(
const
char
*
inputTmpDir
,
const
char
*
fileNamePrefix
,
char
*
dstPath
)
{
#ifdef WINDOWS
const
char
*
tdengineTmpFileNamePrefix
=
"tdengine-"
;
...
...
@@ -238,7 +247,33 @@ int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno) {
return
0
;
}
void
autoDelFileListAdd
(
const
char
*
path
)
{
return
;
}
void
autoDelFileList
()
{
taosThreadMutexLock
(
&
autoDelFileLock
);
while
(
nowAutoDelFilePtr
!=
NULL
)
{
taosRemoveFile
(
nowAutoDelFilePtr
->
name
);
AutoDelFilePtr
tmp
=
nowAutoDelFilePtr
->
lastAutoDelFilePtr
;
taosMemoryFree
(
nowAutoDelFilePtr
->
name
);
taosMemoryFree
(
nowAutoDelFilePtr
);
nowAutoDelFilePtr
=
tmp
;
}
taosThreadMutexUnlock
(
&
autoDelFileLock
);
taosThreadMutexDestroy
(
&
autoDelFileLock
);
}
void
autoDelFileListInit
()
{
taosThreadMutexInit
(
&
autoDelFileLock
,
NULL
);
atexit
(
autoDelFileList
);
}
void
autoDelFileListAdd
(
const
char
*
path
)
{
taosThreadOnce
(
&
autoDelFileInit
,
autoDelFileListInit
);
taosThreadMutexLock
(
&
autoDelFileLock
);
AutoDelFilePtr
tmp
=
taosMemoryMalloc
(
sizeof
(
AutoDelFile
));
tmp
->
lastAutoDelFilePtr
=
nowAutoDelFilePtr
;
tmp
->
name
=
taosMemoryStrDup
(
path
);
nowAutoDelFilePtr
=
tmp
;
taosThreadMutexUnlock
(
&
autoDelFileLock
);
}
TdFilePtr
taosOpenFile
(
const
char
*
path
,
int32_t
tdFileOptions
)
{
int
fd
=
-
1
;
...
...
@@ -283,10 +318,6 @@ TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) {
}
}
if
(
tdFileOptions
&
TD_FILE_AUTO_DEL
)
{
autoDelFileListAdd
(
path
);
}
TdFilePtr
pFile
=
(
TdFilePtr
)
taosMemoryMalloc
(
sizeof
(
TdFile
));
if
(
pFile
==
NULL
)
{
if
(
fd
>=
0
)
close
(
fd
);
...
...
@@ -299,6 +330,9 @@ TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) {
pFile
->
fd
=
fd
;
pFile
->
fp
=
fp
;
pFile
->
refId
=
0
;
if
(
tdFileOptions
&
TD_FILE_AUTO_DEL
)
{
autoDelFileListAdd
(
path
);
}
return
pFile
;
}
...
...
source/os/src/osMemory.c
浏览文件 @
29949a96
...
...
@@ -282,14 +282,14 @@ void *taosMemoryRealloc(void *ptr, int32_t size) {
#endif
}
void
*
taosMemoryStrDup
(
void
*
ptr
)
{
void
*
taosMemoryStrDup
(
const
char
*
ptr
)
{
#ifdef USE_TD_MEMORY
if
(
ptr
==
NULL
)
return
NULL
;
TdMemoryInfoPtr
pTdMemoryInfo
=
(
TdMemoryInfoPtr
)((
char
*
)
ptr
-
sizeof
(
TdMemoryInfo
));
assert
(
pTdMemoryInfo
->
symbol
==
TD_MEMORY_SYMBOL
);
void
*
tmp
=
tstrdup
(
(
const
char
*
)
pTdMemoryInfo
);
void
*
tmp
=
tstrdup
(
pTdMemoryInfo
);
if
(
tmp
==
NULL
)
return
NULL
;
memcpy
(
tmp
,
pTdMemoryInfo
,
sizeof
(
TdMemoryInfo
));
...
...
@@ -297,7 +297,7 @@ void *taosMemoryStrDup(void *ptr) {
return
(
char
*
)
tmp
+
sizeof
(
TdMemoryInfo
);
#else
return
tstrdup
(
(
const
char
*
)
ptr
);
return
tstrdup
(
ptr
);
#endif
}
...
...
source/util/src/terror.c
浏览文件 @
29949a96
...
...
@@ -395,6 +395,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TASK_DROPPING, "Task dropping")
TAOS_DEFINE_ERROR
(
TSDB_CODE_QRY_DUPLICATTED_OPERATION
,
"Duplicatted operation"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_QRY_TASK_MSG_ERROR
,
"Task message error"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_QRY_JOB_FREED
,
"Job already freed"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_QRY_JOB_NOT_EXIST
,
"Job not exist"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_QRY_TASK_STATUS_ERROR
,
"Task status error"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_QRY_JSON_IN_ERROR
,
"Json not support in in/notin operator"
)
TAOS_DEFINE_ERROR
(
TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR
,
"Json not support in this place"
)
...
...
tests/pytest/util/gettime.py
0 → 100644
浏览文件 @
29949a96
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import
time
from
datetime
import
datetime
class
GetTime
:
def
get_ms_timestamp
(
self
,
ts_str
):
_ts_str
=
ts_str
if
" "
in
ts_str
:
p
=
ts_str
.
split
(
" "
)[
1
]
if
len
(
p
)
>
15
:
_ts_str
=
ts_str
[:
-
3
]
if
':'
in
_ts_str
and
'.'
in
_ts_str
:
timestamp
=
datetime
.
strptime
(
_ts_str
,
"%Y-%m-%d %H:%M:%S.%f"
)
date_time
=
int
(
int
(
time
.
mktime
(
timestamp
.
timetuple
()))
*
1000
+
timestamp
.
microsecond
/
1000
)
elif
':'
in
_ts_str
and
'.'
not
in
_ts_str
:
timestamp
=
datetime
.
strptime
(
_ts_str
,
"%Y-%m-%d %H:%M:%S"
)
date_time
=
int
(
int
(
time
.
mktime
(
timestamp
.
timetuple
()))
*
1000
+
timestamp
.
microsecond
/
1000
)
else
:
timestamp
=
datetime
.
strptime
(
_ts_str
,
"%Y-%m-%d"
)
date_time
=
int
(
int
(
time
.
mktime
(
timestamp
.
timetuple
()))
*
1000
+
timestamp
.
microsecond
/
1000
)
return
date_time
def
get_us_timestamp
(
self
,
ts_str
):
_ts
=
self
.
get_ms_timestamp
(
ts_str
)
*
1000
if
" "
in
ts_str
:
p
=
ts_str
.
split
(
" "
)[
1
]
if
len
(
p
)
>
12
:
us_ts
=
p
[
12
:
15
]
_ts
+=
int
(
us_ts
)
return
_ts
def
get_ns_timestamp
(
self
,
ts_str
):
_ts
=
self
.
get_us_timestamp
(
ts_str
)
*
1000
if
" "
in
ts_str
:
p
=
ts_str
.
split
(
" "
)[
1
]
if
len
(
p
)
>
15
:
us_ts
=
p
[
15
:]
_ts
+=
int
(
us_ts
)
return
_ts
def
time_transform
(
self
,
ts_str
,
precision
):
date_time
=
[]
if
precision
==
'ms'
:
for
i
in
ts_str
:
date_time
.
append
(
self
.
get_ms_timestamp
(
i
))
elif
precision
==
'us'
:
for
i
in
ts_str
:
date_time
.
append
(
self
.
get_us_timestamp
(
i
))
elif
precision
==
'ns'
:
for
i
in
ts_str
:
date_time
.
append
(
self
.
get_ns_timestamp
(
i
))
return
date_time
\ No newline at end of file
tests/script/jenkins/basic.txt
浏览文件 @
29949a96
...
...
@@ -167,7 +167,8 @@
#./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
# --- valgrind
./test.sh -f tsim/valgrind/checkError.sim -v
./test.sh -f tsim/valgrind/checkError1.sim
./test.sh -f tsim/valgrind/checkError2.sim
# --- vnode
# ./test.sh -f tsim/vnode/replica3_basic.sim
...
...
tests/script/sh/checkValgrind.sh
浏览文件 @
29949a96
...
...
@@ -4,13 +4,17 @@ set +e
#set -x
NODE_NAME
=
DETAIL
=
0
while
getopts
"n:"
arg
while
getopts
"n:
d
"
arg
do
case
$arg
in
n
)
NODE_NAME
=
$OPTARG
;;
d
)
DETAIL
=
1
;;
?
)
echo
"unkown argument"
;;
...
...
@@ -30,10 +34,20 @@ fi
TAOS_DIR
=
`
pwd
`
LOG_DIR
=
$TAOS_DIR
/sim/
$NODE_NAME
/log
#CFG_DIR=$TAOS_DIR/sim/$NODE_NAME/cfg
#echo ---- $LOG_DIR
error_summary
=
`
cat
${
LOG_DIR
}
/valgrind-taosd-
*
.log |
grep
"ERROR SUMMARY:"
|
awk
'{print $4}'
|
awk
'{sum+=$1}END{print sum}'
`
still_reachable
=
`
cat
${
LOG_DIR
}
/valgrind-taosd-
*
.log |
grep
"still reachable in"
|
wc
-l
`
definitely_lost
=
`
cat
${
LOG_DIR
}
/valgrind-taosd-
*
.log |
grep
"definitely lost in"
|
wc
-l
`
indirectly_lost
=
`
cat
${
LOG_DIR
}
/valgrind-taosd-
*
.log |
grep
"indirectly lost in "
|
wc
-l
`
possibly_lost
=
`
cat
${
LOG_DIR
}
/valgrind-taosd-
*
.log |
grep
"possibly lost in "
|
wc
-l
`
if
[
$DETAIL
-eq
1
]
;
then
echo
error_summary:
$error_summary
echo
still_reachable:
$still_reachable
echo
definitely_lost:
$definitely_lost
echo
indirectly_lost:
$indirectly_lost
echo
possibly_lost:
$possibly_lost
fi
#errors=`grep "ERROR SUMMARY:" ${LOG_DIR}/valgrind-taosd-*.log | cut -d ' ' -f 2,3,4,5 | tr -d "\n"`
errors
=
`
cat
${
LOG_DIR
}
/valgrind-taosd-
*
.log |
grep
"ERROR SUMMARY:"
|
awk
'{print $4}'
|
awk
'{sum+=$1}END{print sum}'
`
let
"errors=
$still_reachable
+
$error_summary
+
$definitely_lost
+
$indirectly_lost
+
$possibly_lost
"
echo
$errors
tests/script/tsim/valgrind/basic.sim
→
tests/script/tsim/valgrind/basic
1
.sim
浏览文件 @
29949a96
...
...
@@ -9,17 +9,15 @@ step1:
$x = $x + 1
sleep 1000
if $x == 10 then
print
====
> dnode not ready!
print
----
> dnode not ready!
return -1
endi
sql show dnodes
print
===
> $data00 $data01 $data02 $data03 $data04 $data05
print
----
> $data00 $data01 $data02 $data03 $data04 $data05
if $rows != 1 then
return -1
endi
goto _OVER
print =============== step2: create alter drop show user
sql create user u1 pass 'taosdata'
sql show users
...
...
@@ -29,5 +27,17 @@ sql alter user u1 pass 'taosdata'
sql drop user u1
sql_error alter user u2 sysinfo 0
print =============== step3: create drop dnode
sql create dnode $hostname port 7200
sql drop dnode 2
sql alter dnode 1 'debugflag 143'
print =============== step4: create alter drop show database
sql create database db vgroups 1
sql show databases
sql show db.vgroups
sql drop database db
sql show databases
_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT
tests/script/tsim/valgrind/basic2.sim
浏览文件 @
29949a96
...
...
@@ -9,11 +9,11 @@ step1:
$x = $x + 1
sleep 1000
if $x == 10 then
print
====
> dnode not ready!
print
----
> dnode not ready!
return -1
endi
sql show dnodes
print
===
> $data00 $data01 $data02 $data03 $data04 $data05
print
----
> $data00 $data01 $data02 $data03 $data04 $data05
if $rows != 1 then
return -1
endi
...
...
tests/script/tsim/valgrind/checkError.sim
已删除
100644 → 0
浏览文件 @
b890676a
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
#system sh/deploy.sh -n dnode2 -i 2
#system sh/deploy.sh -n dnode3 -i 3
#system sh/deploy.sh -n dnode4 -i 4
#system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
system sh/exec.sh -n dnode1 -s start
#system sh/exec.sh -n dnode2 -s start
#system sh/exec.sh -n dnode3 -s start
#system sh/exec.sh -n dnode4 -s start
sleep 2000
#$loop_cnt = 0
#check_dnode_ready:
# $loop_cnt = $loop_cnt + 1
# sleep 200
# if $loop_cnt == 10 then
# print ====> dnode not ready!
# return -1
# endi
#sql show dnodes
#print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
#print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
#print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
#print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
#if $data[0][0] != 1 then
# return -1
#endi
#if $data[0][4] != ready then
# goto check_dnode_ready
#endi
#
##sql connect
#sql create dnode $hostname port 7200
#sql create dnode $hostname port 7300
#sql create dnode $hostname port 7400
#
#$loop_cnt = 0
#check_dnode_ready_1:
#$loop_cnt = $loop_cnt + 1
#sleep 200
#if $loop_cnt == 10 then
# print ====> dnodes not ready!
# return -1
#endi
#sql show dnodes
#print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
#print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
#print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
#print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
#if $data[0][4] != ready then
# goto check_dnode_ready_1
#endi
#if $data[1][4] != ready then
# goto check_dnode_ready_1
#endi
#if $data[2][4] != ready then
# goto check_dnode_ready_1
#endi
#if $data[3][4] != ready then
# goto check_dnode_ready_1
#endi
#=========== please add any actions above =================
print ====> stop all dondes to output valgrind log file
system sh/exec.sh -n dnode1 -s stop -x SIGINT
print ====> start to check if there are ERRORS in vagrind log file for each dnode
# -n : dnode[x] be check
system_content sh/checkValgrind.sh -n dnode1
print cmd return result----> [ $system_content ]
# temporarily expand the threshold, since no time to fix the memory leaks.
if $system_content <= 5 then
return 0
endi
# This error occurs frequently, allowing it
# ==435850== 46 bytes in 1 blocks are definitely lost in loss record 1 of 3
# ==435850== at 0x483DD99: calloc (in /usr/lib/x86_64-linux-gnu/valgrind/vgp reload_memcheck-amd64-linux.so)
# ==435850== by 0x414AE0: taosMemoryCalloc (osMemory.c:212)
# ==435850== by 0x352730: transAllocBuffer (transComm.c:123)
# ==435850== by 0x34F42A: cliAllocRecvBufferCb (transCli.c:485)
$null=
if $system_content == $null then
return 0
endi
return -1
tests/script/tsim/valgrind/checkError1.sim
0 → 100644
浏览文件 @
29949a96
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start -v
sql connect
print =============== step1: show dnodes
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
print ----> dnode not ready!
return -1
endi
sql show dnodes
print ----> $data00 $data01 $data02 $data03 $data04 $data05
if $rows != 1 then
return -1
endi
print =============== step2: create alter drop show user
sql create user u1 pass 'taosdata'
sql show users
sql alter user u1 sysinfo 1
sql alter user u1 enable 1
sql alter user u1 pass 'taosdata'
sql drop user u1
sql_error alter user u2 sysinfo 0
print =============== step3:
print =============== stop
system sh/exec.sh -n dnode1 -s stop -x SIGINT
print =============== check
print ----> start to check if there are ERRORS in vagrind log file for each dnode
system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
if $system_content <= 40 then
return 0
endi
$null=
if $system_content == $null then
return 0
endi
return -1
tests/script/tsim/valgrind/checkError2.sim
0 → 100644
浏览文件 @
29949a96
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start -v
sql connect
print =============== step1: create drop show dnodes
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
print ----> dnode not ready!
return -1
endi
sql show dnodes
print ----> $data00 $data01 $data02 $data03 $data04 $data05
if $rows != 1 then
return -1
endi
print =============== step2: create db
sql create database db vgroups 1
_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT
print =============== check
print ----> start to check if there are ERRORS in vagrind log file for each dnode
system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
if $system_content <= 60 then
return 0
endi
$null=
if $system_content == $null then
return 0
endi
return -1
tests/system-test/1-insert/alter_stable.py
浏览文件 @
29949a96
...
...
@@ -108,8 +108,8 @@ class TDTestCase:
tdSql
.
error
(
f
'alter stable
{
self
.
stbname
}
_
{
i
}
add column
{
key
}
{
values
}
'
)
tdSql
.
error
(
f
'alter stable
{
self
.
stbname
}
_
{
i
}
drop column
{
key
}
'
)
#! bug TD-16921
#
tdSql.error(f'alter stable {self.ntbname} add column {key} {values}')
#
tdSql.error(f'alter stable {self.ntbname} drop column {key}')
tdSql
.
error
(
f
'alter stable
{
self
.
ntbname
}
add column
{
key
}
{
values
}
'
)
tdSql
.
error
(
f
'alter stable
{
self
.
ntbname
}
drop column
{
key
}
'
)
tdSql
.
execute
(
f
'alter stable
{
self
.
stbname
}
drop column
{
key
}
'
)
tdSql
.
query
(
f
'describe
{
self
.
stbname
}
'
)
tdSql
.
checkRows
(
len
(
self
.
column_dict
)
+
len
(
self
.
tag_dict
))
...
...
@@ -132,7 +132,7 @@ class TDTestCase:
tdSql
.
checkEqual
(
result
[
0
][
2
],
self
.
binary_length
+
1
)
tdSql
.
error
(
f
'alter stable
{
self
.
stbname
}
_
{
i
}
modify column
{
key
}
{
v
}
'
)
#! bug TD-16921
#
tdSql.error(f'alter stable {self.ntbname} modify column {key} {v}')
tdSql
.
error
(
f
'alter stable
{
self
.
ntbname
}
modify column
{
key
}
{
v
}
'
)
elif
'nchar'
in
values
.
lower
():
v
=
f
'nchar(
{
self
.
binary_length
+
1
}
)'
v_error
=
f
'nchar(
{
self
.
binary_length
-
1
}
)'
...
...
@@ -147,11 +147,11 @@ class TDTestCase:
tdSql
.
checkEqual
(
result
[
0
][
2
],
self
.
binary_length
+
1
)
tdSql
.
error
(
f
'alter stable
{
self
.
stbname
}
_
{
i
}
modify column
{
key
}
{
v
}
'
)
#! bug TD-16921
#
tdSql.error(f'alter stable {self.ntbname} modify column {key} {v}')
tdSql
.
error
(
f
'alter stable
{
self
.
ntbname
}
modify column
{
key
}
{
v
}
'
)
else
:
for
v
in
self
.
column_dict
.
values
():
tdSql
.
error
(
f
'alter stable
{
self
.
stbname
}
modify column
{
key
}
{
v
}
'
)
#
tdSql.error(f'alter stable {self.ntbname} modify column {key} {v}')
tdSql
.
error
(
f
'alter stable
{
self
.
ntbname
}
modify column
{
key
}
{
v
}
'
)
for
i
in
range
(
self
.
tbnum
):
tdSql
.
error
(
f
'alter stable
{
self
.
stbname
}
_
{
i
}
modify column
{
key
}
{
v
}
'
)
def
run
(
self
):
...
...
tests/system-test/2-query/Timediff.py
浏览文件 @
29949a96
from
util.log
import
*
from
util.sql
import
*
from
util.cases
import
*
from
util.gettime
import
*
class
TDTestCase
:
def init(self, conn, logSql):
    """Initialize the test case: bind the SQL helper and set shared fixtures.

    conn   -- database connection whose cursor drives tdSql
    logSql -- framework flag (unused directly here)
    """
    tdLog.debug(f"start to excute {__file__}")
    tdSql.init(conn.cursor())
    # Helper that converts the timestamp strings below into epoch values.
    self.get_time = GetTime()
    # Sample timestamps with increasing sub-second resolution (date-only up
    # to nanoseconds) so every database precision is exercised.
    self.ts_str = [
        '2020-1-1',
        '2020-2-1 00:00:01',
        '2020-3-1 00:00:00.001',
        '2020-4-1 00:00:00.001002',
        '2020-5-1 00:00:00.001002001'
    ]
    # Database precisions to create and test against.
    self.db_param_precision = ['ms', 'us', 'ns']
    # Valid timediff() units: week/day/hour/minute/second/milli/micro/nano.
    self.time_unit = ['1w', '1d', '1h', '1m', '1s', '1a', '1u', '1b']
    # Units that timediff() must reject (multi-count or unknown specifiers).
    self.error_unit = ['2w', '2d', '2h', '2m', '2s', '2a', '2u', '1c', '#1']
    self.ntbname = 'ntb'   # normal table name
    self.stbname = 'stb'   # super table name
    self.ctbname = 'ctb'   # child table name
    self.subtractor = 1   # unit:s
def check_tbtype(self, tb_type):
    """Run the two-argument form `timediff(ts, subtractor)` on the table
    selected by tb_type ('ntb' / 'ctb' / 'stb'); the result is left in
    tdSql.queryResult for the caller to verify."""
    if tb_type.lower() == 'ntb':
        tdSql.query(f'select timediff(ts,{self.subtractor}) from {self.ntbname}')
    elif tb_type.lower() == 'ctb':
        tdSql.query(f'select timediff(ts,{self.subtractor}) from {self.ctbname}')
    elif tb_type.lower() == 'stb':
        tdSql.query(f'select timediff(ts,{self.subtractor}) from {self.stbname}')
def check_tb_type(self, unit, tb_type):
    """Run the three-argument form `timediff(ts, subtractor, unit)` on the
    table selected by tb_type ('ntb' / 'ctb' / 'stb'); the result is left in
    tdSql.queryResult for the caller to verify."""
    if tb_type.lower() == 'ntb':
        tdSql.query(f'select timediff(ts,{self.subtractor},{unit}) from {self.ntbname}')
    elif tb_type.lower() == 'ctb':
        tdSql.query(f'select timediff(ts,{self.subtractor},{unit}) from {self.ctbname}')
    elif tb_type.lower() == 'stb':
        tdSql.query(f'select timediff(ts,{self.subtractor},{unit}) from {self.stbname}')
def data_check(self, date_time, precision, tb_type):
    """Verify timediff() results against precomputed epoch timestamps.

    date_time -- expected raw epoch values of self.ts_str, expressed in the
                 database precision (ms/us/ns units respectively)
    precision -- database precision the table was created with: 'ms'/'us'/'ns'
    tb_type   -- table to query: 'ntb', 'ctb' or 'stb'

    For every valid unit, the query result is compared with the difference
    between the stored timestamp and self.subtractor (seconds), rescaled to
    that unit; units finer than the database precision must error out.
    Finally all units in self.error_unit must be rejected.
    """
    for unit in self.time_unit:
        # A unit finer than the database precision cannot be represented,
        # so the query must fail.
        if (unit.lower() == '1u' and precision.lower() == 'ms') or (unit.lower() == '1b' and precision.lower() == 'us') or (unit.lower() == '1b' and precision.lower() == 'ms'):
            if tb_type.lower() == 'ntb':
                tdSql.error(f'select timediff(ts,{self.subtractor},{unit}) from {self.ntbname}')
            elif tb_type.lower() == 'ctb':
                tdSql.error(f'select timediff(ts,{self.subtractor},{unit}) from {self.ctbname}')
            elif tb_type.lower() == 'stb':
                tdSql.error(f'select timediff(ts,{self.subtractor},{unit}) from {self.stbname}')
        elif precision.lower() == 'ms':
            # date_time[i] is in milliseconds in this branch.
            self.check_tb_type(unit, tb_type)
            tdSql.checkRows(len(self.ts_str))
            if unit.lower() == '1a':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(date_time[i]) - self.subtractor * 1000)
            elif unit.lower() == '1s':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(date_time[i] / 1000) - self.subtractor)
            elif unit.lower() == '1m':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000) - self.subtractor) / 60))
            elif unit.lower() == '1h':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000) - self.subtractor) / 60 / 60))
            elif unit.lower() == '1d':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000) - self.subtractor) / 60 / 60 / 24))
            elif unit.lower() == '1w':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000) - self.subtractor) / 60 / 60 / 24 / 7))
            # The two-argument form defaults to the database precision (ms).
            self.check_tbtype(tb_type)
            tdSql.checkRows(len(self.ts_str))
            for i in range(len(self.ts_str)):
                tdSql.checkEqual(tdSql.queryResult[i][0], int(date_time[i]) - self.subtractor * 1000)
        elif precision.lower() == 'us':
            # date_time[i] is in microseconds in this branch.
            self.check_tb_type(unit, tb_type)
            tdSql.checkRows(len(self.ts_str))
            if unit.lower() == '1w':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000000) - self.subtractor) / 60 / 60 / 24 / 7))
            elif unit.lower() == '1d':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000000) - self.subtractor) / 60 / 60 / 24))
            elif unit.lower() == '1h':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000000) - self.subtractor) / 60 / 60))
            elif unit.lower() == '1m':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000000) - self.subtractor) / 60))
            elif unit.lower() == '1s':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000000) - self.subtractor)))
            elif unit.lower() == '1a':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000) - self.subtractor * 1000)))
            elif unit.lower() == '1u':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i]) - self.subtractor * 1000000)))
            # The two-argument form defaults to the database precision (us).
            self.check_tbtype(tb_type)
            tdSql.checkRows(len(self.ts_str))
            for i in range(len(self.ts_str)):
                tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i]) - self.subtractor * 1000000)))
        elif precision.lower() == 'ns':
            # date_time[i] is in nanoseconds in this branch.
            self.check_tb_type(unit, tb_type)
            tdSql.checkRows(len(self.ts_str))
            if unit.lower() == '1w':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000000000) - self.subtractor) / 60 / 60 / 24 / 7))
            elif unit.lower() == '1d':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000000000) - self.subtractor) / 60 / 60 / 24))
            elif unit.lower() == '1h':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000000000) - self.subtractor) / 60 / 60))
            elif unit.lower() == '1m':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000000000) - self.subtractor) / 60))
            elif unit.lower() == '1s':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000000000) - self.subtractor)))
            elif unit.lower() == '1a':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000000) - self.subtractor * 1000)))
            elif unit.lower() == '1u':
                for i in range(len(self.ts_str)):
                    tdSql.checkEqual(tdSql.queryResult[i][0], int(((date_time[i] / 1000) - self.subtractor * 1000000)))
            # self.check_tbtype(tb_type)
            # tdSql.checkRows(len(self.ts_str))
            # for i in range(len(self.ts_str)):
            #     tdSql.checkEqual(tdSql.queryResult[i][0],int(((date_time[i]/1000000)-self.subtractor*1000000000)))
    # Every unit in error_unit must be rejected on both the ts column and a
    # non-timestamp column.
    for unit in self.error_unit:
        if tb_type.lower() == 'ntb':
            tdSql.error(f'select timediff(ts,{self.subtractor},{unit}) from {self.ntbname}')
            tdSql.error(f'select timediff(c0,{self.subtractor},{unit}) from {self.ntbname}')
        elif tb_type.lower() == 'ctb':
            tdSql.error(f'select timediff(ts,{self.subtractor},{unit}) from {self.ctbname}')
            # NOTE(review): the c0 check below targets ntbname, not ctbname —
            # looks like a copy/paste slip; confirm which table was intended.
            tdSql.error(f'select timediff(c0,{self.subtractor},{unit}) from {self.ntbname}')
        elif tb_type.lower() == 'stb':
            tdSql.error(f'select timediff(ts,{self.subtractor},{unit}) from {self.stbname}')
            # NOTE(review): the c0 check below targets ntbname, not stbname —
            # confirm which table was intended.
            tdSql.error(f'select timediff(c0,{self.subtractor},{unit}) from {self.ntbname}')
def function_check_ntb(self):
    """End-to-end timediff() check on a normal table, once per database
    precision: recreate the database, populate ntb from self.ts_str, assert
    all invalid units error out, then run data_check for 'ntb'."""
    for precision in self.db_param_precision:
        tdSql.execute('drop database if exists db')
        tdSql.execute(f'create database db precision "{precision}"')
        tdSql.execute('use db')
        tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)')
        for ts in self.ts_str:
            tdSql.execute(f'insert into {self.ntbname} values("{ts}",1)')
        for unit in self.error_unit:
            tdSql.error(f'select timediff(ts,{self.subtractor},{unit}) from {self.ntbname}')
        # Expected raw epoch values for the inserted rows, in this precision.
        date_time = self.get_time.time_transform(self.ts_str, precision)
        self.data_check(date_time, precision, 'ntb')
def function_check_stb(self):
    """End-to-end timediff() check on a super table and its child, once per
    database precision: recreate the database, populate the child table from
    self.ts_str, then run data_check for both 'ctb' and 'stb'."""
    for precision in self.db_param_precision:
        tdSql.execute('drop database if exists db')
        tdSql.execute(f'create database db precision "{precision}"')
        tdSql.execute('use db')
        tdSql.execute(f'create table {self.stbname} (ts timestamp,c0 int) tags(t0 int)')
        tdSql.execute(f'create table {self.ctbname} using {self.stbname} tags(1)')
        # Rows are inserted through the child table; the super-table query
        # sees the same data.
        for ts in self.ts_str:
            tdSql.execute(f'insert into {self.ctbname} values("{ts}",1)')
        # Expected raw epoch values for the inserted rows, in this precision.
        date_time = self.get_time.time_transform(self.ts_str, precision)
        self.data_check(date_time, precision, 'ctb')
        self.data_check(date_time, precision, 'stb')
def run(self):
    """Smoke-test timediff() with literal arguments on ntb/stb/stb_1, then run
    the precision-parameterized checks via function_check_ntb/_stb.

    Literal expectations below assume the default database precision (ms):
    e.g. timediff(1,0,1a) == 1000 and a one-day span is 86400 in 1s units.
    timediff(...,1u) must error in an ms-precision database.
    """
    # sourcery skip: extract-duplicate-method
    tdSql.prepare()
    tdLog.printNoPrefix("==========step1:create tables==========")
    tdSql.execute(
        '''create table if not exists ntb
        (ts timestamp, c1 int, c2 float,c3 double,c4 timestamp)
        '''
    )
    tdSql.execute(
        '''create table if not exists stb
        (ts timestamp, c1 int, c2 float,c3 double,c4 timestamp) tags(t0 int)
        '''
    )
    tdSql.execute(
        '''create table if not exists stb_1 using stb tags(100)
        '''
    )
    tdLog.printNoPrefix("==========step2:insert data into ntb==========")
    # RFC3339:2020-01-01T00:00:00+8:00
    # ISO8601:2020-01-01T00:00:00.000+0800
    tdSql.execute(
        'insert into ntb values(now,1,1.55,100.555555,today())("2020-1-1 00:00:00",10,11.11,99.999999,now())(today(),3,3.333,333.333333,now())')
    tdSql.execute(
        'insert into stb_1 values(now,1,1.55,100.555555,today())("2020-1-1 00:00:00",10,11.11,99.999999,now())(today(),3,3.333,333.333333,now())')

    # --- literal timestamps on ntb: one result row per stored row (3 rows) ---
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00') from ntb")
    tdSql.checkRows(3)
    tdSql.query("select timediff(1,0,1d) from ntb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 0)
    tdSql.query("select timediff(1,0,1d) from db.ntb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 0)
    tdSql.query("select timediff(1,0,1s) from ntb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 1)
    tdSql.query("select timediff(1,0,1s) from db.ntb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 1)
    tdSql.query("select timediff(1,0,1w) from ntb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 0)
    tdSql.query("select timediff(1,0,1w) from db.ntb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 0)
    tdSql.query("select timediff(1,0,1h) from ntb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 0)
    tdSql.query("select timediff(1,0,1h) from db.ntb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 0)
    tdSql.query("select timediff(1,0,1m) from ntb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 0)
    tdSql.query("select timediff(1,0,1m) from db.ntb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 0)
    tdSql.query("select timediff(1,0,1a) from ntb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 1000)
    tdSql.query("select timediff(1,0,1a) from db.ntb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 1000)
    # 1u is finer than the default ms precision, so it must error.
    tdSql.error("select timediff(1,0,1u) from ntb")
    #tdSql.checkRows(3)
    #tdSql.checkData(0,0,1000000)
    tdSql.error("select timediff(1,0,1u) from db.ntb")
    #tdSql.checkRows(3)
    #tdSql.checkData(0,0,1000000)

    # --- literal timestamps on stb (one-day span) ---
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00') from stb")
    tdSql.checkRows(3)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00') from db.stb")
    tdSql.checkRows(3)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1d) from stb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 1)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1d) from db.stb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 1)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1h) from stb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 24)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1h) from db.stb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 24)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1w) from stb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 0)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1m) from stb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 1440)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1m) from db.stb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 1440)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1s) from stb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 86400)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1s) from db.stb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 86400)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1a) from stb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 86400000)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1a) from db.stb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 86400000)
    tdSql.error("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1u) from stb")
    #tdSql.checkRows(3)
    #tdSql.checkData(0,0,86400000000)
    tdSql.error("select timediff('2020-1-1 00:00:00','2020-1-2 00:00:00',1u) from db.stb")
    #tdSql.checkRows(3)
    #tdSql.checkData(0,0,86400000000)

    # --- literal timestamps on the child table stb_1 (half-day span) ---
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00') from stb_1")
    tdSql.checkRows(3)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00') from db.stb_1")
    tdSql.checkRows(3)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1w) from stb_1 ")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 0)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1w) from db.stb_1 ")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 0)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1d) from stb_1 ")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 0)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1d) from db.stb_1 ")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 0)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1h) from stb_1 ")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 12)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1h) from db.stb_1 ")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 12)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1m) from stb_1")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 720)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1m) from db.stb_1")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 720)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1s) from stb_1")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 43200)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1s) from db.stb_1")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 43200)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1a) from stb_1")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 43200000)
    tdSql.query("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1a) from db.stb_1")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, 43200000)
    tdSql.error("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1u) from stb_1")
    #tdSql.checkRows(3)
    #tdSql.checkData(0,0,43200000000)
    tdSql.error("select timediff('2020-1-1 00:00:00','2020-1-1 12:00:00',1u) from db.stb_1")
    #tdSql.checkRows(3)
    #tdSql.checkData(0,0,43200000000)

    # Non-timestamp strings yield NULL results, one per row.
    tdSql.query("select timediff('a','b') from stb")
    tdSql.checkRows(3)
    tdSql.checkData(0, 0, None)
    tdSql.checkData(1, 0, None)
    tdSql.checkData(2, 0, None)
    # Invalid argument types / counts must be rejected on every table kind.
    tdSql.error("select timediff(1.5,1.5) from stb")
    tdSql.error("select timediff(1) from stb")
    tdSql.error("select timediff(10,1,1.5) from stb")
    # tdSql.error("select timediff(10,1,2s) from stb")
    # tdSql.error("select timedifff(10,1,c1) from stb")
    tdSql.error("select timediff(1.5,1.5) from stb_1")
    tdSql.error("select timediff(1) from stb_1")
    tdSql.error("select timediff(10,1,1.5) from stb_1")
    # tdSql.error("select timediff(10,1,2s) from stb_1")
    # tdSql.error("select timedifff(10,1,c1) from stb_1")
    tdSql.error("select timediff(1.5,1.5) from ntb")
    tdSql.error("select timediff(1) from ntb")
    tdSql.error("select timediff(10,1,1.5) from ntb")
    # tdSql.error("select timediff(10,1,2s) from ntb")
    # tdSql.error("select timedifff(10,1,c1) from ntb")

    # Precision-parameterized sweeps over ms/us/ns databases.
    self.function_check_ntb()
    self.function_check_stb()
def
stop
(
self
):
tdSql
.
close
()
...
...
tests/system-test/2-query/distribute_agg_stddev.py
浏览文件 @
29949a96
tests/system-test/2-query/timetruncate.py
浏览文件 @
29949a96
...
...
@@ -5,153 +5,115 @@ from util.sql import *
import
numpy
as
np
import
time
from
datetime
import
datetime
from
util.gettime
import
*
class
TDTestCase
:
def
init
(
self
,
conn
,
logSql
):
tdLog
.
debug
(
"start to execute %s"
%
__file__
)
tdSql
.
init
(
conn
.
cursor
())
self
.
rowNum
=
10
self
.
ts
=
1537146000000
# 2018-9-17 09:00:00.000
self
.
get_time
=
GetTime
()
self
.
ts_str
=
[
'2020-1-1'
,
'2020-2-1 00:00:01'
,
'2020-3-1 00:00:00.001'
,
'2020-4-1 00:00:00.001002'
,
'2020-5-1 00:00:00.001002001'
]
self
.
db_param_precision
=
[
'ms'
,
'us'
,
'ns'
]
self
.
time_unit
=
[
'1w'
,
'1d'
,
'1h'
,
'1m'
,
'1s'
,
'1a'
,
'1u'
]
#self.error_unit = ['1b',
'2w','2d','2h','2m','2s','2a','2u','1c','#1']
self
.
time_unit
=
[
'1w'
,
'1d'
,
'1h'
,
'1m'
,
'1s'
,
'1a'
,
'1u'
,
'1b'
]
self
.
error_unit
=
[
'2w'
,
'2d'
,
'2h'
,
'2m'
,
'2s'
,
'2a'
,
'2u'
,
'1c'
,
'#1'
]
self
.
error_unit
=
[
'2w'
,
'2d'
,
'2h'
,
'2m'
,
'2s'
,
'2a'
,
'2u'
,
'1c'
,
'#1'
]
self
.
ntbname
=
'ntb'
self
.
stbname
=
'stb'
self
.
ctbname
=
'ctb'
def
get_ms_timestamp
(
self
,
ts_str
):
_ts_str
=
ts_str
if
" "
in
ts_str
:
p
=
ts_str
.
split
(
" "
)[
1
]
if
len
(
p
)
>
15
:
_ts_str
=
ts_str
[:
-
3
]
if
':'
in
_ts_str
and
'.'
in
_ts_str
:
timestamp
=
datetime
.
strptime
(
_ts_str
,
"%Y-%m-%d %H:%M:%S.%f"
)
date_time
=
int
(
int
(
time
.
mktime
(
timestamp
.
timetuple
()))
*
1000
+
timestamp
.
microsecond
/
1000
)
elif
':'
in
_ts_str
and
'.'
not
in
_ts_str
:
timestamp
=
datetime
.
strptime
(
_ts_str
,
"%Y-%m-%d %H:%M:%S"
)
date_time
=
int
(
int
(
time
.
mktime
(
timestamp
.
timetuple
()))
*
1000
+
timestamp
.
microsecond
/
1000
)
else
:
timestamp
=
datetime
.
strptime
(
_ts_str
,
"%Y-%m-%d"
)
date_time
=
int
(
int
(
time
.
mktime
(
timestamp
.
timetuple
()))
*
1000
+
timestamp
.
microsecond
/
1000
)
return
date_time
def
get_us_timestamp
(
self
,
ts_str
):
_ts
=
self
.
get_ms_timestamp
(
ts_str
)
*
1000
if
" "
in
ts_str
:
p
=
ts_str
.
split
(
" "
)[
1
]
if
len
(
p
)
>
12
:
us_ts
=
p
[
12
:
15
]
_ts
+=
int
(
us_ts
)
return
_ts
def
get_ns_timestamp
(
self
,
ts_str
):
_ts
=
self
.
get_us_timestamp
(
ts_str
)
*
1000
if
" "
in
ts_str
:
p
=
ts_str
.
split
(
" "
)[
1
]
if
len
(
p
)
>
15
:
us_ts
=
p
[
15
:]
_ts
+=
int
(
us_ts
)
return
_ts
def
time_transform
(
self
,
ts_str
,
precision
):
date_time
=
[]
if
precision
==
'ms'
:
for
i
in
ts_str
:
date_time
.
append
(
self
.
get_ms_timestamp
(
i
))
elif
precision
==
'us'
:
for
i
in
ts_str
:
date_time
.
append
(
self
.
get_us_timestamp
(
i
))
elif
precision
==
'ns'
:
for
i
in
ts_str
:
date_time
.
append
(
self
.
get_us_timestamp
(
i
))
return
date_time
def
check_ms_timestamp
(
self
,
unit
,
date_time
):
if
unit
.
lower
()
==
'1a'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
ts_result
=
self
.
get_ms_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
ts_result
=
self
.
get_
time
.
get_
ms_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
tdSql
.
checkEqual
(
ts_result
,
int
(
date_time
[
i
]))
elif
unit
.
lower
()
==
'1s'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
ts_result
=
self
.
get_ms_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
ts_result
=
self
.
get_
time
.
get_
ms_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
tdSql
.
checkEqual
(
ts_result
,
int
(
date_time
[
i
]
/
1000
)
*
1000
)
elif
unit
.
lower
()
==
'1m'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
ts_result
=
self
.
get_ms_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
ts_result
=
self
.
get_
time
.
get_
ms_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
tdSql
.
checkEqual
(
ts_result
,
int
(
date_time
[
i
]
/
1000
/
60
)
*
60
*
1000
)
elif
unit
.
lower
()
==
'1h'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
ts_result
=
self
.
get_ms_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
ts_result
=
self
.
get_
time
.
get_
ms_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
tdSql
.
checkEqual
(
ts_result
,
int
(
date_time
[
i
]
/
1000
/
60
/
60
)
*
60
*
60
*
1000
)
elif
unit
.
lower
()
==
'1d'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
ts_result
=
self
.
get_ms_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
ts_result
=
self
.
get_
time
.
get_
ms_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
tdSql
.
checkEqual
(
ts_result
,
int
(
date_time
[
i
]
/
1000
/
60
/
60
/
24
)
*
24
*
60
*
60
*
1000
)
elif
unit
.
lower
()
==
'1w'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
ts_result
=
self
.
get_ms_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
ts_result
=
self
.
get_
time
.
get_
ms_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
tdSql
.
checkEqual
(
ts_result
,
int
(
date_time
[
i
]
/
1000
/
60
/
60
/
24
/
7
)
*
7
*
24
*
60
*
60
*
1000
)
def
check_us_timestamp
(
self
,
unit
,
date_time
):
if
unit
.
lower
()
==
'1u'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
ts_result
=
self
.
get_us_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
ts_result
=
self
.
get_
time
.
get_
us_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
tdSql
.
checkEqual
(
ts_result
,
int
(
date_time
[
i
]))
elif
unit
.
lower
()
==
'1a'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
ts_result
=
self
.
get_us_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
ts_result
=
self
.
get_
time
.
get_
us_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
tdSql
.
checkEqual
(
ts_result
,
int
(
date_time
[
i
]
/
1000
)
*
1000
)
elif
unit
.
lower
()
==
'1s'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
ts_result
=
self
.
get_us_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
ts_result
=
self
.
get_
time
.
get_
us_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
tdSql
.
checkEqual
(
ts_result
,
int
(
date_time
[
i
]
/
1000
/
1000
)
*
1000
*
1000
)
elif
unit
.
lower
()
==
'1m'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
ts_result
=
self
.
get_us_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
ts_result
=
self
.
get_
time
.
get_
us_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
tdSql
.
checkEqual
(
ts_result
,
int
(
date_time
[
i
]
/
1000
/
1000
/
60
)
*
60
*
1000
*
1000
)
elif
unit
.
lower
()
==
'1h'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
ts_result
=
self
.
get_us_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
ts_result
=
self
.
get_
time
.
get_
us_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
tdSql
.
checkEqual
(
ts_result
,
int
(
date_time
[
i
]
/
1000
/
1000
/
60
/
60
)
*
60
*
60
*
1000
*
1000
)
elif
unit
.
lower
()
==
'1d'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
ts_result
=
self
.
get_us_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
ts_result
=
self
.
get_
time
.
get_
us_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
tdSql
.
checkEqual
(
ts_result
,
int
(
date_time
[
i
]
/
1000
/
1000
/
60
/
60
/
24
)
*
24
*
60
*
60
*
1000
*
1000
)
elif
unit
.
lower
()
==
'1w'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
ts_result
=
self
.
get_us_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
ts_result
=
self
.
get_
time
.
get_
us_timestamp
(
str
(
tdSql
.
queryResult
[
i
][
0
]))
tdSql
.
checkEqual
(
ts_result
,
int
(
date_time
[
i
]
/
1000
/
1000
/
60
/
60
/
24
/
7
)
*
7
*
24
*
60
*
60
*
1000
*
1000
)
def
check_ns_timestamp
(
self
,
unit
,
date_time
):
if
unit
.
lower
()
==
'1
u
'
:
if
unit
.
lower
()
==
'1
b
'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]
*
1000
/
1000
)
*
1000
)
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]))
elif
unit
.
lower
()
==
'1u'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]
*
1000
/
1000
/
1000
)
*
1000
)
elif
unit
.
lower
()
==
'1a'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]
*
1000
/
1000
/
1000
)
*
1000
*
1000
)
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]
*
1000
/
1000
/
1000
/
1000
)
*
1000
*
1000
)
elif
unit
.
lower
()
==
'1s'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]
*
1000
/
1000
/
1000
/
1000
)
*
1000
*
1000
*
1000
)
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]
*
1000
/
1000
/
1000
/
1000
/
1000
)
*
1000
*
1000
*
1000
)
elif
unit
.
lower
()
==
'1m'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]
*
1000
/
1000
/
1000
/
1000
/
60
)
*
60
*
1000
*
1000
*
1000
)
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]
*
1000
/
1000
/
1000
/
1000
/
1000
/
60
)
*
60
*
1000
*
1000
*
1000
)
elif
unit
.
lower
()
==
'1h'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]
*
1000
/
1000
/
1000
/
1000
/
60
/
60
)
*
60
*
60
*
1000
*
1000
*
1000
)
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]
*
1000
/
1000
/
1000
/
1000
/
1000
/
60
/
60
)
*
60
*
60
*
1000
*
1000
*
1000
)
elif
unit
.
lower
()
==
'1d'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]
*
1000
/
1000
/
1000
/
1000
/
60
/
60
/
24
)
*
24
*
60
*
60
*
1000
*
1000
*
1000
)
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]
*
1000
/
1000
/
1000
/
1000
/
1000
/
60
/
60
/
24
)
*
24
*
60
*
60
*
1000
*
1000
*
1000
)
elif
unit
.
lower
()
==
'1w'
:
for
i
in
range
(
len
(
self
.
ts_str
)):
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]
*
1000
/
1000
/
1000
/
1000
/
60
/
60
/
24
/
7
)
*
7
*
24
*
60
*
60
*
1000
*
1000
*
1000
)
tdSql
.
checkEqual
(
tdSql
.
queryResult
[
i
][
0
],
int
(
date_time
[
i
]
*
1000
/
1000
/
1000
/
1000
/
1000
/
60
/
60
/
24
/
7
)
*
7
*
24
*
60
*
60
*
1000
*
1000
*
1000
)
def
check_tb_type
(
self
,
unit
,
tb_type
):
if
tb_type
.
lower
()
==
'ntb'
:
tdSql
.
query
(
f
'select timetruncate(ts,
{
unit
}
) from
{
self
.
ntbname
}
'
)
elif
tb_type
.
lower
()
==
'ctb'
:
tdSql
.
query
(
f
'select timetruncate(ts,
{
unit
}
) from
{
self
.
ctbname
}
'
)
elif
tb_type
.
lower
()
==
'stb'
:
tdSql
.
query
(
f
'select timetruncate(ts,
{
unit
}
) from
{
self
.
stbname
}
'
)
def
data_check
(
self
,
date_time
,
precision
,
tb_type
):
for
unit
in
self
.
time_unit
:
if
(
unit
.
lower
()
==
'1u'
and
precision
.
lower
()
==
'ms'
)
or
(
)
:
if
(
unit
.
lower
()
==
'1u'
and
precision
.
lower
()
==
'ms'
)
or
(
unit
.
lower
()
==
'1b'
and
precision
.
lower
()
==
'us'
)
or
(
unit
.
lower
()
==
'1b'
and
precision
.
lower
()
==
'ms'
)
:
if
tb_type
.
lower
()
==
'ntb'
:
tdSql
.
error
(
f
'select timetruncate(ts,
{
unit
}
) from
{
self
.
ntbname
}
'
)
elif
tb_type
.
lower
()
==
'ctb'
:
...
...
@@ -159,30 +121,15 @@ class TDTestCase:
elif
tb_type
.
lower
()
==
'stb'
:
tdSql
.
error
(
f
'select timetruncate(ts,
{
unit
}
) from
{
self
.
stbname
}
'
)
elif
precision
.
lower
()
==
'ms'
:
if
tb_type
.
lower
()
==
'ntb'
:
tdSql
.
query
(
f
'select timetruncate(ts,
{
unit
}
) from
{
self
.
ntbname
}
'
)
elif
tb_type
.
lower
()
==
'ctb'
:
tdSql
.
query
(
f
'select timetruncate(ts,
{
unit
}
) from
{
self
.
ctbname
}
'
)
elif
tb_type
.
lower
()
==
'stb'
:
tdSql
.
query
(
f
'select timetruncate(ts,
{
unit
}
) from
{
self
.
stbname
}
'
)
self
.
check_tb_type
(
unit
,
tb_type
)
tdSql
.
checkRows
(
len
(
self
.
ts_str
))
self
.
check_ms_timestamp
(
unit
,
date_time
)
elif
precision
.
lower
()
==
'us'
:
if
tb_type
.
lower
()
==
'ntb'
:
tdSql
.
query
(
f
'select timetruncate(ts,
{
unit
}
) from
{
self
.
ntbname
}
'
)
elif
tb_type
.
lower
()
==
'ctb'
:
tdSql
.
query
(
f
'select timetruncate(ts,
{
unit
}
) from
{
self
.
ctbname
}
'
)
elif
tb_type
.
lower
()
==
'stb'
:
tdSql
.
query
(
f
'select timetruncate(ts,
{
unit
}
) from
{
self
.
stbname
}
'
)
self
.
check_tb_type
(
unit
,
tb_type
)
tdSql
.
checkRows
(
len
(
self
.
ts_str
))
self
.
check_us_timestamp
(
unit
,
date_time
)
elif
precision
.
lower
()
==
'ns'
:
if
tb_type
.
lower
()
==
'ntb'
:
tdSql
.
query
(
f
'select timetruncate(ts,
{
unit
}
) from
{
self
.
ntbname
}
'
)
elif
tb_type
.
lower
()
==
'ctb'
:
tdSql
.
query
(
f
'select timetruncate(ts,
{
unit
}
) from
{
self
.
ctbname
}
'
)
elif
tb_type
.
lower
()
==
'stb'
:
tdSql
.
query
(
f
'select timetruncate(ts,
{
unit
}
) from
{
self
.
stbname
}
'
)
self
.
check_tb_type
(
unit
,
tb_type
)
tdSql
.
checkRows
(
len
(
self
.
ts_str
))
self
.
check_ns_timestamp
(
unit
,
date_time
)
for
unit
in
self
.
error_unit
:
...
...
@@ -200,9 +147,8 @@ class TDTestCase:
tdSql
.
execute
(
f
'create table
{
self
.
ntbname
}
(ts timestamp,c0 int)'
)
for
ts
in
self
.
ts_str
:
tdSql
.
execute
(
f
'insert into
{
self
.
ntbname
}
values("
{
ts
}
",1)'
)
date_time
=
self
.
time_transform
(
self
.
ts_str
,
precision
)
date_time
=
self
.
get_time
.
time_transform
(
self
.
ts_str
,
precision
)
self
.
data_check
(
date_time
,
precision
,
'ntb'
)
def
function_check_stb
(
self
):
for
precision
in
self
.
db_param_precision
:
tdSql
.
execute
(
'drop database if exists db'
)
...
...
@@ -212,7 +158,7 @@ class TDTestCase:
tdSql
.
execute
(
f
'create table
{
self
.
ctbname
}
using
{
self
.
stbname
}
tags(1)'
)
for
ts
in
self
.
ts_str
:
tdSql
.
execute
(
f
'insert into
{
self
.
ctbname
}
values("
{
ts
}
",1)'
)
date_time
=
self
.
time_transform
(
self
.
ts_str
,
precision
)
date_time
=
self
.
get_time
.
time_transform
(
self
.
ts_str
,
precision
)
self
.
data_check
(
date_time
,
precision
,
'ctb'
)
self
.
data_check
(
date_time
,
precision
,
'stb'
)
def
run
(
self
):
...
...
tests/system-test/simpletest.bat
浏览文件 @
29949a96
@REM
python3 .\test.py -f 0-others\taosShell.py
python3
.\test.py
-f
0
-others
\taosShell.py
python3
.\test.py
-f
0
-others
\taosShellError.py
python3
.\test.py
-f
0
-others
\taosShellNetChk.py
python3
.\test.py
-f
0
-others
\telemetry.py
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录