taosdata / TDengine
Commit 28ea375f (unverified)
Authored by dapan1121 on Apr 11, 2023; committed via GitHub on Apr 11, 2023
Parents: 40448c3e, d2f9f2d7

Merge pull request #20850 from taosdata/enh/ts-3112

enh: add queryMaxConcurrentTables configuration
Showing 5 changed files with 22 additions and 17 deletions:

  include/common/tglobal.h                  +1   -0
  source/common/src/tglobal.c               +3   -0
  source/libs/scheduler/inc/schInt.h        +2   -3
  source/libs/scheduler/src/schFlowCtrl.c   +12  -12
  source/libs/scheduler/src/scheduler.c     +4   -2
include/common/tglobal.h

@@ -104,6 +104,7 @@ extern int32_t tsCacheLazyLoadThreshold; // cost threshold for last/last_row lo
 // query client
 extern int32_t tsQueryPolicy;
 extern int32_t tsQueryRspPolicy;
+extern int64_t tsQueryMaxConcurrentTables;
 extern int32_t tsQuerySmaOptimize;
 extern int32_t tsQueryRsmaTolerance;
 extern bool    tsQueryPlannerTrace;
source/common/src/tglobal.c

@@ -103,6 +103,7 @@ char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user defined child table
 // query
 int32_t tsQueryPolicy = 1;
 int32_t tsQueryRspPolicy = 0;
+int64_t tsQueryMaxConcurrentTables = 200;  // unit is TSDB_TABLE_NUM_UNIT
 bool    tsEnableQueryHb = false;
 int32_t tsQuerySmaOptimize = 0;
 int32_t tsQueryRsmaTolerance = 1000;  // the tolerance time (ms) to judge from which level to query rsma data.

@@ -340,6 +341,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
   if (cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, 0) != 0) return -1;
   if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, true) != 0) return -1;
   if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, true) != 0) return -1;
+  if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, 1) != 0) return -1;
   tsNumOfRpcThreads = tsNumOfCores / 2;
   tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, TSDB_MAX_RPC_THREADS);

@@ -735,6 +737,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
   tsKeepColumnName = cfgGetItem(pCfg, "keepColumnName")->bval;
   tsUseAdapter = cfgGetItem(pCfg, "useAdapter")->bval;
   tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;
+  tsQueryMaxConcurrentTables = cfgGetItem(pCfg, "queryMaxConcurrentTables")->i64;
   tsMaxRetryWaitTime = cfgGetItem(pCfg, "maxRetryWaitTime")->i32;
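The two hunks in taosAddClientCfg/taosSetClientCfg follow a register-then-apply pattern: the parameter is registered with its default value and accepted range, and once user overrides have been loaded the effective value is copied back into the global. Below is a minimal standalone sketch of that pattern for illustration only; the demoCfg* store and function names are hypothetical stand-ins, not TDengine's SConfig API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
  const char *name;
  int64_t     val;     /* effective value (default until overridden) */
  int64_t     minVal;
  int64_t     maxVal;
} DemoCfgItem;

static DemoCfgItem gItems[8];
static int         gItemNum = 0;

/* global consumed by the rest of the program, mirroring tsQueryMaxConcurrentTables */
static int64_t gQueryMaxConcurrentTables = 200;

/* phase 1: register a parameter with a default value and an accepted range */
static int demoCfgAddInt64(const char *name, int64_t dflt, int64_t minVal, int64_t maxVal) {
  if (gItemNum >= 8 || dflt < minVal || dflt > maxVal) return -1;
  gItems[gItemNum++] = (DemoCfgItem){.name = name, .val = dflt, .minVal = minVal, .maxVal = maxVal};
  return 0;
}

static DemoCfgItem *demoCfgGetItem(const char *name) {
  for (int i = 0; i < gItemNum; ++i) {
    if (strcmp(gItems[i].name, name) == 0) return &gItems[i];
  }
  return NULL;
}

int main(void) {
  if (demoCfgAddInt64("queryMaxConcurrentTables", gQueryMaxConcurrentTables, INT64_MIN, INT64_MAX) != 0) return 1;

  /* a config file or environment override would update the stored value here */
  demoCfgGetItem("queryMaxConcurrentTables")->val = 1000;

  /* phase 2: apply the effective value back into the global */
  gQueryMaxConcurrentTables = demoCfgGetItem("queryMaxConcurrentTables")->val;
  printf("queryMaxConcurrentTables = %lld\n", (long long)gQueryMaxConcurrentTables);
  return 0;
}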
source/libs/scheduler/inc/schInt.h

@@ -54,7 +54,6 @@ typedef enum {
 #define SCHEDULE_DEFAULT_MAX_JOB_NUM  1000
 #define SCHEDULE_DEFAULT_MAX_TASK_NUM 1000
-#define SCHEDULE_DEFAULT_MAX_NODE_TABLE_NUM 200  // unit is TSDB_TABLE_NUM_UNIT
 #define SCHEDULE_DEFAULT_POLICY       SCH_LOAD_SEQ
 #define SCHEDULE_DEFAULT_MAX_NODE_NUM 20

@@ -134,7 +133,7 @@ typedef struct SSchStatusFps {
 typedef struct SSchedulerCfg {
   uint32_t   maxJobNum;
-  int32_t    maxNodeTableNum;
+  int64_t    maxNodeTableNum;
   SCH_POLICY schPolicy;
   bool       enableReSchedule;
 } SSchedulerCfg;

@@ -175,7 +174,7 @@ typedef struct SSchHbCallbackParam {
 typedef struct SSchFlowControl {
   SRWLatch lock;
   bool     sorted;
-  int32_t  tableNumSum;
+  int64_t  tableNumSum;
   uint32_t execTaskNum;
   SArray  *taskList;  // Element is SSchTask*
 } SSchFlowControl;
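Widening maxNodeTableNum and tableNumSum from int32_t to int64_t matches the new int64_t configuration value and keeps the accumulated per-task table counts exact even when very large totals are configured or summed. A small self-contained illustration of why the 64-bit accumulator is the safer choice (the table counts are made up for the example):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* hypothetical per-task table counts reported by two data-source tasks */
  int32_t tableNumA = 1500000000;
  int32_t tableNumB = 1500000000;

  /* accumulate in 64 bits: each operand is promoted before the addition */
  int64_t sum = (int64_t)tableNumA + tableNumB;

  printf("total tableNum: %" PRId64 "\n", sum);
  printf("fits in int32_t: %s\n", (sum <= INT32_MAX) ? "yes" : "no");  /* prints "no" */
  return 0;
}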
source/libs/scheduler/src/schFlowCtrl.c

@@ -46,7 +46,7 @@ int32_t schChkJobNeedFlowCtrl(SSchJob *pJob, SSchLevel *pLevel) {
     return TSDB_CODE_SUCCESS;
   }

-  int32_t sum = 0;
+  int64_t sum = 0;
   int32_t taskNum = taosArrayGetSize(pJob->dataSrcTasks);
   for (int32_t i = 0; i < taskNum; ++i) {
     SSchTask *pTask = *(SSchTask **)taosArrayGet(pJob->dataSrcTasks, i);

@@ -55,7 +55,7 @@ int32_t schChkJobNeedFlowCtrl(SSchJob *pJob, SSchLevel *pLevel) {
   }

   if (schMgmt.cfg.maxNodeTableNum <= 0 || sum < schMgmt.cfg.maxNodeTableNum) {
-    SCH_JOB_DLOG("job no need flow ctrl, totalTableNum:%d", sum);
+    SCH_JOB_DLOG("job no need flow ctrl, totalTableNum:%" PRId64, sum);
     return TSDB_CODE_SUCCESS;
   }

@@ -68,7 +68,7 @@ int32_t schChkJobNeedFlowCtrl(SSchJob *pJob, SSchLevel *pLevel) {
   SCH_SET_JOB_NEED_FLOW_CTRL(pJob);
-  SCH_JOB_DLOG("job NEED flow ctrl, totalTableNum:%d", sum);
+  SCH_JOB_DLOG("job NEED flow ctrl, totalTableNum:%" PRId64, sum);

   return TSDB_CODE_SUCCESS;
 }

@@ -94,7 +94,7 @@ int32_t schDecTaskFlowQuota(SSchJob *pJob, SSchTask *pTask) {
   --ctrl->execTaskNum;
   ctrl->tableNumSum -= pTask->plan->execNodeStat.tableNum;

-  SCH_TASK_DLOG("task quota removed, fqdn:%s, port:%d, tableNum:%d, remainNum:%d, remainExecTaskNum:%d", ep->fqdn,
+  SCH_TASK_DLOG("task quota removed, fqdn:%s, port:%d, tableNum:%d, remainNum:%" PRId64 ", remainExecTaskNum:%d", ep->fqdn,
                 ep->port, pTask->plan->execNodeStat.tableNum, ctrl->tableNumSum, ctrl->execTaskNum);

 _return:

@@ -125,7 +125,7 @@ int32_t schCheckIncTaskFlowQuota(SSchJob *pJob, SSchTask *pTask, bool *enough) {
       SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
     }

-    SCH_TASK_DLOG("task quota added, fqdn:%s, port:%d, tableNum:%d, remainNum:%d, remainExecTaskNum:%d", ep->fqdn,
+    SCH_TASK_DLOG("task quota added, fqdn:%s, port:%d, tableNum:%d, remainNum:%" PRId64 ", remainExecTaskNum:%d", ep->fqdn,
                   ep->port, pTask->plan->execNodeStat.tableNum, nctrl.tableNumSum, nctrl.execTaskNum);

     *enough = true;

@@ -142,7 +142,7 @@ int32_t schCheckIncTaskFlowQuota(SSchJob *pJob, SSchTask *pTask, bool *enough) {
       break;
     }

-    int32_t sum = pTask->plan->execNodeStat.tableNum + ctrl->tableNumSum;
+    int64_t sum = pTask->plan->execNodeStat.tableNum + ctrl->tableNumSum;

     if (sum <= schMgmt.cfg.maxNodeTableNum) {
       ctrl->tableNumSum = sum;

@@ -173,7 +173,7 @@ int32_t schCheckIncTaskFlowQuota(SSchJob *pJob, SSchTask *pTask, bool *enough) {
 _return:

-  SCH_TASK_DLOG("task quota %s added, fqdn:%s, port:%d, tableNum:%d, remainNum:%d, remainExecTaskNum:%d",
+  SCH_TASK_DLOG("task quota %s added, fqdn:%s, port:%d, tableNum:%d, remainNum:%" PRId64 ", remainExecTaskNum:%d",
                 ((*enough) ? "" : "NOT"), ep->fqdn, ep->port, pTask->plan->execNodeStat.tableNum, ctrl->tableNumSum,
                 ctrl->execTaskNum);

@@ -203,7 +203,7 @@ int32_t schLaunchTasksInFlowCtrlListImpl(SSchJob *pJob, SSchFlowControl *ctrl) {
     return TSDB_CODE_SUCCESS;
   }

-  int32_t remainNum = schMgmt.cfg.maxNodeTableNum - ctrl->tableNumSum;
+  int64_t remainNum = schMgmt.cfg.maxNodeTableNum - ctrl->tableNumSum;
   int32_t taskNum = taosArrayGetSize(ctrl->taskList);
   int32_t code = 0;
   SSchTask *pTask = NULL;

@@ -217,7 +217,7 @@ int32_t schLaunchTasksInFlowCtrlListImpl(SSchJob *pJob, SSchFlowControl *ctrl) {
     SEp *ep = SCH_GET_CUR_EP(&pTask->plan->execNode);

     if (pTask->plan->execNodeStat.tableNum > remainNum && ctrl->execTaskNum > 0) {
-      SCH_TASK_DLOG("task NOT to launch, fqdn:%s, port:%d, tableNum:%d, remainNum:%d, remainExecTaskNum:%d", ep->fqdn,
+      SCH_TASK_DLOG("task NOT to launch, fqdn:%s, port:%d, tableNum:%d, remainNum:%" PRId64 ", remainExecTaskNum:%d", ep->fqdn,
                     ep->port, pTask->plan->execNodeStat.tableNum, ctrl->tableNumSum, ctrl->execTaskNum);

       continue;

@@ -228,14 +228,14 @@ int32_t schLaunchTasksInFlowCtrlListImpl(SSchJob *pJob, SSchFlowControl *ctrl) {
     taosArrayRemove(ctrl->taskList, i);

-    SCH_TASK_DLOG("task to launch, fqdn:%s, port:%d, tableNum:%d, remainNum:%d, remainExecTaskNum:%d", ep->fqdn,
+    SCH_TASK_DLOG("task to launch, fqdn:%s, port:%d, tableNum:%d, remainNum:%" PRId64 ", remainExecTaskNum:%d", ep->fqdn,
                   ep->port, pTask->plan->execNodeStat.tableNum, ctrl->tableNumSum, ctrl->execTaskNum);

     SCH_ERR_JRET(schAsyncLaunchTaskImpl(pJob, pTask));

     remainNum -= pTask->plan->execNodeStat.tableNum;
     if (remainNum <= 0) {
-      SCH_TASK_DLOG("no more task to launch, fqdn:%s, port:%d, remainNum:%d, remainExecTaskNum:%d", ep->fqdn, ep->port,
+      SCH_TASK_DLOG("no more task to launch, fqdn:%s, port:%d, remainNum:%" PRId64 ", remainExecTaskNum:%d", ep->fqdn, ep->port,
                     ctrl->tableNumSum, ctrl->execTaskNum);

       break;

@@ -244,7 +244,7 @@ int32_t schLaunchTasksInFlowCtrlListImpl(SSchJob *pJob, SSchFlowControl *ctrl) {
     if (i < (taskNum - 1)) {
       SSchTask *pLastTask = *(SSchTask **)taosArrayGetLast(ctrl->taskList);
       if (remainNum < pLastTask->plan->execNodeStat.tableNum) {
-        SCH_TASK_DLOG("no more task to launch, fqdn:%s, port:%d, remainNum:%d, remainExecTaskNum:%d, smallestInList:%d",
+        SCH_TASK_DLOG("no more task to launch, fqdn:%s, port:%d, remainNum:%" PRId64 ", remainExecTaskNum:%d, smallestInList:%d",
                       ep->fqdn, ep->port, ctrl->tableNumSum, ctrl->execTaskNum, pLastTask->plan->execNodeStat.tableNum);

         break;
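The format-string changes above only adjust logging to the wider type; the flow-control decision itself is unchanged: a job is throttled only when the configured per-node table cap is positive and the job's total table count reaches it, as in the `maxNodeTableNum <= 0 || sum < maxNodeTableNum` test in the first hunk. A simplified standalone sketch of that decision follows; jobNeedsFlowCtrl is a hypothetical name, not TDengine code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors the early-return test in schChkJobNeedFlowCtrl(): a non-positive cap
 * disables flow control, and jobs below the cap are launched without throttling */
static bool jobNeedsFlowCtrl(int64_t maxNodeTableNum, int64_t totalTableNum) {
  if (maxNodeTableNum <= 0 || totalTableNum < maxNodeTableNum) {
    return false;
  }
  return true;
}

int main(void) {
  printf("cap=200, total=150  -> %d\n", jobNeedsFlowCtrl(200, 150));   /* 0: below the cap */
  printf("cap=200, total=5000 -> %d\n", jobNeedsFlowCtrl(200, 5000));  /* 1: throttled */
  printf("cap=-1,  total=5000 -> %d\n", jobNeedsFlowCtrl(-1, 5000));   /* 0: cap disabled */
  return 0;
}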
source/libs/scheduler/src/scheduler.c

@@ -18,6 +18,7 @@
 #include "schInt.h"
 #include "tmsg.h"
 #include "tref.h"
+#include "tglobal.h"

 SSchedulerMgmt schMgmt = {
     .jobRef = -1,

@@ -30,11 +31,12 @@ int32_t schedulerInit() {
   }

   schMgmt.cfg.maxJobNum = SCHEDULE_DEFAULT_MAX_JOB_NUM;
-  schMgmt.cfg.maxNodeTableNum = SCHEDULE_DEFAULT_MAX_NODE_TABLE_NUM;
+  schMgmt.cfg.maxNodeTableNum = tsQueryMaxConcurrentTables;
   schMgmt.cfg.schPolicy = SCHEDULE_DEFAULT_POLICY;
   schMgmt.cfg.enableReSchedule = true;

-  qDebug("schedule policy init to %d", schMgmt.cfg.schPolicy);
+  qDebug("schedule init, policy: %d, maxNodeTableNum: %" PRId64 ", reSchedule:%d", schMgmt.cfg.schPolicy,
+         schMgmt.cfg.maxNodeTableNum, schMgmt.cfg.enableReSchedule);

   schMgmt.jobRef = taosOpenRef(schMgmt.cfg.maxJobNum, schFreeJobImpl);
   if (schMgmt.jobRef < 0) {
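The new qDebug line formats the int64_t maxNodeTableNum with the PRId64 macro, the same pattern used throughout the schFlowCtrl.c changes above. In standard C this requires <inttypes.h> and relies on string-literal concatenation around the macro; the values below are example values only, with 200 being the default of tsQueryMaxConcurrentTables from this commit.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  int64_t maxNodeTableNum = 200;  /* example: the default of tsQueryMaxConcurrentTables */
  int     schPolicy = 1;
  int     enableReSchedule = 1;

  /* PRId64 expands to the platform's length modifier (e.g. "ld" or "lld"),
   * and adjacent string literals are concatenated into a single format string */
  printf("schedule init, policy: %d, maxNodeTableNum: %" PRId64 ", reSchedule:%d\n",
         schPolicy, maxNodeTableNum, enableReSchedule);
  return 0;
}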