Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
pentaLiker
DolphinScheduler
提交
20734d3c
DolphinScheduler
项目概览
pentaLiker
/
DolphinScheduler
与 Fork 源项目一致
Fork自
apache / DolphinScheduler
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
DolphinScheduler
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
20734d3c
编写于
5月 25, 2019
作者:
L
lidongdai
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
update api docs
上级
d9bf06d8
变更
14
隐藏空白更改
内联
并排
Showing 14 changed files with 292 additions and 558 deletions
+292
-558
conf/escheduler.conf
conf/escheduler.conf
+0
-31
conf/install.sh
conf/install.sh
+0
-310
conf/run.sh
conf/run.sh
+0
-105
conf/zoo.cfg
conf/zoo.cfg
+0
-30
escheduler-api/src/main/java/cn/escheduler/api/configuration/SwaggerConfig.java
...n/java/cn/escheduler/api/configuration/SwaggerConfig.java
+1
-1
escheduler-api/src/main/java/cn/escheduler/api/controller/AlertGroupController.java
...va/cn/escheduler/api/controller/AlertGroupController.java
+45
-8
escheduler-api/src/main/java/cn/escheduler/api/controller/DataAnalysisController.java
.../cn/escheduler/api/controller/DataAnalysisController.java
+38
-6
escheduler-api/src/main/java/cn/escheduler/api/controller/DataSourceController.java
...va/cn/escheduler/api/controller/DataSourceController.java
+1
-1
escheduler-api/src/main/java/cn/escheduler/api/controller/TaskInstanceController.java
.../cn/escheduler/api/controller/TaskInstanceController.java
+2
-2
escheduler-api/src/main/java/cn/escheduler/api/controller/WorkerGroupController.java
...a/cn/escheduler/api/controller/WorkerGroupController.java
+29
-6
escheduler-api/src/main/resources/i18n/messages.properties
escheduler-api/src/main/resources/i18n/messages.properties
+83
-58
escheduler-api/src/main/resources/i18n/messages_en_US.properties
...ler-api/src/main/resources/i18n/messages_en_US.properties
+25
-0
escheduler-api/src/main/resources/i18n/messages_zh_CN.properties
...ler-api/src/main/resources/i18n/messages_zh_CN.properties
+26
-0
escheduler-api/src/main/resources/logback.xml
escheduler-api/src/main/resources/logback.xml
+42
-0
未找到文件。
conf/escheduler.conf
已删除
100644 → 0
浏览文件 @
d9bf06d8
# nginx server block for the EasyScheduler front-end: serves the static UI
# and reverse-proxies /escheduler to the API server on 127.0.0.1:12345.
server {
    listen       8888;        # UI access port
    server_name  localhost;

    #charset koi8-r;
    #access_log /var/log/nginx/host.access.log main;

    # Static front-end files.
    location / {
        root   /opt/escheduler/front/server;   # static file directory
        index  index.html index.html;
    }

    # API reverse proxy.
    location /escheduler {
        proxy_pass http://127.0.0.1:12345;     # API address

        # Forward client identity to the backend.
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        # NOTE(review): "x_real_ipP" looks like a typo for "x_real_ip",
        # kept as-is because the backend may read this exact header name.
        proxy_set_header x_real_ipP $remote_addr;
        proxy_set_header remote_addr $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

        # HTTP/1.1 + Upgrade/Connection headers enable websocket pass-through.
        proxy_http_version 1.1;
        proxy_connect_timeout 4s;
        proxy_read_timeout 30s;
        proxy_send_timeout 12s;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    #error_page 404 /404.html;

    # redirect server error pages to the static page /50x.html
    #
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
conf/install.sh
已删除
100644 → 0
浏览文件 @
d9bf06d8
#!/bin/bash
# Easy Scheduler install script: rewrites the project's *.properties /
# *.conf files in place (sed -i) with the deployment values defined below.
#
# NOTE(review): shebang changed from /bin/sh to /bin/bash — the script
# relies on [[ ... ]] and `source`, which are bashisms and fail under dash.

# Installation working directory.
# BUG FIX: the original read workDir=`/opt/easyscheduler` — backticks
# *execute* the path as a command; it must be a plain string assignment.
workDir="/opt/easyscheduler"
workDir=`cd ${workDir};pwd`

# To be compatible with MacOS and Linux: BSD sed requires an explicit
# backup-suffix argument after -i, GNU sed must not receive one. ${txt}
# is deliberately expanded UNQUOTED in every sed call below so that an
# empty value disappears from the command line entirely.
txt=""
if [[ "$OSTYPE" == "darwin"* ]]; then
    # Mac OSX
    txt="''"
elif [[ "$OSTYPE" == "linux-gnu" ]]; then
    # linux
    txt=""
elif [[ "$OSTYPE" == "cygwin" ]]; then
    # POSIX compatibility layer and Linux environment emulation for Windows
    echo "Easy Scheduler not support Windows operating system"
    exit 1
elif [[ "$OSTYPE" == "msys" ]]; then
    # Lightweight shell and GNU utilities compiled for Windows (part of MinGW)
    echo "Easy Scheduler not support Windows operating system"
    exit 1
elif [[ "$OSTYPE" == "win32" ]]; then
    echo "Easy Scheduler not support Windows operating system"
    exit 1
elif [[ "$OSTYPE" == "freebsd"* ]]; then
    # ...
    txt=""
else
    # Unknown.
    echo "Operating system unknown, please tell us(submit issue) for better service"
    exit 1
fi

# Load any existing run/install configuration before rewriting it.
source ${workDir}/conf/config/run_config.conf
source ${workDir}/conf/config/install_config.conf

# ---------- mysql configuration ----------
# mysql host:port
mysqlHost="127.0.0.1:3306"
# mysql database name
mysqlDb="easyscheduler"
# mysql user name
mysqlUserName="easyscheduler"
# mysql password
mysqlPassword="easyschedulereasyscheduler"

# ---------- conf/config/install_config.conf ----------
# install path; must NOT be the current directory (pwd)
installPath="/opt/easyscheduler"
# deploy user
deployUser="escheduler"
# zookeeper quorum
zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
# hosts to install on
ips="ark0,ark1,ark2,ark3,ark4"

# ---------- conf/config/run_config.conf ----------
# hosts running a Master
masters="ark0,ark1"
# hosts running a Worker
workers="ark2,ark3,ark4"
# host running the Alert server
alertServer="ark3"
# hosts running the Api server
apiServers="ark1"

# ---------- alert configuration ----------
# mail protocol
mailProtocol="SMTP"
# mail server host
mailServerHost="smtp.exmail.qq.com"
# mail server port
mailServerPort="25"
# mail sender
mailSender="xxxxxxxxxx"
# mail sender password
mailPassword="xxxxxxxxxx"
# directory Excel exports are written to
xlsFilePath="/tmp/xls"

# ---------- hadoop configuration ----------
# whether to enable hdfs; when "true" the hadoop settings below apply,
# when "false" they can be left unchanged
hdfsStartupSate="false"
# namenode address; HA requires core-site.xml / hdfs-site.xml under conf/
namenodeFs="hdfs://mycluster:8020"
# resourcemanager HA ips; leave empty for a single resourcemanager
yarnHaIps="192.168.xx.xx,192.168.xx.xx"
# hostname of the single resourcemanager (defaults suffice under HA)
singleYarnIp="ark1"
# hdfs root path; its owner must be the deploy user
hdfsPath="/escheduler"

# ---------- common configuration ----------
# program path
programPath="/tmp/escheduler"
# download path
downloadPath="/tmp/escheduler/download"
# task execution path
execPath="/tmp/escheduler/exec"
# SHELL environment file
shellEnvPath="$installPath/conf/env/.escheduler_env.sh"
# Python environment file
pythonEnvPath="$installPath/conf/env/escheduler_env.py"
# resource file suffixes viewable in the UI
resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml"
# development state: "true" keeps wrapped SHELL scripts under execPath,
# "false" deletes them right after execution
devState="true"

# ---------- zookeeper configuration ----------
# zk root path
zkRoot="/escheduler"
# znode recording dead servers
zkDeadServers="/escheduler/dead-servers"
# masters znode
zkMasters="/escheduler/masters"
# workers znode
zkWorkers="/escheduler/workers"
# zk master distributed lock
mastersLock="/escheduler/lock/masters"
# zk worker distributed lock
workersLock="/escheduler/lock/workers"
# zk master failover distributed lock
mastersFailover="/escheduler/lock/failover/masters"
# zk worker failover distributed lock
# BUG FIX: originally "/escheduler/lock/failover/masters" (copy-paste
# error) — workers would have contended on the masters' failover lock.
workersFailover="/escheduler/lock/failover/workers"
# zk session timeout
zkSessionTimeout="300"
# zk connection timeout
zkConnectionTimeout="300"
# zk retry sleep interval
zkRetrySleep="100"
# zk max retries
zkRetryMaxtime="5"

# ---------- master configuration ----------
# max master exec threads = max concurrent process instances
masterExecThreads="100"
# max parallel tasks per process instance
masterExecTaskNum="20"
# master heartbeat interval
masterHeartbeatInterval="10"
# task commit retries
masterTaskCommitRetryTimes="5"
# task commit retry interval
masterTaskCommitInterval="100"
# cpu-load / reserved-memory thresholds used to decide whether the
# master still has capacity to execute
masterMaxCupLoadAvg="10"
masterReservedMemory="1"

# ---------- worker configuration ----------
# worker exec threads
workerExecThreads="100"
# worker heartbeat interval
workerHeartbeatInterval="10"
# tasks fetched per poll
workerFetchTaskNum="10"
# cpu-load / reserved-memory capacity thresholds
workerMaxCupLoadAvg="10"
workerReservedMemory="1"

# ---------- api configuration ----------
# api server port
apiServerPort="12345"
# api session timeout
apiServerSessionTimeout="7200"
# api context path
apiServerContextPath="/escheduler/"
# spring max upload file size
springMaxFileSize="1024MB"
# spring max request size
springMaxRequestSize="1024MB"
# api max http post size
apiMaxHttpPostSize="5000000"

# Step 1: rewrite the configuration files in place.
echo "1,替换文件"
sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/dao/data_source.properties
sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${mysqlUserName}#g" conf/dao/data_source.properties
sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${mysqlPassword}#g" conf/dao/data_source.properties

sed -i ${txt} "s#org.quartz.dataSource.myDs.URL.*#org.quartz.dataSource.myDs.URL=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/quartz.properties
sed -i ${txt} "s#org.quartz.dataSource.myDs.user.*#org.quartz.dataSource.myDs.user=${mysqlUserName}#g" conf/quartz.properties
sed -i ${txt} "s#org.quartz.dataSource.myDs.password.*#org.quartz.dataSource.myDs.password=${mysqlPassword}#g" conf/quartz.properties

sed -i ${txt} "s#fs.defaultFS.*#fs.defaultFS=${namenodeFs}#g" conf/common/hadoop/hadoop.properties
sed -i ${txt} "s#yarn.resourcemanager.ha.rm.ids.*#yarn.resourcemanager.ha.rm.ids=${yarnHaIps}#g" conf/common/hadoop/hadoop.properties
sed -i ${txt} "s#yarn.application.status.address.*#yarn.application.status.address=http://${singleYarnIp}:8088/ws/v1/cluster/apps/%s#g" conf/common/hadoop/hadoop.properties

sed -i ${txt} "s#data.basedir.path.*#data.basedir.path=${programPath}#g" conf/common/common.properties
sed -i ${txt} "s#data.download.basedir.path.*#data.download.basedir.path=${downloadPath}#g" conf/common/common.properties
sed -i ${txt} "s#process.exec.basepath.*#process.exec.basepath=${execPath}#g" conf/common/common.properties
sed -i ${txt} "s#data.store2hdfs.basepath.*#data.store2hdfs.basepath=${hdfsPath}#g" conf/common/common.properties
sed -i ${txt} "s#hdfs.startup.state.*#hdfs.startup.state=${hdfsStartupSate}#g" conf/common/common.properties
sed -i ${txt} "s#escheduler.env.path.*#escheduler.env.path=${shellEnvPath}#g" conf/common/common.properties
sed -i ${txt} "s#escheduler.env.py.*#escheduler.env.py=${pythonEnvPath}#g" conf/common/common.properties
sed -i ${txt} "s#resource.view.suffixs.*#resource.view.suffixs=${resSuffixs}#g" conf/common/common.properties
sed -i ${txt} "s#development.state.*#development.state=${devState}#g" conf/common/common.properties

sed -i ${txt} "s#zookeeper.quorum.*#zookeeper.quorum=${zkQuorum}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.root.*#zookeeper.escheduler.root=${zkRoot}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.dead.servers.*#zookeeper.escheduler.dead.servers=${zkDeadServers}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.masters.*#zookeeper.escheduler.masters=${zkMasters}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.workers.*#zookeeper.escheduler.workers=${zkWorkers}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.lock.masters.*#zookeeper.escheduler.lock.masters=${mastersLock}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.lock.workers.*#zookeeper.escheduler.lock.workers=${workersLock}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.lock.failover.masters.*#zookeeper.escheduler.lock.failover.masters=${mastersFailover}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.lock.failover.workers.*#zookeeper.escheduler.lock.failover.workers=${workersFailover}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.session.timeout.*#zookeeper.session.timeout=${zkSessionTimeout}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.connection.timeout.*#zookeeper.connection.timeout=${zkConnectionTimeout}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.retry.sleep.*#zookeeper.retry.sleep=${zkRetrySleep}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.retry.maxtime.*#zookeeper.retry.maxtime=${zkRetryMaxtime}#g" conf/zookeeper.properties

sed -i ${txt} "s#master.exec.threads.*#master.exec.threads=${masterExecThreads}#g" conf/master.properties
sed -i ${txt} "s#master.exec.task.number.*#master.exec.task.number=${masterExecTaskNum}#g" conf/master.properties
sed -i ${txt} "s#master.heartbeat.interval.*#master.heartbeat.interval=${masterHeartbeatInterval}#g" conf/master.properties
sed -i ${txt} "s#master.task.commit.retryTimes.*#master.task.commit.retryTimes=${masterTaskCommitRetryTimes}#g" conf/master.properties
sed -i ${txt} "s#master.task.commit.interval.*#master.task.commit.interval=${masterTaskCommitInterval}#g" conf/master.properties
sed -i ${txt} "s#master.max.cpuload.avg.*#master.max.cpuload.avg=${masterMaxCupLoadAvg}#g" conf/master.properties
sed -i ${txt} "s#master.reserved.memory.*#master.reserved.memory=${masterReservedMemory}#g" conf/master.properties

sed -i ${txt} "s#worker.exec.threads.*#worker.exec.threads=${workerExecThreads}#g" conf/worker.properties
sed -i ${txt} "s#worker.heartbeat.interval.*#worker.heartbeat.interval=${workerHeartbeatInterval}#g" conf/worker.properties
sed -i ${txt} "s#worker.fetch.task.num.*#worker.fetch.task.num=${workerFetchTaskNum}#g" conf/worker.properties
sed -i ${txt} "s#worker.max.cpuload.avg.*#worker.max.cpuload.avg=${workerMaxCupLoadAvg}#g" conf/worker.properties
sed -i ${txt} "s#worker.reserved.memory.*#worker.reserved.memory=${workerReservedMemory}#g" conf/worker.properties

sed -i ${txt} "s#server.port.*#server.port=${apiServerPort}#g" conf/application.properties
sed -i ${txt} "s#server.session.timeout.*#server.session.timeout=${apiServerSessionTimeout}#g" conf/application.properties
sed -i ${txt} "s#server.context-path.*#server.context-path=${apiServerContextPath}#g" conf/application.properties
sed -i ${txt} "s#spring.http.multipart.max-file-size.*#spring.http.multipart.max-file-size=${springMaxFileSize}#g" conf/application.properties
sed -i ${txt} "s#spring.http.multipart.max-request-size.*#spring.http.multipart.max-request-size=${springMaxRequestSize}#g" conf/application.properties
sed -i ${txt} "s#server.max-http-post-size.*#server.max-http-post-size=${apiMaxHttpPostSize}#g" conf/application.properties

sed -i ${txt} "s#mail.protocol.*#mail.protocol=${mailProtocol}#g" conf/alert.properties
sed -i ${txt} "s#mail.server.host.*#mail.server.host=${mailServerHost}#g" conf/alert.properties
sed -i ${txt} "s#mail.server.port.*#mail.server.port=${mailServerPort}#g" conf/alert.properties
sed -i ${txt} "s#mail.sender.*#mail.sender=${mailSender}#g" conf/alert.properties
sed -i ${txt} "s#mail.passwd.*#mail.passwd=${mailPassword}#g" conf/alert.properties
sed -i ${txt} "s#xls.file.path.*#xls.file.path=${xlsFilePath}#g" conf/alert.properties

# NOTE(review): these patterns (e.g. "s#ips.*#...") match any line merely
# *containing* the key, not just lines starting with it — kept as-is to
# preserve the original rewrite behavior.
sed -i ${txt} "s#installPath.*#installPath=${installPath}#g" conf/config/install_config.conf
sed -i ${txt} "s#deployUser.*#deployUser=${deployUser}#g" conf/config/install_config.conf
sed -i ${txt} "s#ips.*#ips=${ips}#g" conf/config/install_config.conf

sed -i ${txt} "s#masters.*#masters=${masters}#g" conf/config/run_config.conf
sed -i ${txt} "s#workers.*#workers=${workers}#g" conf/config/run_config.conf
sed -i ${txt} "s#alertServer.*#alertServer=${alertServer}#g" conf/config/run_config.conf
sed -i ${txt} "s#apiServers.*#apiServers=${apiServers}#g" conf/config/run_config.conf
conf/run.sh
已删除
100644 → 0
浏览文件 @
d9bf06d8
#!/bin/bash
# Container entrypoint: initializes MySQL on first run, loads the
# EasyScheduler schemas, starts zookeeper / the scheduler daemons / nginx,
# then execs mysqld in the foreground as PID 1's long-running process.
#
# NOTE(review): shebang changed from /bin/sh to /bin/bash — the script
# uses `source`, a bashism.

# execute any pre-init scripts
for i in /scripts/pre-init.d/*sh
do
    if [ -e "${i}" ]; then
        echo "[i] pre-init.d - processing $i"
        . "${i}"
    fi
done

if [ -d "/run/mysqld" ]; then
    echo "[i] mysqld already present, skipping creation"
    chown -R mysql:mysql /run/mysqld
else
    echo "[i] mysqld not found, creating...."
    mkdir -p /run/mysqld
    chown -R mysql:mysql /run/mysqld
fi

if [ -d /var/lib/mysql/mysql ]; then
    echo "[i] MySQL directory already present, skipping creation"
    chown -R mysql:mysql /var/lib/mysql
else
    echo "[i] MySQL data directory not found, creating initial DBs"
    chown -R mysql:mysql /var/lib/mysql
    mysql_install_db --user=mysql --ldata=/var/lib/mysql > /dev/null

    if [ "$MYSQL_ROOT_PASSWORD" = "" ]; then
        # Generate a random root password when none was supplied.
        MYSQL_ROOT_PASSWORD=`pwgen 16 1`
        echo "[i] MySQL root Password: $MYSQL_ROOT_PASSWORD"
    fi

    MYSQL_DATABASE="easyscheduler"
    MYSQL_USER="easyscheduler"
    MYSQL_PASSWORD="easyschedulereasyscheduler"

    tfile=`mktemp`
    if [ ! -f "$tfile" ]; then
        # BUG FIX: original used `return 1`, which is invalid at script
        # top level (only legal inside a function or a sourced file).
        exit 1
    fi

    # Bootstrap SQL: set root password, drop the test DB.
    cat << EOF > $tfile
USE mysql;
FLUSH PRIVILEGES ;
GRANT ALL ON *.* TO 'root'@'%' identified by '$MYSQL_ROOT_PASSWORD' WITH GRANT OPTION ;
GRANT ALL ON *.* TO 'root'@'localhost' identified by '$MYSQL_ROOT_PASSWORD' WITH GRANT OPTION ;
SET PASSWORD FOR 'root'@'localhost'=PASSWORD('${MYSQL_ROOT_PASSWORD}') ;
DROP DATABASE IF EXISTS test ;
FLUSH PRIVILEGES ;
EOF

    if [ "$MYSQL_DATABASE" != "" ]; then
        echo "[i] Creating database: $MYSQL_DATABASE"
        echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` CHARACTER SET utf8 COLLATE utf8_general_ci;" >> $tfile

        if [ "$MYSQL_USER" != "" ]; then
            echo "[i] Creating user: $MYSQL_USER with password $MYSQL_PASSWORD"
            echo "GRANT ALL ON \`$MYSQL_DATABASE\`.* to '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD';" >> $tfile
        fi
    fi

    # Run the bootstrap SQL through mysqld in --bootstrap mode.
    /usr/bin/mysqld --user=mysql --bootstrap --verbose=0 --skip-name-resolve --skip-networking=0 < $tfile
    rm -f $tfile

    # Apply any user-provided init SQL (plain or gzipped).
    for f in /docker-entrypoint-initdb.d/*; do
        case "$f" in
            *.sql)
                echo "$0: running $f"
                /usr/bin/mysqld --user=mysql --bootstrap --verbose=0 --skip-name-resolve --skip-networking=0 < "$f"
                echo
                ;;
            *.sql.gz)
                echo "$0: running $f"
                # BUG FIX: the original piped gunzip into mysqld but ALSO
                # redirected stdin with `< "$f"`, which overrode the pipe
                # and fed raw gzip bytes to mysqld. The redirect is removed.
                gunzip -c "$f" | /usr/bin/mysqld --user=mysql --bootstrap --verbose=0 --skip-name-resolve --skip-networking=0
                echo
                ;;
            *)
                echo "$0: ignoring or entrypoint initdb empty $f"
                ;;
        esac
        echo
    done

    echo
    echo 'MySQL init process done. Ready for start up.'
    echo

    echo "exec /usr/bin/mysqld --user=mysql --console --skip-name-resolve --skip-networking=0" "$@"
fi

# execute any pre-exec scripts
for i in /scripts/pre-exec.d/*sh
do
    if [ -e "${i}" ]; then
        echo "[i] pre-exec.d - processing $i"
        . "${i}"
    fi
done

# Load the EasyScheduler and Quartz schemas.
mysql -ueasyscheduler -peasyschedulereasyscheduler --one-database easyscheduler -h127.0.0.1 < /opt/easyscheduler/sql/escheduler.sql
mysql -ueasyscheduler -peasyschedulereasyscheduler --one-database easyscheduler -h127.0.0.1 < /opt/easyscheduler/sql/quartz.sql

source /etc/profile
zkServer.sh start
cd /opt/easyscheduler
rm -rf /etc/nginx/conf.d/default.conf

# Start all scheduler daemons.
sh ./bin/escheduler-daemon.sh start master-server
sh ./bin/escheduler-daemon.sh start worker-server
sh ./bin/escheduler-daemon.sh start api-server
sh ./bin/escheduler-daemon.sh start logger-server
sh ./bin/escheduler-daemon.sh start alert-server

nginx -c /etc/nginx/nginx.conf

# Replace this shell with mysqld so it receives signals directly.
# ("$@" quoted — the original unquoted $@ would word-split arguments.)
exec /usr/bin/mysqld --user=mysql --console --skip-name-resolve --skip-networking=0 "$@"
conf/zoo.cfg
已删除
100644 → 0
浏览文件 @
d9bf06d8
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
# NOTE(review): dataDir is assigned twice in this file; this /tmp value
# is dead — the later dataDir=/opt/zookeeper/data (last one wins) is the
# effective setting. Consider deleting this line.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
# Effective snapshot and transaction-log locations (override the /tmp
# example above).
dataDir=/opt/zookeeper/data
dataLogDir=/opt/zookeeper/logs
escheduler-api/src/main/java/cn/escheduler/api/configuration/SwaggerConfig.java
浏览文件 @
20734d3c
...
...
@@ -48,7 +48,7 @@ public class SwaggerConfig implements WebMvcConfigurer {
private
ApiInfo
apiInfo
()
{
return
new
ApiInfoBuilder
().
title
(
"Easy Scheduler Api Docs"
).
description
(
"Easy Scheduler Api Docs"
)
.
version
(
"1.0.0"
).
build
();
.
build
();
}
...
...
escheduler-api/src/main/java/cn/escheduler/api/controller/AlertGroupController.java
浏览文件 @
20734d3c
...
...
@@ -21,6 +21,10 @@ import cn.escheduler.api.utils.Constants;
import
cn.escheduler.api.utils.Result
;
import
cn.escheduler.common.enums.AlertType
;
import
cn.escheduler.dao.model.User
;
import
io.swagger.annotations.Api
;
import
io.swagger.annotations.ApiImplicitParam
;
import
io.swagger.annotations.ApiImplicitParams
;
import
io.swagger.annotations.ApiOperation
;
import
org.slf4j.Logger
;
import
org.slf4j.LoggerFactory
;
import
org.springframework.beans.factory.annotation.Autowired
;
...
...
@@ -36,7 +40,7 @@ import static cn.escheduler.api.enums.Status.*;
/**
* alert group controller
*/
@Api
Ignore
@Api
(
tags
=
"ALERT_GROUP_TAG"
,
position
=
1
)
@RestController
@RequestMapping
(
"alert-group"
)
public
class
AlertGroupController
extends
BaseController
{
...
...
@@ -55,9 +59,15 @@ public class AlertGroupController extends BaseController{
* @param desc
* @return
*/
@ApiOperation
(
value
=
"createAlertgroup"
,
notes
=
"CREATE_ALERT_GROUP_NOTES"
)
@ApiImplicitParams
({
@ApiImplicitParam
(
name
=
"groupName"
,
value
=
"GROUP_NAME"
,
required
=
true
,
dataType
=
"String"
),
@ApiImplicitParam
(
name
=
"groupType"
,
value
=
"GROUP_TYPE"
,
required
=
true
,
dataType
=
"AlertType"
),
@ApiImplicitParam
(
name
=
"desc"
,
value
=
"DESC"
,
dataType
=
"String"
)
})
@PostMapping
(
value
=
"/create"
)
@ResponseStatus
(
HttpStatus
.
CREATED
)
public
Result
createAlertgroup
(
@RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
public
Result
createAlertgroup
(
@
ApiIgnore
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
@RequestParam
(
value
=
"groupName"
)
String
groupName
,
@RequestParam
(
value
=
"groupType"
)
AlertType
groupType
,
@RequestParam
(
value
=
"desc"
,
required
=
false
)
String
desc
)
{
...
...
@@ -77,9 +87,10 @@ public class AlertGroupController extends BaseController{
* @param loginUser
* @return
*/
@ApiOperation
(
value
=
"list"
,
notes
=
"QUERY_ALERT_GROUP_LIST_NOTES"
)
@GetMapping
(
value
=
"/list"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
list
(
@RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
)
{
public
Result
list
(
@
ApiIgnore
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
)
{
logger
.
info
(
"login user {}, query all alertGroup"
,
loginUser
.
getUserName
());
try
{
...
...
@@ -100,9 +111,15 @@ public class AlertGroupController extends BaseController{
* @param pageSize
* @return
*/
@ApiOperation
(
value
=
"queryTaskListPaging"
,
notes
=
"QUERY_TASK_INSTANCE_LIST_PAGING_NOTES"
)
@ApiImplicitParams
({
@ApiImplicitParam
(
name
=
"searchVal"
,
value
=
"SEARCH_VAL"
,
type
=
"String"
),
@ApiImplicitParam
(
name
=
"pageNo"
,
value
=
"PAGE_NO"
,
dataType
=
"Int"
,
example
=
"1"
),
@ApiImplicitParam
(
name
=
"pageSize"
,
value
=
"PAGE_SIZE"
,
dataType
=
"Int"
,
example
=
"20"
)
})
@GetMapping
(
value
=
"/list-paging"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
listPaging
(
@RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
public
Result
listPaging
(
@
ApiIgnore
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
@RequestParam
(
"pageNo"
)
Integer
pageNo
,
@RequestParam
(
value
=
"searchVal"
,
required
=
false
)
String
searchVal
,
@RequestParam
(
"pageSize"
)
Integer
pageSize
){
...
...
@@ -131,9 +148,16 @@ public class AlertGroupController extends BaseController{
* @param desc
* @return
*/
@ApiOperation
(
value
=
"updateAlertgroup"
,
notes
=
"UPDATE_ALERT_GROUP_NOTES"
)
@ApiImplicitParams
({
@ApiImplicitParam
(
name
=
"id"
,
value
=
"ALERT_GROUP_ID"
,
required
=
true
,
dataType
=
"Int"
,
example
=
"100"
),
@ApiImplicitParam
(
name
=
"groupName"
,
value
=
"GROUP_NAME"
,
required
=
true
,
dataType
=
"String"
),
@ApiImplicitParam
(
name
=
"groupType"
,
value
=
"GROUP_TYPE"
,
required
=
true
,
dataType
=
"AlertType"
),
@ApiImplicitParam
(
name
=
"desc"
,
value
=
"DESC"
,
dataType
=
"String"
)
})
@PostMapping
(
value
=
"/update"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
updateAlertgroup
(
@RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
public
Result
updateAlertgroup
(
@
ApiIgnore
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
@RequestParam
(
value
=
"id"
)
int
id
,
@RequestParam
(
value
=
"groupName"
)
String
groupName
,
@RequestParam
(
value
=
"groupType"
)
AlertType
groupType
,
...
...
@@ -156,9 +180,13 @@ public class AlertGroupController extends BaseController{
* @param id
* @return
*/
@ApiOperation
(
value
=
"delAlertgroupById"
,
notes
=
"DELETE_ALERT_GROUP_BY_ID_NOTES"
)
@ApiImplicitParams
({
@ApiImplicitParam
(
name
=
"id"
,
value
=
"ALERT_GROUP_ID"
,
required
=
true
,
dataType
=
"Int"
,
example
=
"100"
)
})
@PostMapping
(
value
=
"/delete"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
delAlertgroupById
(
@RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
public
Result
delAlertgroupById
(
@
ApiIgnore
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
@RequestParam
(
value
=
"id"
)
int
id
)
{
logger
.
info
(
"login user {}, delete AlertGroup, id: {},"
,
loginUser
.
getUserName
(),
id
);
try
{
...
...
@@ -178,9 +206,13 @@ public class AlertGroupController extends BaseController{
* @param groupName
* @return
*/
@ApiOperation
(
value
=
"verifyGroupName"
,
notes
=
"VERIFY_ALERT_GROUP_NAME_NOTES"
)
@ApiImplicitParams
({
@ApiImplicitParam
(
name
=
"groupName"
,
value
=
"GROUP_NAME"
,
required
=
true
,
dataType
=
"String"
),
})
@GetMapping
(
value
=
"/verify-group-name"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
verifyGroupName
(
@RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
public
Result
verifyGroupName
(
@
ApiIgnore
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
@RequestParam
(
value
=
"groupName"
)
String
groupName
)
{
logger
.
info
(
"login user {}, verfiy group name: {}"
,
...
...
@@ -196,9 +228,14 @@ public class AlertGroupController extends BaseController{
* @param userIds
* @return
*/
@ApiOperation
(
value
=
"grantUser"
,
notes
=
"GRANT_ALERT_GROUP_NOTES"
)
@ApiImplicitParams
({
@ApiImplicitParam
(
name
=
"id"
,
value
=
"ALERT_GROUP_ID"
,
required
=
true
,
dataType
=
"Int"
,
example
=
"100"
),
@ApiImplicitParam
(
name
=
"userIds"
,
value
=
"USER_IDS"
,
required
=
true
,
dataType
=
"String"
)
})
@PostMapping
(
value
=
"/grant-user"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
grantUser
(
@RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
public
Result
grantUser
(
@
ApiIgnore
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
@RequestParam
(
value
=
"alertgroupId"
)
int
alertgroupId
,
@RequestParam
(
value
=
"userIds"
)
String
userIds
)
{
logger
.
info
(
"login user {}, grant user, alertGroupId: {},userIds : {}"
,
loginUser
.
getUserName
(),
alertgroupId
,
userIds
);
...
...
escheduler-api/src/main/java/cn/escheduler/api/controller/DataAnalysisController.java
浏览文件 @
20734d3c
...
...
@@ -21,6 +21,10 @@ import cn.escheduler.api.service.DataAnalysisService;
import
cn.escheduler.api.utils.Constants
;
import
cn.escheduler.api.utils.Result
;
import
cn.escheduler.dao.model.User
;
import
io.swagger.annotations.Api
;
import
io.swagger.annotations.ApiImplicitParam
;
import
io.swagger.annotations.ApiImplicitParams
;
import
io.swagger.annotations.ApiOperation
;
import
org.slf4j.Logger
;
import
org.slf4j.LoggerFactory
;
import
org.springframework.beans.factory.annotation.Autowired
;
...
...
@@ -35,7 +39,7 @@ import static cn.escheduler.api.enums.Status.*;
/**
* data analysis controller
*/
@Api
Ignore
@Api
(
tags
=
"DATA_ANALYSIS_TAG"
,
position
=
1
)
@RestController
@RequestMapping
(
"projects/analysis"
)
public
class
DataAnalysisController
extends
BaseController
{
...
...
@@ -54,9 +58,15 @@ public class DataAnalysisController extends BaseController{
* @param projectId
* @return
*/
@ApiOperation
(
value
=
"countTaskState"
,
notes
=
"COUNT_TASK_STATE_NOTES"
)
@ApiImplicitParams
({
@ApiImplicitParam
(
name
=
"startDate"
,
value
=
"START_DATE"
,
dataType
=
"String"
),
@ApiImplicitParam
(
name
=
"endDate"
,
value
=
"END_DATE"
,
dataType
=
"String"
),
@ApiImplicitParam
(
name
=
"projectId"
,
value
=
"PROJECT_ID"
,
dataType
=
"Int"
,
example
=
"100"
)
})
@GetMapping
(
value
=
"/task-state-count"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
countTaskState
(
@RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
public
Result
countTaskState
(
@
ApiIgnore
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
@RequestParam
(
value
=
"startDate"
,
required
=
false
)
String
startDate
,
@RequestParam
(
value
=
"endDate"
,
required
=
false
)
String
endDate
,
@RequestParam
(
value
=
"projectId"
,
required
=
false
,
defaultValue
=
"0"
)
int
projectId
){
...
...
@@ -78,9 +88,15 @@ public class DataAnalysisController extends BaseController{
* @param projectId
* @return
*/
@ApiOperation
(
value
=
"countProcessInstanceState"
,
notes
=
"COUNT_PROCESS_INSTANCE_NOTES"
)
@ApiImplicitParams
({
@ApiImplicitParam
(
name
=
"startDate"
,
value
=
"START_DATE"
,
dataType
=
"String"
),
@ApiImplicitParam
(
name
=
"endDate"
,
value
=
"END_DATE"
,
dataType
=
"String"
),
@ApiImplicitParam
(
name
=
"projectId"
,
value
=
"PROJECT_ID"
,
dataType
=
"Int"
,
example
=
"100"
)
})
@GetMapping
(
value
=
"/process-state-count"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
countProcessInstanceState
(
@RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
public
Result
countProcessInstanceState
(
@
ApiIgnore
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
@RequestParam
(
value
=
"startDate"
,
required
=
false
)
String
startDate
,
@RequestParam
(
value
=
"endDate"
,
required
=
false
)
String
endDate
,
@RequestParam
(
value
=
"projectId"
,
required
=
false
,
defaultValue
=
"0"
)
int
projectId
){
...
...
@@ -102,9 +118,13 @@ public class DataAnalysisController extends BaseController{
* @param projectId
* @return
*/
@ApiOperation
(
value
=
"countDefinitionByUser"
,
notes
=
"COUNT_PROCESS_DEFINITION_BY_USER_NOTES"
)
@ApiImplicitParams
({
@ApiImplicitParam
(
name
=
"projectId"
,
value
=
"PROJECT_ID"
,
dataType
=
"Int"
,
example
=
"100"
)
})
@GetMapping
(
value
=
"/define-user-count"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
countDefinitionByUser
(
@RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
public
Result
countDefinitionByUser
(
@
ApiIgnore
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
@RequestParam
(
value
=
"projectId"
,
required
=
false
,
defaultValue
=
"0"
)
int
projectId
){
try
{
logger
.
info
(
"count process definition , user:{}, project id"
,
...
...
@@ -125,9 +145,15 @@ public class DataAnalysisController extends BaseController{
* @param projectId
* @return
*/
@ApiOperation
(
value
=
"countCommandState"
,
notes
=
"COUNT_COMMAND_STATE_NOTES"
)
@ApiImplicitParams
({
@ApiImplicitParam
(
name
=
"startDate"
,
value
=
"START_DATE"
,
dataType
=
"String"
),
@ApiImplicitParam
(
name
=
"endDate"
,
value
=
"END_DATE"
,
dataType
=
"String"
),
@ApiImplicitParam
(
name
=
"projectId"
,
value
=
"PROJECT_ID"
,
dataType
=
"Int"
,
example
=
"100"
)
})
@GetMapping
(
value
=
"/command-state-count"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
countCommandState
(
@RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
public
Result
countCommandState
(
@
ApiIgnore
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
@RequestParam
(
value
=
"startDate"
,
required
=
false
)
String
startDate
,
@RequestParam
(
value
=
"endDate"
,
required
=
false
)
String
endDate
,
@RequestParam
(
value
=
"projectId"
,
required
=
false
,
defaultValue
=
"0"
)
int
projectId
){
...
...
@@ -149,9 +175,15 @@ public class DataAnalysisController extends BaseController{
* @param projectId
* @return
*/
@ApiOperation
(
value
=
"countQueueState"
,
notes
=
"COUNT_QUEUE_STATE_NOTES"
)
@ApiImplicitParams
({
@ApiImplicitParam
(
name
=
"startDate"
,
value
=
"START_DATE"
,
dataType
=
"String"
),
@ApiImplicitParam
(
name
=
"endDate"
,
value
=
"END_DATE"
,
dataType
=
"String"
),
@ApiImplicitParam
(
name
=
"projectId"
,
value
=
"PROJECT_ID"
,
dataType
=
"Int"
,
example
=
"100"
)
})
@GetMapping
(
value
=
"/queue-count"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
countQueueState
(
@RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
public
Result
countQueueState
(
@
ApiIgnore
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
@RequestParam
(
value
=
"projectId"
,
required
=
false
,
defaultValue
=
"0"
)
int
projectId
){
try
{
logger
.
info
(
"count command state, user:{}, start date: {}, end date:{}, project id {}"
,
...
...
escheduler-api/src/main/java/cn/escheduler/api/controller/DataSourceController.java
浏览文件 @
20734d3c
...
...
@@ -41,7 +41,7 @@ import static cn.escheduler.api.enums.Status.*;
/**
* data source controller
*/
@Api
(
tags
=
"DATA_SOURCE_TAG"
,
position
=
1
)
@Api
(
tags
=
"DATA_SOURCE_TAG"
,
position
=
3
)
@RestController
@RequestMapping
(
"datasources"
)
public
class
DataSourceController
extends
BaseController
{
...
...
escheduler-api/src/main/java/cn/escheduler/api/controller/TaskInstanceController.java
浏览文件 @
20734d3c
...
...
@@ -63,8 +63,8 @@ public class TaskInstanceController extends BaseController{
@ApiImplicitParam
(
name
=
"host"
,
value
=
"HOST"
,
type
=
"String"
),
@ApiImplicitParam
(
name
=
"startDate"
,
value
=
"START_DATE"
,
type
=
"String"
),
@ApiImplicitParam
(
name
=
"endDate"
,
value
=
"END_DATE"
,
type
=
"String"
),
@ApiImplicitParam
(
name
=
"pageNo"
,
value
=
"PAGE_NO"
,
dataType
=
"Int"
,
example
=
"1
00
"
),
@ApiImplicitParam
(
name
=
"pageSize"
,
value
=
"PAGE_SIZE"
,
dataType
=
"Int"
,
example
=
"
10
0"
)
@ApiImplicitParam
(
name
=
"pageNo"
,
value
=
"PAGE_NO"
,
dataType
=
"Int"
,
example
=
"1"
),
@ApiImplicitParam
(
name
=
"pageSize"
,
value
=
"PAGE_SIZE"
,
dataType
=
"Int"
,
example
=
"
2
0"
)
})
@GetMapping
(
"/list-paging"
)
@ResponseStatus
(
HttpStatus
.
OK
)
...
...
escheduler-api/src/main/java/cn/escheduler/api/controller/WorkerGroupController.java
浏览文件 @
20734d3c
...
...
@@ -19,9 +19,12 @@ package cn.escheduler.api.controller;
import
cn.escheduler.api.enums.Status
;
import
cn.escheduler.api.service.WorkerGroupService
;
import
cn.escheduler.api.utils.Constants
;
import
cn.escheduler.api.utils.Result
;
import
cn.escheduler.dao.model.User
;
import
io.swagger.annotations.Api
;
import
io.swagger.annotations.ApiImplicitParam
;
import
io.swagger.annotations.ApiImplicitParams
;
import
io.swagger.annotations.ApiOperation
;
import
org.slf4j.Logger
;
import
org.slf4j.LoggerFactory
;
import
org.springframework.beans.factory.annotation.Autowired
;
...
...
@@ -31,10 +34,12 @@ import springfox.documentation.annotations.ApiIgnore;
import
java.util.Map
;
import
static
cn
.
escheduler
.
api
.
utils
.
Constants
.
SESSION_USER
;
/**
* worker group controller
*/
@Api
Ignore
@Api
(
tags
=
"WORKER_GROUP_TAG"
,
position
=
1
)
@RestController
@RequestMapping
(
"/worker-group"
)
public
class
WorkerGroupController
extends
BaseController
{
...
...
@@ -54,9 +59,15 @@ public class WorkerGroupController extends BaseController{
* @param ipList
* @return
*/
@ApiOperation
(
value
=
"saveWorkerGroup"
,
notes
=
"CREATE_WORKER_GROUP_NOTES"
)
@ApiImplicitParams
({
@ApiImplicitParam
(
name
=
"id"
,
value
=
"WORKER_GROUP_ID"
,
dataType
=
"Int"
,
example
=
"10"
,
defaultValue
=
"0"
),
@ApiImplicitParam
(
name
=
"name"
,
value
=
"WORKER_GROUP_NAME"
,
required
=
true
,
dataType
=
"String"
),
@ApiImplicitParam
(
name
=
"ipList"
,
value
=
"WORKER_IP_LIST"
,
required
=
true
,
dataType
=
"String"
)
})
@PostMapping
(
value
=
"/save"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
saveWorkerGroup
(
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
public
Result
saveWorkerGroup
(
@
ApiIgnore
@RequestAttribute
(
value
=
SESSION_USER
)
User
loginUser
,
@RequestParam
(
value
=
"id"
,
required
=
false
,
defaultValue
=
"0"
)
int
id
,
@RequestParam
(
value
=
"name"
)
String
name
,
@RequestParam
(
value
=
"ipList"
)
String
ipList
...
...
@@ -81,9 +92,15 @@ public class WorkerGroupController extends BaseController{
* @param pageSize
* @return
*/
@ApiOperation
(
value
=
"queryAllWorkerGroupsPaging"
,
notes
=
"QUERY_WORKER_GROUP_PAGING_NOTES"
)
@ApiImplicitParams
({
@ApiImplicitParam
(
name
=
"id"
,
value
=
"WORKER_GROUP_ID"
,
dataType
=
"Int"
,
example
=
"10"
,
defaultValue
=
"0"
),
@ApiImplicitParam
(
name
=
"name"
,
value
=
"WORKER_GROUP_NAME"
,
required
=
true
,
dataType
=
"String"
),
@ApiImplicitParam
(
name
=
"ipList"
,
value
=
"WORKER_IP_LIST"
,
required
=
true
,
dataType
=
"String"
)
})
@GetMapping
(
value
=
"/list-paging"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
queryAllWorkerGroupsPaging
(
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
public
Result
queryAllWorkerGroupsPaging
(
@
ApiIgnore
@RequestAttribute
(
value
=
SESSION_USER
)
User
loginUser
,
@RequestParam
(
"pageNo"
)
Integer
pageNo
,
@RequestParam
(
value
=
"searchVal"
,
required
=
false
)
String
searchVal
,
@RequestParam
(
"pageSize"
)
Integer
pageSize
...
...
@@ -105,9 +122,10 @@ public class WorkerGroupController extends BaseController{
* @param loginUser
* @return
*/
@ApiOperation
(
value
=
"queryAllWorkerGroups"
,
notes
=
"QUERY_WORKER_GROUP_LIST_NOTES"
)
@GetMapping
(
value
=
"/all-groups"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
queryAllWorkerGroups
(
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
public
Result
queryAllWorkerGroups
(
@
ApiIgnore
@RequestAttribute
(
value
=
SESSION_USER
)
User
loginUser
)
{
logger
.
info
(
"query all worker group: login user {}"
,
loginUser
.
getUserName
()
);
...
...
@@ -127,9 +145,14 @@ public class WorkerGroupController extends BaseController{
* @param id
* @return
*/
@ApiOperation
(
value
=
"deleteById"
,
notes
=
"DELETE_WORKER_GROUP_BY_ID_NOTES"
)
@ApiImplicitParams
({
@ApiImplicitParam
(
name
=
"id"
,
value
=
"WORKER_GROUP_ID"
,
required
=
true
,
dataType
=
"Int"
,
example
=
"10"
),
})
@GetMapping
(
value
=
"/delete-by-id"
)
@ResponseStatus
(
HttpStatus
.
OK
)
public
Result
deleteById
(
@
RequestAttribute
(
value
=
Constants
.
SESSION_USER
)
User
loginUser
,
public
Result
deleteById
(
@
ApiIgnore
@RequestAttribute
(
value
=
SESSION_USER
)
User
loginUser
,
@RequestParam
(
"id"
)
Integer
id
)
{
logger
.
info
(
"delete worker group: login user {}, id:{} "
,
...
...
escheduler-api/src/main/resources/messages.properties
→
escheduler-api/src/main/resources/
i18n/
messages.properties
浏览文件 @
20734d3c
QUERY_SCHEDULE_LIST_NOTES
=
query schedule list
DESC
=
description
GROUP_NAME
=
group name
GROUP_TYPE
=
group type
QUERY_ALERT_GROUP_LIST_NOTES
=
query alert group list
UPDATE_ALERT_GROUP_NOTES
=
update alert group
DELETE_ALERT_GROUP_BY_ID_NOTES
=
delete alert group by id
VERIFY_ALERT_GROUP_NAME_NOTES
=
verify alert group name, check alert group exist or not
GRANT_ALERT_GROUP_NOTES
=
grant alert group
USER_IDS
=
user id list
ALERT_GROUP_TAG
=
alert group related operation
CREATE_ALERT_GROUP_NOTES
=
create alert group
WORKER_GROUP_TAG
=
worker group related operation
SAVE_WORKER_GROUP_NOTES
=
create worker group
WORKER_GROUP_NAME
=
worker group name
WORKER_IP_LIST
=
worker ip list, eg. 192.168.1.1,192.168.1.2
QUERY_WORKER_GROUP_PAGING_NOTES
=
query worker group paging
QUERY_WORKER_GROUP_LIST_NOTES
=
query worker group list
DELETE_WORKER_GROUP_BY_ID_NOTES
=
delete worker group by id
DATA_ANALYSIS_TAG
=
analysis related operation of task state
COUNT_TASK_STATE_NOTES
=
count task state
COUNT_PROCESS_INSTANCE_NOTES
=
count process instance state
COUNT_PROCESS_DEFINITION_BY_USER_NOTES
=
count process definition by user
COUNT_COMMAND_STATE_NOTES
=
count command state
COUNT_QUEUE_STATE_NOTES
=
count the running status of the task in the queue
\
ACCESS_TOKEN_TAG
=
access token related operation
MONITOR_TAG
=
monitor related operation
MASTER_LIST_NOTES
=
master server list
...
...
@@ -18,53 +43,53 @@ DB_TYPE=database type
DATA_SOURCE_HOST
=
DATA SOURCE HOST
DATA_SOURCE_PORT
=
data source port
DATABASE_NAME
=
database name
QUEUE_TAG
=
QUERY_QUEUE_LIST_NOTES
=
QUERY_QUEUE_LIST_PAGING_NOTES
=
CREATE_QUEUE_NOTES
=
YARN_QUEUE_NAME
=
QUEUE_ID
=
TENANT_DESC
=
QUERY_TENANT_LIST_PAGING_NOTES
=
QUERY_TENANT_LIST_NOTES
=
UPDATE_TENANT_NOTES
=
DELETE_TENANT_NOTES
=
RESOURCES_TAG
=
CREATE_RESOURCE_NOTES
=
RESOURCE_TYPE
=
RESOURCE_NAME
=
RESOURCE_DESC
=
RESOURCE_FILE
=
RESOURCE_ID
=
QUERY_RESOURCE_LIST_NOTES
=
DELETE_RESOURCE_BY_ID_NOTES
=
VIEW_RESOURCE_BY_ID_NOTES
=
ONLINE_CREATE_RESOURCE_NOTES
=
SUFFIX
=
CONTENT
=
UPDATE_RESOURCE_NOTES
=
DOWNLOAD_RESOURCE_NOTES
=
CREATE_UDF_FUNCTION_NOTES
=
UDF_TYPE
=
FUNC_NAME
=
CLASS_NAME
=
ARG_TYPES
=
UDF_DESC
=
VIEW_UDF_FUNCTION_NOTES
=
UPDATE_UDF_FUNCTION_NOTES
=
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES
=
VERIFY_UDF_FUNCTION_NAME_NOTES
=
DELETE_UDF_FUNCTION_NOTES
=
AUTHORIZED_FILE_NOTES
=
UNAUTHORIZED_FILE_NOTES
=
AUTHORIZED_UDF_FUNC_NOTES
=
UNAUTHORIZED_UDF_FUNC_NOTES
=
VERIFY_QUEUE_NOTES
=
TENANT_TAG
=
CREATE_TENANT_NOTES
=
TENANT_CODE
=
TENANT_NAME
=
QUEUE_NAME
=
QUEUE_TAG
=
queue related operation
QUERY_QUEUE_LIST_NOTES
=
query queue list
QUERY_QUEUE_LIST_PAGING_NOTES
=
query queue list paging
CREATE_QUEUE_NOTES
=
create queue
YARN_QUEUE_NAME
=
yarn(hadoop) queue name
QUEUE_ID
=
queue id
TENANT_DESC
=
tenant desc
QUERY_TENANT_LIST_PAGING_NOTES
=
query tenant list paging
QUERY_TENANT_LIST_NOTES
=
query tenant list
UPDATE_TENANT_NOTES
=
update tenant
DELETE_TENANT_NOTES
=
delete tenant
RESOURCES_TAG
=
resource center related operation
CREATE_RESOURCE_NOTES
=
create resource
RESOURCE_TYPE
=
resource file type
RESOURCE_NAME
=
resource name
RESOURCE_DESC
=
resource file desc
RESOURCE_FILE
=
resource file
RESOURCE_ID
=
resource id
QUERY_RESOURCE_LIST_NOTES
=
query resource list
DELETE_RESOURCE_BY_ID_NOTES
=
delete resource by id
VIEW_RESOURCE_BY_ID_NOTES
=
view resource by id
ONLINE_CREATE_RESOURCE_NOTES
=
online create resource
SUFFIX
=
resource file suffix
CONTENT
=
resource file content
UPDATE_RESOURCE_NOTES
=
edit resource file online
DOWNLOAD_RESOURCE_NOTES
=
download resource file
CREATE_UDF_FUNCTION_NOTES
=
create udf function
UDF_TYPE
=
UDF type
FUNC_NAME
=
function name
CLASS_NAME
=
package and class name
ARG_TYPES
=
arguments
UDF_DESC
=
udf desc
VIEW_UDF_FUNCTION_NOTES
=
view udf function
UPDATE_UDF_FUNCTION_NOTES
=
update udf function
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES
=
query udf function list paging
VERIFY_UDF_FUNCTION_NAME_NOTES
=
verify udf function name
DELETE_UDF_FUNCTION_NOTES
=
delete udf function
AUTHORIZED_FILE_NOTES
=
authorized file
UNAUTHORIZED_FILE_NOTES
=
unauthorized file
AUTHORIZED_UDF_FUNC_NOTES
=
authorized udf func
UNAUTHORIZED_UDF_FUNC_NOTES
=
unauthorized udf func
VERIFY_QUEUE_NOTES
=
verify queue
TENANT_TAG
=
tenant related operation
CREATE_TENANT_NOTES
=
create tenant
TENANT_CODE
=
tenant code
TENANT_NAME
=
tenant name
QUEUE_NAME
=
queue name
PASSWORD
=
password
DATA_SOURCE_OTHER
=
jdbc connection params, format:{"key1":"value1",...}
PROJECT_TAG
=
project related operation
...
...
@@ -178,14 +203,14 @@ HOST=ip address of running task
START_DATE
=
start date
END_DATE
=
end date
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES
=
query task list by process instance id
UPDATE_DATA_SOURCE_NOTES
=
DATA_SOURCE_ID
=
QUERY_DATA_SOURCE_NOTES
=
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES
=
QUERY_DATA_SOURCE_LIST_PAGING_NOTES
=
CONNECT_DATA_SOURCE_NOTES
=
CONNECT_DATA_SOURCE_TEST_NOTES
=
DELETE_DATA_SOURCE_NOTES
=
VERIFY_DATA_SOURCE_NOTES
=
UNAUTHORIZED_DATA_SOURCE_NOTES
=
AUTHORIZED_DATA_SOURCE_NOTES
=
UPDATE_DATA_SOURCE_NOTES
=
update data source
DATA_SOURCE_ID
=
DATA SOURCE ID
QUERY_DATA_SOURCE_NOTES
=
query data source by id
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES
=
query data source list by database type
QUERY_DATA_SOURCE_LIST_PAGING_NOTES
=
query data source list paging
CONNECT_DATA_SOURCE_NOTES
=
CONNECT DATA SOURCE
CONNECT_DATA_SOURCE_TEST_NOTES
=
connect data source test
DELETE_DATA_SOURCE_NOTES
=
delete data source
VERIFY_DATA_SOURCE_NOTES
=
verify data source
UNAUTHORIZED_DATA_SOURCE_NOTES
=
unauthorized data source
AUTHORIZED_DATA_SOURCE_NOTES
=
authorized data source
escheduler-api/src/main/resources/messages_en_US.properties
→
escheduler-api/src/main/resources/
i18n/
messages_en_US.properties
浏览文件 @
20734d3c
QUERY_SCHEDULE_LIST_NOTES
=
query schedule list
DESC
=
description
GROUP_NAME
=
group name
GROUP_TYPE
=
group type
QUERY_ALERT_GROUP_LIST_NOTES
=
query alert group list
UPDATE_ALERT_GROUP_NOTES
=
update alert group
DELETE_ALERT_GROUP_BY_ID_NOTES
=
delete alert group by id
VERIFY_ALERT_GROUP_NAME_NOTES
=
verify alert group name, check alert group exist or not
GRANT_ALERT_GROUP_NOTES
=
grant alert group
USER_IDS
=
user id list
ALERT_GROUP_TAG
=
alert group related operation
CREATE_ALERT_GROUP_NOTES
=
create alert group
WORKER_GROUP_TAG
=
worker group related operation
SAVE_WORKER_GROUP_NOTES
=
create worker group
WORKER_GROUP_NAME
=
worker group name
WORKER_IP_LIST
=
worker ip list, eg. 192.168.1.1,192.168.1.2
QUERY_WORKER_GROUP_PAGING_NOTES
=
query worker group paging
QUERY_WORKER_GROUP_LIST_NOTES
=
query worker group list
DELETE_WORKER_GROUP_BY_ID_NOTES
=
delete worker group by id
DATA_ANALYSIS_TAG
=
analysis related operation of task state
COUNT_TASK_STATE_NOTES
=
count task state
COUNT_PROCESS_INSTANCE_NOTES
=
count process instance state
COUNT_PROCESS_DEFINITION_BY_USER_NOTES
=
count process definition by user
COUNT_COMMAND_STATE_NOTES
=
count command state
COUNT_QUEUE_STATE_NOTES
=
count the running status of the task in the queue
\
ACCESS_TOKEN_TAG
=
access token related operation
MONITOR_TAG
=
monitor related operation
MASTER_LIST_NOTES
=
master server list
...
...
escheduler-api/src/main/resources/messages_zh_CN.properties
→
escheduler-api/src/main/resources/
i18n/
messages_zh_CN.properties
浏览文件 @
20734d3c
QUERY_SCHEDULE_LIST_NOTES
=
查询定时列表
DESC
=
备注(描述)
GROUP_NAME
=
组名称
GROUP_TYPE
=
组类型
QUERY_ALERT_GROUP_LIST_NOTES
=
告警组列表
\
UPDATE_ALERT_GROUP_NOTES
=
编辑(更新)告警组
DELETE_ALERT_GROUP_BY_ID_NOTES
=
删除告警组通过ID
VERIFY_ALERT_GROUP_NAME_NOTES
=
检查告警组是否存在
GRANT_ALERT_GROUP_NOTES
=
授权告警组
USER_IDS
=
用户ID列表
ALERT_GROUP_TAG
=
告警组相关操作
WORKER_GROUP_TAG
=
Worker分组管理
SAVE_WORKER_GROUP_NOTES
=
创建Worker分组
\
WORKER_GROUP_NAME
=
Worker分组名称
WORKER_IP_LIST
=
Worker ip列表,注意:多个IP地址以逗号分割
\
QUERY_WORKER_GROUP_PAGING_NOTES
=
Worker分组管理
QUERY_WORKER_GROUP_LIST_NOTES
=
查询worker group分组
DELETE_WORKER_GROUP_BY_ID_NOTES
=
删除worker group通过ID
DATA_ANALYSIS_TAG
=
任务状态分析相关操作
COUNT_TASK_STATE_NOTES
=
任务状态统计
COUNT_PROCESS_INSTANCE_NOTES
=
统计流程实例状态
COUNT_PROCESS_DEFINITION_BY_USER_NOTES
=
统计用户创建的流程定义
COUNT_COMMAND_STATE_NOTES
=
统计命令状态
COUNT_QUEUE_STATE_NOTES
=
统计队列里任务状态
ACCESS_TOKEN_TAG
=
access token相关操作,需要先登录
MONITOR_TAG
=
监控相关操作
MASTER_LIST_NOTES
=
master服务列表
...
...
escheduler-api/src/main/resources/logback.xml
0 → 100644
浏览文件 @
20734d3c
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration
scan=
"true"
scanPeriod=
"120 seconds"
>
<logger
name=
"org.apache.zookeeper"
level=
"WARN"
/>
<logger
name=
"org.apache.hbase"
level=
"WARN"
/>
<logger
name=
"org.apache.hadoop"
level=
"WARN"
/>
<property
name=
"log.base"
value=
"logs"
/>
<appender
name=
"STDOUT"
class=
"ch.qos.logback.core.ConsoleAppender"
>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>
UTF-8
</charset>
</encoder>
</appender>
<appender
name=
"APISERVERLOGFILE"
class=
"ch.qos.logback.core.rolling.RollingFileAppender"
>
<!-- Log level filter -->
<filter
class=
"ch.qos.logback.classic.filter.ThresholdFilter"
>
<level>
INFO
</level>
</filter>
<file>
${log.base}/escheduler-api-server.log
</file>
<rollingPolicy
class=
"ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy"
>
<fileNamePattern>
${log.base}/escheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log
</fileNamePattern>
<maxHistory>
168
</maxHistory>
<maxFileSize>
64MB
</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>
UTF-8
</charset>
</encoder>
</appender>
<root
level=
"INFO"
>
<appender-ref
ref=
"STDOUT"
/>
</root>
</configuration>
\ No newline at end of file
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录