提交 6fed9a24 编写于 作者: H huyuanming

Merge remote-tracking branch 'upstream/branch-1.0.2' into branch-1.0.2

# Maintained by jimmy
#Email: zhengge2012@gmail.com
FROM anapsix/alpine-java:8_jdk
WORKDIR /tmp
RUN wget http://archive.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.tar.gz
RUN tar -zxvf apache-maven-3.6.1-bin.tar.gz && rm apache-maven-3.6.1-bin.tar.gz
RUN mv apache-maven-3.6.1 /usr/lib/mvn
RUN chown -R root:root /usr/lib/mvn
RUN ln -s /usr/lib/mvn/bin/mvn /usr/bin/mvn
RUN wget https://archive.apache.org/dist/zookeeper/zookeeper-3.4.6/zookeeper-3.4.6.tar.gz
RUN tar -zxvf zookeeper-3.4.6.tar.gz
RUN mv zookeeper-3.4.6 /opt/zookeeper
RUN rm -rf zookeeper-3.4.6.tar.gz
RUN echo "export ZOOKEEPER_HOME=/opt/zookeeper" >>/etc/profile
RUN echo "export PATH=$PATH:$ZOOKEEPER_HOME/bin" >>/etc/profile
ADD conf/zoo.cfg /opt/zookeeper/conf/zoo.cfg
#RUN source /etc/profile
#RUN zkServer.sh start
RUN apk add --no-cache git npm nginx mariadb mariadb-client mariadb-server-utils pwgen
WORKDIR /opt
RUN git clone https://github.com/analysys/EasyScheduler.git
WORKDIR /opt/EasyScheduler
RUN mvn -U clean package assembly:assembly -Dmaven.test.skip=true
RUN mv /opt/EasyScheduler/target/escheduler-1.0.0-SNAPSHOT /opt/easyscheduler
WORKDIR /opt/EasyScheduler/escheduler-ui
RUN npm install
RUN npm audit fix
RUN npm run build
RUN mkdir -p /opt/escheduler/front/server
RUN cp -rfv dist/* /opt/escheduler/front/server
WORKDIR /
RUN rm -rf /opt/EasyScheduler
#configure mysql server https://github.com/yobasystems/alpine-mariadb/tree/master/alpine-mariadb-amd64
ADD conf/run.sh /scripts/run.sh
RUN mkdir /docker-entrypoint-initdb.d && \
mkdir /scripts/pre-exec.d && \
mkdir /scripts/pre-init.d && \
chmod -R 755 /scripts
RUN rm -rf /var/cache/apk/*
EXPOSE 8888
ENTRYPOINT ["/scripts/run.sh"]
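To try the image end to end, a minimal sketch (the image tag and container name are assumptions; 8888 is the nginx port exposed above):

```
# Build the image from the directory containing this Dockerfile and the conf/ directory
docker build -t easyscheduler:1.0.2 .
# Run it and publish the front-end port declared by EXPOSE 8888
docker run -d --name easyscheduler -p 8888:8888 easyscheduler:1.0.2
```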
Easy Scheduler
============
[![License](https://img.shields.io/badge/license-Apache%202-4EB1BA.svg)](https://www.apache.org/licenses/LICENSE-2.0.html)
......@@ -43,7 +43,9 @@ Easy Scheduler
- [**Upgrade Documentation**](https://analysys.github.io/easyscheduler_docs_cn/升级文档.html?_blank "Upgrade Documentation")
- <a href="http://52.82.13.76:8888" target="_blank">Try it out</a> Regular user login: demo/demo123
- <a href="http://52.82.13.76:8888" target="_blank">Try it out</a>
- [**FAQ**](https://analysys.github.io/easyscheduler_docs_cn/FAQ.html?_blank "FAQ")
For more documentation, see the <a href="https://analysys.github.io/easyscheduler_docs_cn/" target="_blank">EasyScheduler online documentation (Chinese)</a>
......
server {
listen 8888; # access port
server_name localhost;
#charset koi8-r;
#access_log /var/log/nginx/host.access.log main;
location / {
root /opt/escheduler/front/server; # static files directory
index index.html index.htm;
}
location /escheduler {
proxy_pass http://127.0.0.1:12345; # API address
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header x_real_ipP $remote_addr;
proxy_set_header remote_addr $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_http_version 1.1;
proxy_connect_timeout 4s;
proxy_read_timeout 30s;
proxy_send_timeout 12s;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
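A quick sanity check of this nginx configuration (a sketch, assuming port 8888 on the host is reachable):

```
# the static front end is served from /opt/escheduler/front/server
curl -I http://localhost:8888/
# requests under /escheduler are proxied to the API server on port 12345
curl -I http://localhost:8888/escheduler/
```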
#!/bin/sh
workDir="/opt/easyscheduler"
workDir=`cd ${workDir};pwd`
#To be compatible with MacOS and Linux
txt=""
if [[ "$OSTYPE" == "darwin"* ]]; then
# Mac OSX
txt="''"
elif [[ "$OSTYPE" == "linux-gnu" ]]; then
# linux
txt=""
elif [[ "$OSTYPE" == "cygwin" ]]; then
# POSIX compatibility layer and Linux environment emulation for Windows
echo "Easy Scheduler not support Windows operating system"
exit 1
elif [[ "$OSTYPE" == "msys" ]]; then
# Lightweight shell and GNU utilities compiled for Windows (part of MinGW)
echo "Easy Scheduler not support Windows operating system"
exit 1
elif [[ "$OSTYPE" == "win32" ]]; then
echo "Easy Scheduler not support Windows operating system"
exit 1
elif [[ "$OSTYPE" == "freebsd"* ]]; then
# ...
txt=""
else
# Unknown.
echo "Operating system unknown, please tell us(submit issue) for better service"
exit 1
fi
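# Illustrative note: BSD sed on macOS requires an explicit (here empty) backup suffix for
# in-place edits, e.g. `sed -i '' "s#old#new#g" file`, whereas GNU sed on Linux accepts
# `sed -i "s#old#new#g" file`. The ${txt} value chosen above supplies that suffix only on macOS.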
source ${workDir}/conf/config/run_config.conf
source ${workDir}/conf/config/install_config.conf
# MySQL configuration
# MySQL host and port
mysqlHost="127.0.0.1:3306"
# MySQL database name
mysqlDb="easyscheduler"
# MySQL user name
mysqlUserName="easyscheduler"
# MySQL password
mysqlPassword="easyschedulereasyscheduler"
# conf/config/install_config.conf configuration
# Installation path; must not be the same as the current path (pwd)
installPath="/opt/easyscheduler"
# Deployment user
deployUser="escheduler"
# ZooKeeper cluster
zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
# Installation hosts
ips="ark0,ark1,ark2,ark3,ark4"
# conf/config/run_config.conf configuration
# Machines that run the Master
masters="ark0,ark1"
# Machines that run the Worker
workers="ark2,ark3,ark4"
# Machine that runs the Alert server
alertServer="ark3"
# Machines that run the API server
apiServers="ark1"
# alert configuration
# Mail protocol
mailProtocol="SMTP"
# Mail server host
mailServerHost="smtp.exmail.qq.com"
# Mail server port
mailServerPort="25"
# Sender
mailSender="xxxxxxxxxx"
# Sender password
mailPassword="xxxxxxxxxx"
# Excel download path
xlsFilePath="/tmp/xls"
# Hadoop configuration
# Whether to enable HDFS: if "true", the Hadoop-related parameters below must be configured;
# if "false", the settings below do not need to be changed
hdfsStartupSate="false"
# NameNode address; HA is supported, in which case core-site.xml and hdfs-site.xml must be placed in the conf directory
namenodeFs="hdfs://mycluster:8020"
# ResourceManager HA configuration; leave this empty for a single ResourceManager
yarnHaIps="192.168.xx.xx,192.168.xx.xx"
# For a single ResourceManager, configure only one host name; for ResourceManager HA the default is fine
singleYarnIp="ark1"
# HDFS root path; the owner of the root path must be the deployment user
hdfsPath="/escheduler"
# common configuration
# Program path
programPath="/tmp/escheduler"
# Download path
downloadPath="/tmp/escheduler/download"
# Task execution path
execPath="/tmp/escheduler/exec"
# SHELL environment variables path
shellEnvPath="$installPath/conf/env/.escheduler_env.sh"
# Python environment variables path
pythonEnvPath="$installPath/conf/env/escheduler_env.py"
# Resource file suffixes
resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml"
# Development state: if "true", the wrapped SHELL script can be inspected in the execPath directory;
# if "false", it is deleted as soon as execution finishes
devState="true"
# ZooKeeper configuration
# ZooKeeper root directory
zkRoot="/escheduler"
# ZooKeeper directory used to record dead servers
zkDeadServers="/escheduler/dead-servers"
# masters directory
zkMasters="/escheduler/masters"
# workers directory
zkWorkers="/escheduler/workers"
# ZooKeeper master distributed lock
mastersLock="/escheduler/lock/masters"
# ZooKeeper worker distributed lock
workersLock="/escheduler/lock/workers"
# ZooKeeper master failover distributed lock
mastersFailover="/escheduler/lock/failover/masters"
# ZooKeeper worker failover distributed lock
workersFailover="/escheduler/lock/failover/workers"
# ZooKeeper session timeout
zkSessionTimeout="300"
# ZooKeeper connection timeout
zkConnectionTimeout="300"
# ZooKeeper retry interval
zkRetrySleep="100"
# ZooKeeper maximum number of retries
zkRetryMaxtime="5"
# master configuration
# Maximum number of master execution threads, i.e. the maximum number of process instances running in parallel
masterExecThreads="100"
# Maximum number of master task execution threads, i.e. the maximum parallelism within each process instance
masterExecTaskNum="20"
# Master heartbeat interval
masterHeartbeatInterval="10"
# Number of retries for master task submission
masterTaskCommitRetryTimes="5"
# Interval between master task submission retries
masterTaskCommitInterval="100"
# Maximum average CPU load of the master, used to decide whether the master still has capacity to execute
masterMaxCupLoadAvg="10"
# Master reserved memory, used to decide whether the master still has capacity to execute
masterReservedMemory="1"
# worker configuration
# Worker execution threads
workerExecThreads="100"
# Worker heartbeat interval
workerHeartbeatInterval="10"
# Number of tasks a worker fetches at a time
workerFetchTaskNum="10"
# Maximum average CPU load of the worker, used to decide whether the worker still has capacity to execute
workerMaxCupLoadAvg="10"
# Worker reserved memory, used to decide whether the worker still has capacity to execute
workerReservedMemory="1"
# api configuration
# API service port
apiServerPort="12345"
# API session timeout
apiServerSessionTimeout="7200"
# API context path
apiServerContextPath="/escheduler/"
# Spring maximum file size
springMaxFileSize="1024MB"
# Spring maximum request size
springMaxRequestSize="1024MB"
# API maximum POST request size
apiMaxHttpPostSize="5000000"
# 1. Replace configuration files
echo "1. Replace configuration files"
sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/dao/data_source.properties
sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${mysqlUserName}#g" conf/dao/data_source.properties
sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${mysqlPassword}#g" conf/dao/data_source.properties
sed -i ${txt} "s#org.quartz.dataSource.myDs.URL.*#org.quartz.dataSource.myDs.URL=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/quartz.properties
sed -i ${txt} "s#org.quartz.dataSource.myDs.user.*#org.quartz.dataSource.myDs.user=${mysqlUserName}#g" conf/quartz.properties
sed -i ${txt} "s#org.quartz.dataSource.myDs.password.*#org.quartz.dataSource.myDs.password=${mysqlPassword}#g" conf/quartz.properties
sed -i ${txt} "s#fs.defaultFS.*#fs.defaultFS=${namenodeFs}#g" conf/common/hadoop/hadoop.properties
sed -i ${txt} "s#yarn.resourcemanager.ha.rm.ids.*#yarn.resourcemanager.ha.rm.ids=${yarnHaIps}#g" conf/common/hadoop/hadoop.properties
sed -i ${txt} "s#yarn.application.status.address.*#yarn.application.status.address=http://${singleYarnIp}:8088/ws/v1/cluster/apps/%s#g" conf/common/hadoop/hadoop.properties
sed -i ${txt} "s#data.basedir.path.*#data.basedir.path=${programPath}#g" conf/common/common.properties
sed -i ${txt} "s#data.download.basedir.path.*#data.download.basedir.path=${downloadPath}#g" conf/common/common.properties
sed -i ${txt} "s#process.exec.basepath.*#process.exec.basepath=${execPath}#g" conf/common/common.properties
sed -i ${txt} "s#data.store2hdfs.basepath.*#data.store2hdfs.basepath=${hdfsPath}#g" conf/common/common.properties
sed -i ${txt} "s#hdfs.startup.state.*#hdfs.startup.state=${hdfsStartupSate}#g" conf/common/common.properties
sed -i ${txt} "s#escheduler.env.path.*#escheduler.env.path=${shellEnvPath}#g" conf/common/common.properties
sed -i ${txt} "s#escheduler.env.py.*#escheduler.env.py=${pythonEnvPath}#g" conf/common/common.properties
sed -i ${txt} "s#resource.view.suffixs.*#resource.view.suffixs=${resSuffixs}#g" conf/common/common.properties
sed -i ${txt} "s#development.state.*#development.state=${devState}#g" conf/common/common.properties
sed -i ${txt} "s#zookeeper.quorum.*#zookeeper.quorum=${zkQuorum}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.root.*#zookeeper.escheduler.root=${zkRoot}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.dead.servers.*#zookeeper.escheduler.dead.servers=${zkDeadServers}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.masters.*#zookeeper.escheduler.masters=${zkMasters}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.workers.*#zookeeper.escheduler.workers=${zkWorkers}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.lock.masters.*#zookeeper.escheduler.lock.masters=${mastersLock}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.lock.workers.*#zookeeper.escheduler.lock.workers=${workersLock}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.lock.failover.masters.*#zookeeper.escheduler.lock.failover.masters=${mastersFailover}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.escheduler.lock.failover.workers.*#zookeeper.escheduler.lock.failover.workers=${workersFailover}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.session.timeout.*#zookeeper.session.timeout=${zkSessionTimeout}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.connection.timeout.*#zookeeper.connection.timeout=${zkConnectionTimeout}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.retry.sleep.*#zookeeper.retry.sleep=${zkRetrySleep}#g" conf/zookeeper.properties
sed -i ${txt} "s#zookeeper.retry.maxtime.*#zookeeper.retry.maxtime=${zkRetryMaxtime}#g" conf/zookeeper.properties
sed -i ${txt} "s#master.exec.threads.*#master.exec.threads=${masterExecThreads}#g" conf/master.properties
sed -i ${txt} "s#master.exec.task.number.*#master.exec.task.number=${masterExecTaskNum}#g" conf/master.properties
sed -i ${txt} "s#master.heartbeat.interval.*#master.heartbeat.interval=${masterHeartbeatInterval}#g" conf/master.properties
sed -i ${txt} "s#master.task.commit.retryTimes.*#master.task.commit.retryTimes=${masterTaskCommitRetryTimes}#g" conf/master.properties
sed -i ${txt} "s#master.task.commit.interval.*#master.task.commit.interval=${masterTaskCommitInterval}#g" conf/master.properties
sed -i ${txt} "s#master.max.cpuload.avg.*#master.max.cpuload.avg=${masterMaxCupLoadAvg}#g" conf/master.properties
sed -i ${txt} "s#master.reserved.memory.*#master.reserved.memory=${masterReservedMemory}#g" conf/master.properties
sed -i ${txt} "s#worker.exec.threads.*#worker.exec.threads=${workerExecThreads}#g" conf/worker.properties
sed -i ${txt} "s#worker.heartbeat.interval.*#worker.heartbeat.interval=${workerHeartbeatInterval}#g" conf/worker.properties
sed -i ${txt} "s#worker.fetch.task.num.*#worker.fetch.task.num=${workerFetchTaskNum}#g" conf/worker.properties
sed -i ${txt} "s#worker.max.cpuload.avg.*#worker.max.cpuload.avg=${workerMaxCupLoadAvg}#g" conf/worker.properties
sed -i ${txt} "s#worker.reserved.memory.*#worker.reserved.memory=${workerReservedMemory}#g" conf/worker.properties
sed -i ${txt} "s#server.port.*#server.port=${apiServerPort}#g" conf/application.properties
sed -i ${txt} "s#server.session.timeout.*#server.session.timeout=${apiServerSessionTimeout}#g" conf/application.properties
sed -i ${txt} "s#server.context-path.*#server.context-path=${apiServerContextPath}#g" conf/application.properties
sed -i ${txt} "s#spring.http.multipart.max-file-size.*#spring.http.multipart.max-file-size=${springMaxFileSize}#g" conf/application.properties
sed -i ${txt} "s#spring.http.multipart.max-request-size.*#spring.http.multipart.max-request-size=${springMaxRequestSize}#g" conf/application.properties
sed -i ${txt} "s#server.max-http-post-size.*#server.max-http-post-size=${apiMaxHttpPostSize}#g" conf/application.properties
sed -i ${txt} "s#mail.protocol.*#mail.protocol=${mailProtocol}#g" conf/alert.properties
sed -i ${txt} "s#mail.server.host.*#mail.server.host=${mailServerHost}#g" conf/alert.properties
sed -i ${txt} "s#mail.server.port.*#mail.server.port=${mailServerPort}#g" conf/alert.properties
sed -i ${txt} "s#mail.sender.*#mail.sender=${mailSender}#g" conf/alert.properties
sed -i ${txt} "s#mail.passwd.*#mail.passwd=${mailPassword}#g" conf/alert.properties
sed -i ${txt} "s#xls.file.path.*#xls.file.path=${xlsFilePath}#g" conf/alert.properties
sed -i ${txt} "s#installPath.*#installPath=${installPath}#g" conf/config/install_config.conf
sed -i ${txt} "s#deployUser.*#deployUser=${deployUser}#g" conf/config/install_config.conf
sed -i ${txt} "s#ips.*#ips=${ips}#g" conf/config/install_config.conf
sed -i ${txt} "s#masters.*#masters=${masters}#g" conf/config/run_config.conf
sed -i ${txt} "s#workers.*#workers=${workers}#g" conf/config/run_config.conf
sed -i ${txt} "s#alertServer.*#alertServer=${alertServer}#g" conf/config/run_config.conf
sed -i ${txt} "s#apiServers.*#apiServers=${apiServers}#g" conf/config/run_config.conf
#!/bin/sh
# execute any pre-init scripts
for i in /scripts/pre-init.d/*sh
do
if [ -e "${i}" ]; then
echo "[i] pre-init.d - processing $i"
. "${i}"
fi
done
if [ -d "/run/mysqld" ]; then
echo "[i] mysqld already present, skipping creation"
chown -R mysql:mysql /run/mysqld
else
echo "[i] mysqld not found, creating...."
mkdir -p /run/mysqld
chown -R mysql:mysql /run/mysqld
fi
if [ -d /var/lib/mysql/mysql ]; then
echo "[i] MySQL directory already present, skipping creation"
chown -R mysql:mysql /var/lib/mysql
else
echo "[i] MySQL data directory not found, creating initial DBs"
chown -R mysql:mysql /var/lib/mysql
mysql_install_db --user=mysql --ldata=/var/lib/mysql > /dev/null
if [ "$MYSQL_ROOT_PASSWORD" = "" ]; then
MYSQL_ROOT_PASSWORD=`pwgen 16 1`
echo "[i] MySQL root Password: $MYSQL_ROOT_PASSWORD"
fi
MYSQL_DATABASE="easyscheduler"
MYSQL_USER="easyscheduler"
MYSQL_PASSWORD="easyschedulereasyscheduler"
tfile=`mktemp`
if [ ! -f "$tfile" ]; then
return 1
fi
cat << EOF > $tfile
USE mysql;
FLUSH PRIVILEGES ;
GRANT ALL ON *.* TO 'root'@'%' identified by '$MYSQL_ROOT_PASSWORD' WITH GRANT OPTION ;
GRANT ALL ON *.* TO 'root'@'localhost' identified by '$MYSQL_ROOT_PASSWORD' WITH GRANT OPTION ;
SET PASSWORD FOR 'root'@'localhost'=PASSWORD('${MYSQL_ROOT_PASSWORD}') ;
DROP DATABASE IF EXISTS test ;
FLUSH PRIVILEGES ;
EOF
if [ "$MYSQL_DATABASE" != "" ]; then
echo "[i] Creating database: $MYSQL_DATABASE"
echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` CHARACTER SET utf8 COLLATE utf8_general_ci;" >> $tfile
if [ "$MYSQL_USER" != "" ]; then
echo "[i] Creating user: $MYSQL_USER with password $MYSQL_PASSWORD"
echo "GRANT ALL ON \`$MYSQL_DATABASE\`.* to '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD';" >> $tfile
fi
fi
/usr/bin/mysqld --user=mysql --bootstrap --verbose=0 --skip-name-resolve --skip-networking=0 < $tfile
rm -f $tfile
for f in /docker-entrypoint-initdb.d/*; do
case "$f" in
*.sql) echo "$0: running $f"; /usr/bin/mysqld --user=mysql --bootstrap --verbose=0 --skip-name-resolve --skip-networking=0 < "$f"; echo ;;
*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | /usr/bin/mysqld --user=mysql --bootstrap --verbose=0 --skip-name-resolve --skip-networking=0; echo ;;
*) echo "$0: ignoring or entrypoint initdb empty $f" ;;
esac
echo
done
echo
echo 'MySQL init process done. Ready for start up.'
echo
echo "exec /usr/bin/mysqld --user=mysql --console --skip-name-resolve --skip-networking=0" "$@"
fi
# execute any pre-exec scripts
for i in /scripts/pre-exec.d/*sh
do
if [ -e "${i}" ]; then
echo "[i] pre-exec.d - processing $i"
. ${i}
fi
done
mysql -ueasyscheduler -peasyschedulereasyscheduler --one-database easyscheduler -h127.0.0.1 < /opt/easyscheduler/sql/escheduler.sql
mysql -ueasyscheduler -peasyschedulereasyscheduler --one-database easyscheduler -h127.0.0.1 < /opt/easyscheduler/sql/quartz.sql
source /etc/profile
zkServer.sh start
cd /opt/easyscheduler
rm -rf /etc/nginx/conf.d/default.conf
sh ./bin/escheduler-daemon.sh start master-server
sh ./bin/escheduler-daemon.sh start worker-server
sh ./bin/escheduler-daemon.sh start api-server
sh ./bin/escheduler-daemon.sh start logger-server
sh ./bin/escheduler-daemon.sh start alert-server
nginx -c /etc/nginx/nginx.conf
exec /usr/bin/mysqld --user=mysql --console --skip-name-resolve --skip-networking=0 $@
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
#dataDir=/tmp/zookeeper
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
dataDir=/opt/zookeeper/data
dataLogDir=/opt/zookeeper/logs
Q: When running on a single machine the services keep crashing, probably because memory is insufficient; the test machine has 4 cores and 8G RAM. Production needs a distributed deployment, but for a single machine what configuration is recommended?
A: Easy Scheduler consists of 5 services, and the services themselves do not need much memory or CPU:
| Service | Memory | CPU cores |
| ------------ | ---- | ------- |
| MasterServer | 2G | 2 cores |
| WorkerServer | 2G | 2 cores |
| ApiServer | 512M | 1 core |
| AlertServer | 512M | 1 core |
| LoggerServer | 512M | 1 core |
Note: if there are many tasks, the machine hosting the WorkerServer should have at least 16G of physical memory.
---
Q: Why can't the administrator create a project?
A: The administrator is currently "administration only": it has no tenant, i.e. no corresponding Linux user, so it has no execution permission, but it does have full read permission. For business operations such as creating projects, use the administrator to create a tenant and a regular user, then log in as the regular user to operate.
---
Q: Which mail providers does the system support?
A: Most mail providers are supported, including QQ, 163, 126, 139, Outlook, Aliyun, etc.
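For example, a minimal sketch of the relevant conf/alert.properties entries (host, port and credentials below are placeholders; enable TLS/SSL according to what your provider requires):

```
mail.protocol=SMTP
mail.server.host=smtp.exmail.qq.com
mail.server.port=25
mail.sender=alert@example.com
mail.passwd=xxxxxxxx
mail.smtp.starttls.enable=false
mail.smtp.ssl.enable=true
```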
---
Q: What are the commonly used system variable time parameters, and how are they used?
![image-20190524180218862](/Users/stone/Library/Application Support/typora-user-images/image-20190524180218862.png)
---
Q: `pip install kazoo` fails with an error. Is it required?
A: It is needed for Python to connect to ZooKeeper.
---
Q: If any one of the alert, api, or logger services goes down, will tasks still run normally?
A: No impact. Only the Master and Worker services affect tasks that are currently running.
---
Q: How do I designate which machine runs a task?
A: Through worker groups: the workflow will only run on machines in the specified group. The default group is Default, which can run on any worker.
---
Q: How can cross-user task dependencies be implemented? For example, user A writes a task and user B needs to depend on it.
Say our data-warehouse team writes a task that builds an intermediate wide table; when another business department (a different user) wants to use that table, how do they depend on it?
A: There are two cases. If you need to run the wide-table task, use a sub-workflow to put it into your own workflow. If you only need to check whether the wide-table task has finished, use a dependent node to check whether it completed within the specified time period.
---
Q: The WorkerServer fails to start and reports the following message. What is the cause?
```
[INFO] 2019-05-06 16:39:31.492 cn.escheduler.server.zk.ZKWorkerClient:[155] - register failure , worker already started on : 127.0.0.1, please wait for a moment and try again
```
A: On startup the Worker/Master Server registers its startup information in ZooKeeper as an ephemeral node. If the server is restarted within a short interval, the ZooKeeper session of the previous instance has not yet expired and the message above appears. The fix is to wait for the session to expire, usually about one minute.
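For example (a minimal sketch, assuming ZooKeeper listens on 127.0.0.1:2181 and the default zkRoot /escheduler is used), the ephemeral registration nodes can be inspected with the ZooKeeper CLI; the old instance's node disappears once its session expires:

```
/opt/zookeeper/bin/zkCli.sh -server 127.0.0.1:2181 ls /escheduler/workers
/opt/zookeeper/bin/zkCli.sh -server 127.0.0.1:2181 ls /escheduler/masters
```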
----
Q: When compiling, the escheduler-grpc module keeps reporting: Information:java: Errors occurred while compiling module 'escheduler-rpc', and classes such as LogParameter, RetStrInfo and RetByteInfo cannot be found.
A: This is because the rpc source package is implemented with Google gRPC and must be compiled with Maven. Run `mvn -U clean package assembly:assembly -Dmaven.test.skip=true` in the project root, then refresh the whole project.
----
Q: Does EasyScheduler support running on Windows?
A: We recommend running it on Ubuntu or CentOS; running on Windows is not supported yet, although it can be compiled on Windows. For development and debugging, Ubuntu or macOS is recommended.
-----
Q: How are custom parameters used?
A:
----
Q: Why doesn't a task run?
A: Possible reasons it does not run:
Check whether the xx table contains any data.
Check the Master Server run logs:
Check the Worker Server run logs
----
Q: Why does a task stay in the running state?
A:
----
Q: Task
......@@ -43,6 +43,8 @@ Easy Scheduler
- [**Upgrade Documentation**](https://analysys.github.io/easyscheduler_docs_cn/升级文档.html?_blank "Upgrade Documentation")
- [**FAQ**](https://analysys.github.io/easyscheduler_docs_cn/FAQ.html?_blank "FAQ")
- <a href="http://52.82.13.76:8888" target="_blank">Try it out</a> Regular user login: demo/demo123
For more documentation, see the <a href="https://analysys.github.io/easyscheduler_docs_cn/" target="_blank">EasyScheduler online documentation (Chinese)</a>
......
......@@ -8,6 +8,7 @@
* Backend Deployment Documentation
* [Preparation](后端部署文档.md#1、准备工作)
* [Deployment](后端部署文档.md#2、部署)
* [Quick Start](快速上手.md#快速上手)
* [User Manual](系统使用手册.md#使用手册)
* [System Architecture Design](系统架构设计.md#系统架构设计)
* Frontend Development Documentation
......@@ -21,7 +22,8 @@
* Backend Development Documentation
* [Development Environment Setup](后端开发文档.md#项目编译)
* [Custom Task Plugin Documentation](任务插件开发.md#任务插件开发)
* FAQ
* [FAQ](EasyScheduler FAQ.md#FAQ)
* System Version Upgrade Documentation
* [Version Upgrade](升级文档.md)
* Release Notes for Past Versions
......
docs/zh_CN/images/hive_edit.png (image changed, 29.0 KB → 45.5 KB)
docs/zh_CN/images/hive_edit2.png (image changed, 33.3 KB → 47.3 KB)
docs/zh_CN/images/mysql_edit.png (image changed, 99.8 KB → 47.3 KB)
......@@ -4,7 +4,7 @@
## 1. Preparation
The latest installation package version is currently 1.0.2. Download address: [Gitee download](https://gitee.com/easyscheduler/EasyScheduler/attach_files/). Download escheduler-backend-1.0.2.tar.gz (backend, abbreviated as escheduler-backend) and escheduler-ui-1.0.2.tar.gz (frontend, abbreviated as escheduler-ui)
The latest installation package version is currently 1.0.3. Download address: [Gitee download](https://gitee.com/easyscheduler/EasyScheduler/attach_files/). Download escheduler-backend-1.0.3.tar.gz (backend, abbreviated as escheduler-backend) and escheduler-ui-1.0.3.tar.gz (frontend, abbreviated as escheduler-ui)
#### Preparation 1: Basic software installation (install the required items yourself)
......@@ -101,6 +101,12 @@ install.sh : one-click deployment script
- Modify the deployment parameters (according to your own servers and business needs):
- Modify each parameter in **install.sh**, replacing it with the value your deployment requires
- monitorServerState: switch variable added in version 1.0.3, controlling whether the self-starting (watchdog) script is launched (it monitors master and worker status and restarts them automatically if they go offline)
The default value "false" means the watchdog script is not launched; change it to "true" to enable it
- hdfsStartupSate: switch variable that controls whether HDFS support is enabled (see the example after this list)
The default value "false" means HDFS is not enabled
To enable it, change the value to "true"; when HDFS is enabled you must create the HDFS root path yourself, i.e. the hdfsPath configured in install.sh
- If you use HDFS-related features, copy **hdfs-site.xml** and **core-site.xml** into the conf directory
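A minimal sketch of how these two switches might be set in install.sh (the values shown are illustrative assumptions):

```
# keep the master/worker watchdog disabled (switch added in 1.0.3)
monitorServerState="false"
# enable HDFS support; hdfsPath must already exist in HDFS and be owned by the deployment user
hdfsStartupSate="true"
hdfsPath="/escheduler"
```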
......@@ -143,7 +149,7 @@ install.sh : one-click deployment script
### 2.2 Deploying from compiled source
Download the 1.0.2 release source package, extract it and enter the root directory
Download the 1.0.3 release source package, extract it and enter the root directory
* Run the build command:
......
## Quick Start
* Log in as the administrator user
> Address: 192.168.xx.xx:8888  Username/password: admin/esheduler123
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/login.jpg" width="60%" />
</p>
* Create a queue
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/create-queue.png" width="60%" />
</p>
* Create a tenant
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/addtenant.png" width="60%" />
</p>
* Create a regular user
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/useredit2.png" width="60%" />
</p>
* Create an alert group
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/mail_edit.png" width="60%" />
</p>
* Log in as the regular user
> Click the user name in the top-right corner, choose "Log out", and log back in as the regular user.
* Project Management -> Create Project -> click the project name
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/project.png" width="60%" />
</p>
* Click Workflow Definition -> Create Workflow Definition -> bring the workflow definition online
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/dag1.png" width="60%" />
</p>
* Run the workflow definition -> click Workflow Instance -> click the process instance name -> double-click a task node -> view the task execution log
<p align="center">
<img src="https://analysys.github.io/easyscheduler_docs_cn/images/task-log.png" width="60%" />
</p>
\ No newline at end of file
(This diff is collapsed.)
......@@ -4,7 +4,7 @@
<parent>
<groupId>cn.analysys</groupId>
<artifactId>escheduler</artifactId>
<version>1.0.2-SNAPSHOT</version>
<version>1.0.3-SNAPSHOT</version>
</parent>
<artifactId>escheduler-alert</artifactId>
<packaging>jar</packaging>
......
......@@ -59,6 +59,8 @@ public class Constants {
public static final String MAIL_SMTP_STARTTLS_ENABLE = "mail.smtp.starttls.enable";
public static final String MAIL_SMTP_SSL_ENABLE = "mail.smtp.ssl.enable";
public static final String TEXT_HTML_CHARSET_UTF_8 = "text/html;charset=utf-8";
public static final String STRING_TRUE = "true";
......
......@@ -33,6 +33,7 @@ import org.springframework.util.ResourceUtils;
import javax.mail.*;
import javax.mail.internet.*;
import java.io.*;
import java.security.Security;
import java.util.*;
import static cn.escheduler.alert.utils.PropertyUtils.getInt;
......@@ -58,6 +59,10 @@ public class MailUtils {
public static final String xlsFilePath = getString(Constants.XLS_FILE_PATH);
public static final String starttlsEnable = getString(Constants.MAIL_SMTP_STARTTLS_ENABLE);
public static final String sslEnable = getString(Constants.MAIL_SMTP_SSL_ENABLE);
private static Template MAIL_TEMPLATE;
static {
......@@ -122,7 +127,10 @@ public class MailUtils {
//set charset
email.setCharset(Constants.UTF_8);
// TLS verification
email.setTLS(true);
email.setTLS(Boolean.valueOf(starttlsEnable));
// SSL verification
email.setSSL(Boolean.valueOf(sslEnable));
if (CollectionUtils.isNotEmpty(receivers)){
// receivers mail
for (String receiver : receivers) {
......@@ -269,11 +277,15 @@ public class MailUtils {
* @throws MessagingException
*/
private static MimeMessage getMimeMessage(Collection<String> receivers) throws MessagingException {
// Security.addProvider(new com.sun.net.ssl.internal.ssl.Provider());
// final String SSL_FACTORY = "javax.net.ssl.SSLSocketFactory";
Properties props = new Properties();
props.setProperty(Constants.MAIL_HOST, mailServerHost);
props.setProperty(Constants.MAIL_SMTP_AUTH, Constants.STRING_TRUE);
props.setProperty(Constants.MAIL_TRANSPORT_PROTOCOL, mailProtocol);
props.setProperty(Constants.MAIL_SMTP_STARTTLS_ENABLE, Constants.STRING_TRUE);
props.setProperty(Constants.MAIL_SMTP_STARTTLS_ENABLE, starttlsEnable);
props.setProperty("mail.smtp.ssl.enable", sslEnable);
Authenticator auth = new Authenticator() {
@Override
protected PasswordAuthentication getPasswordAuthentication() {
......
......@@ -8,8 +8,13 @@ mail.server.port=25
mail.sender=xxxxxxx
mail.passwd=xxxxxxx
# TLS
mail.smtp.starttls.enable=false
# SSL
mail.smtp.ssl.enable=true
#xls file path,need create if not exist
xls.file.path=/opt/xls
xls.file.path=/tmp/xls
......
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'>
<html>
<head><title> easyscheduler </title>
<meta name='Keywords' content=''>
<meta name='Description' content=''>
<style type="text/css">table {
font-size: 14px;
color: #333333;
border-width: 1px;
border-color: #666666;
border-collapse: collapse;
}
table th {
border-width: 1px;
padding: 8px;
border-style: solid;
border-color: #666666;
background-color: #dedede;
}
table td {
border-width: 1px;
padding: 8px;
border-style: solid;
border-color: #666666;
background-color: #ffffff;
}</style>
</head>
<body>
<table>
<thead>
<#if title??> ${title} </#if>
</thead>
<#if content??> ${content} </#if>
</table>
</body>
</html>
\ No newline at end of file
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'><html><head><title> easyscheduler</title><meta name='Keywords' content=''><meta name='Description' content=''><style type="text/css">table { margin-top:0px; padding-top:0px; border:1px solid; font-size: 14px; color: #333333; border-width: 1px; border-color: #666666; border-collapse: collapse; } table th { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #dedede; } table td { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #ffffff; }</style></head><body style="margin:0;padding:0"><table border="1px" cellpadding="5px" cellspacing="-10px"><thead><#if title??> ${title}</#if></thead><#if content??> ${content}</#if></table></body></html>
\ No newline at end of file
......@@ -3,7 +3,7 @@
<parent>
<groupId>cn.analysys</groupId>
<artifactId>escheduler</artifactId>
<version>1.0.2-SNAPSHOT</version>
<version>1.0.3-SNAPSHOT</version>
</parent>
<artifactId>escheduler-api</artifactId>
<packaging>jar</packaging>
......@@ -34,6 +34,10 @@
<artifactId>leveldbjni-all</artifactId>
<groupId>org.fusesource.leveldbjni</groupId>
</exclusion>
<exclusion>
<artifactId>protobuf-java</artifactId>
<groupId>com.google.protobuf</groupId>
</exclusion>
</exclusions>
</dependency>
......
......@@ -26,8 +26,8 @@ import org.quartz.impl.matchers.GroupMatcher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Calendar;
import java.util.*;
import java.util.Calendar;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
......@@ -226,8 +226,12 @@ public class QuartzExecutors {
public boolean deleteJob(String jobName, String jobGroupName) {
lock.writeLock().lock();
try {
logger.info("try to delete job, job name: {}, job group name: {},", jobName, jobGroupName);
return scheduler.deleteJob(new JobKey(jobName, jobGroupName));
JobKey jobKey = new JobKey(jobName,jobGroupName);
if(scheduler.checkExists(jobKey)){
logger.info("try to delete job, job name: {}, job group name: {},", jobName, jobGroupName);
return scheduler.deleteJob(jobKey);
}
} catch (SchedulerException e) {
logger.error(String.format("delete job : %s failed",jobName), e);
} finally {
......
......@@ -178,9 +178,11 @@ public class ExecutorService extends BaseService{
}
ProcessDefinition processDefinition = processDao.findProcessDefineById(processInstance.getProcessDefinitionId());
result = checkProcessDefinitionValid(processDefinition, processInstance.getProcessDefinitionId());
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
if(executeType != ExecuteType.STOP && executeType != ExecuteType.PAUSE){
result = checkProcessDefinitionValid(processDefinition, processInstance.getProcessDefinitionId());
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
}
checkResult = checkExecuteType(processInstance, executeType);
......
......@@ -293,12 +293,12 @@ public class ProcessDefinitionService extends BaseDAGService {
processDefine.setTimeout(processData.getTimeout());
//custom global params
List<Property> globalParamsList = processData.getGlobalParams();
if (globalParamsList != null && globalParamsList.size() > 0) {
Set<Property> userDefParamsSet = new HashSet<>(globalParamsList);
List<Property> globalParamsList = new ArrayList<>();
if (processData.getGlobalParams() != null && processData.getGlobalParams().size() > 0) {
Set<Property> userDefParamsSet = new HashSet<>(processData.getGlobalParams());
globalParamsList = new ArrayList<>(userDefParamsSet);
processDefine.setGlobalParamList(globalParamsList);
}
processDefine.setGlobalParamList(globalParamsList);
processDefine.setUpdateTime(now);
processDefine.setFlag(Flag.YES);
if (processDefineMapper.update(processDefine) > 0) {
......
......@@ -701,17 +701,19 @@ public class ResourcesService extends BaseService {
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
Set<Resource> resourceSet = null;
List<Object> list ;
if (resourceList != null && resourceList.size() > 0) {
resourceSet = new HashSet<>(resourceList);
Set<Resource> resourceSet = new HashSet<>(resourceList);
List<Resource> authedResourceList = resourcesMapper.queryAuthorizedResourceList(userId);
getAuthorizedResourceList(resourceSet, authedResourceList);
list = new ArrayList<>(resourceSet);
}else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, new ArrayList<>(resourceSet));
result.put(Constants.DATA_LIST, list);
putMsg(result,Status.SUCCESS);
return result;
}
......
......@@ -514,6 +514,13 @@ public class SchedulerService extends BaseService {
putMsg(result, Status.SCHEDULE_CRON_NOT_EXISTS, scheduleId);
return result;
}
// Determine if the login user is the owner of the schedule
if (loginUser.getId() != schedule.getUserId()) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
// check schedule is already online
if(schedule.getReleaseState() == ReleaseState.ONLINE){
putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE,schedule.getId());
......
......@@ -115,6 +115,9 @@ public class UsersService extends BaseService {
user.setUserType(UserType.GENERAL_USER);
user.setCreateTime(now);
user.setUpdateTime(now);
if (StringUtils.isEmpty(queue)){
queue = "";
}
user.setQueue(queue);
// save user
......
......@@ -4,7 +4,7 @@
<parent>
<artifactId>escheduler</artifactId>
<groupId>cn.analysys</groupId>
<version>1.0.2-SNAPSHOT</version>
<version>1.0.3-SNAPSHOT</version>
</parent>
<artifactId>escheduler-common</artifactId>
<name>escheduler-common</name>
......@@ -148,10 +148,10 @@
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</exclusion>
<!--<exclusion>-->
<!--<groupId>com.google.protobuf</groupId>-->
<!--<artifactId>protobuf-java</artifactId>-->
<!--</exclusion>-->
</exclusions>
</dependency>
......@@ -175,10 +175,10 @@
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-xc</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</exclusion>
<!--<exclusion>-->
<!--<groupId>com.google.protobuf</groupId>-->
<!--<artifactId>protobuf-java</artifactId>-->
<!--</exclusion>-->
<exclusion>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-all</artifactId>
......
......@@ -162,6 +162,11 @@ public final class Constants {
*/
public static final String ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_WORKERS = "zookeeper.escheduler.lock.failover.workers";
/**
* MasterServer startup failover running and fault tolerance process
*/
public static final String ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "zookeeper.escheduler.lock.failover.startup.masters";
/**
* need send warn times when master server or worker server failover
*/
......
......@@ -206,7 +206,10 @@ public class TaskQueueZkImpl extends AbstractZKClient implements ITaskQueue {
String taskIdPath = tasksQueuePath + nodeValue;
logger.info("consume task {}", taskIdPath);
try{
zk.delete().forPath(taskIdPath);
Stat stat = zk.checkExists().forPath(taskIdPath);
if(stat != null){
zk.delete().forPath(taskIdPath);
}
}catch(Exception e){
logger.error(String.format("delete task:%s from zookeeper fail, exception:" ,nodeValue) ,e);
}
......
......@@ -80,6 +80,9 @@ public class DependentUtils {
case "last3Hours":
result = DependentDateUtils.getLastHoursInterval(businessDate, 3);
break;
case "today":
result = DependentDateUtils.getTodayInterval(businessDate);
break;
case "last1Days":
result = DependentDateUtils.getLastDayInterval(businessDate, 1);
break;
......
......@@ -27,10 +27,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;
import java.util.*;
/**
* json utils
......@@ -109,7 +106,7 @@ public class JSONUtils {
*/
public static <T> List<T> toList(String json, Class<T> clazz) {
if (StringUtils.isEmpty(json)) {
return null;
return new ArrayList<>();
}
try {
return JSONArray.parseArray(json, clazz);
......@@ -117,7 +114,7 @@ public class JSONUtils {
logger.error("JSONArray.parseArray exception!",e);
}
return null;
return new ArrayList<>();
}
......
......@@ -42,6 +42,21 @@ public class DependentDateUtils {
return dateIntervals;
}
/**
* get today day interval list
* @param businessDate
* @return
*/
public static List<DateInterval> getTodayInterval(Date businessDate){
List<DateInterval> dateIntervals = new ArrayList<>();
Date beginTime = DateUtils.getStartOfDay(businessDate);
Date endTime = DateUtils.getEndOfDay(businessDate);
dateIntervals.add(new DateInterval(beginTime, endTime));
return dateIntervals;
}
/**
* get last day interval list
* @param businessDate
......
......@@ -16,6 +16,7 @@ zookeeper.escheduler.lock.workers=/escheduler/lock/workers
#escheduler failover directory
zookeeper.escheduler.lock.failover.masters=/escheduler/lock/failover/masters
zookeeper.escheduler.lock.failover.workers=/escheduler/lock/failover/workers
zookeeper.escheduler.lock.failover.startup.masters=/escheduler/lock/failover/startup-masters
#escheduler failover directory
zookeeper.session.timeout=300
......
......@@ -52,6 +52,10 @@ public class DependentUtilsTest {
public void getDateIntervalList() {
Date curDay = DateUtils.stringToDate("2019-02-05 00:00:00");
DateInterval diCur = new DateInterval(DateUtils.getStartOfDay(curDay),
DateUtils.getEndOfDay(curDay));
Date day1 = DateUtils.stringToDate("2019-02-04 00:00:00");
DateInterval di1 = new DateInterval(DateUtils.getStartOfDay(day1),
DateUtils.getEndOfDay(day1));
......@@ -70,6 +74,13 @@ public class DependentUtilsTest {
Assert.assertEquals(dateIntervals.get(1), di1);
Assert.assertEquals(dateIntervals.get(0), di2);
dateValue = "today";
dateIntervals = DependentUtils.getDateIntervalList(curDay, dateValue);
Assert.assertEquals(dateIntervals.get(0), diCur);
}
@Test
......
......@@ -4,7 +4,7 @@
<parent>
<groupId>cn.analysys</groupId>
<artifactId>escheduler</artifactId>
<version>1.0.2-SNAPSHOT</version>
<version>1.0.3-SNAPSHOT</version>
</parent>
<artifactId>escheduler-dao</artifactId>
<name>escheduler-dao</name>
......@@ -125,6 +125,12 @@
<dependency>
<groupId>cn.analysys</groupId>
<artifactId>escheduler-common</artifactId>
<exclusions>
<exclusion>
<artifactId>protobuf-java</artifactId>
<groupId>com.google.protobuf</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
......
......@@ -20,6 +20,7 @@ import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.*;
import cn.escheduler.common.model.DateInterval;
import cn.escheduler.common.model.TaskNode;
import cn.escheduler.common.process.Property;
import cn.escheduler.common.queue.ITaskQueue;
import cn.escheduler.common.queue.TaskQueueFactory;
import cn.escheduler.common.task.subprocess.SubProcessParameters;
......@@ -41,6 +42,7 @@ import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import java.util.*;
import java.util.stream.Collectors;
import static cn.escheduler.common.Constants.*;
import static cn.escheduler.dao.datasource.ConnectionFactory.getMapper;
......@@ -586,10 +588,12 @@ public class ProcessDao extends AbstractBaseDao {
case START_FAILURE_TASK_PROCESS:
// find failed tasks and init these tasks
List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE);
List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE);
List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
failedList.addAll(killedList);
failedList.addAll(toleranceList);
for(Integer taskId : failedList){
initTaskInstance(this.findTaskInstanceById(taskId));
}
......@@ -689,41 +693,62 @@ public class ProcessDao extends AbstractBaseDao {
* handle sub work process instance, update relation table and command parameters
* set sub work process flag, extends parent work process command parameters.
*/
public ProcessInstance setSubProcessParam(ProcessInstance processInstance){
String cmdParam = processInstance.getCommandParam();
public ProcessInstance setSubProcessParam(ProcessInstance subProcessInstance){
String cmdParam = subProcessInstance.getCommandParam();
if(StringUtils.isEmpty(cmdParam)){
return processInstance;
return subProcessInstance;
}
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
// write sub process id into cmd param.
if(paramMap.containsKey(CMDPARAM_SUB_PROCESS)
&& CMDPARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMDPARAM_SUB_PROCESS))){
paramMap.remove(CMDPARAM_SUB_PROCESS);
paramMap.put(CMDPARAM_SUB_PROCESS, String.valueOf(processInstance.getId()));
processInstance.setCommandParam(JSONUtils.toJson(paramMap));
processInstance.setIsSubProcess(Flag.YES);
this.saveProcessInstance(processInstance);
paramMap.put(CMDPARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId()));
subProcessInstance.setCommandParam(JSONUtils.toJson(paramMap));
subProcessInstance.setIsSubProcess(Flag.YES);
this.saveProcessInstance(subProcessInstance);
}
// copy parent instance user def params to sub process..
String parentInstanceId = paramMap.get(CMDPARAM_SUB_PROCESS_PARENT_INSTANCE_ID);
if(StringUtils.isNotEmpty(parentInstanceId)){
ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId));
if(parentInstance != null){
processInstance.setGlobalParams(parentInstance.getGlobalParams());
this.saveProcessInstance(processInstance);
subProcessInstance.setGlobalParams(
joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams()));
this.saveProcessInstance(subProcessInstance);
}else{
logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam);
}
}
ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class);
if(processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0){
return processInstance;
return subProcessInstance;
}
// update sub process id to process map table
processInstanceMap.setProcessInstanceId(processInstance.getId());
processInstanceMap.setProcessInstanceId(subProcessInstance.getId());
this.updateWorkProcessInstanceMap(processInstanceMap);
return processInstance;
return subProcessInstance;
}
/**
* join parent global params into sub process.
* only keys that are not already in the sub process global params are joined.
* @param parentGlobalParams
* @param subGlobalParams
* @return
*/
private String joinGlobalParams(String parentGlobalParams, String subGlobalParams){
List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
Map<String,String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
for(Property parent : parentPropertyList){
if(!subMap.containsKey(parent.getProp())){
subPropertyList.add(parent);
}
}
return JSONUtils.toJson(subPropertyList);
}
/**
......@@ -898,7 +923,11 @@ public class ProcessDao extends AbstractBaseDao {
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
// create new task instance
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1 );
if(taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE){
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1 );
}
taskInstance.setEndTime(null);
taskInstance.setStartTime(new Date());
taskInstance.setFlag(Flag.YES);
taskInstance.setHost(null);
taskInstance.setId(0);
......@@ -1348,7 +1377,7 @@ public class ProcessDao extends AbstractBaseDao {
* @return
*/
public List<TaskInstance> queryNeedFailoverTaskInstances(String host){
return taskInstanceMapper.queryByHostAndStatus(host,stateArray);
return taskInstanceMapper.queryByHostAndStatus(host, stateArray);
}
/**
......@@ -1526,10 +1555,16 @@ public class ProcessDao extends AbstractBaseDao {
}
public void selfFaultTolerant(int ... states){
List<ProcessInstance> processInstanceList = processInstanceMapper.listByStatus(states);
/**
* master startup fault tolerance
*/
public void masterStartupFaultTolerant(){
int[] readyStopAndKill=new int[]{ExecutionStatus.READY_PAUSE.ordinal(),ExecutionStatus.READY_STOP.ordinal(),
ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(),ExecutionStatus.RUNNING_EXEUTION.ordinal()};
List<ProcessInstance> processInstanceList = processInstanceMapper.listByStatus(readyStopAndKill);
for (ProcessInstance processInstance:processInstanceList){
selfFaultTolerant(processInstance);
processNeedFailoverProcessInstances(processInstance);
}
}
......
......@@ -572,11 +572,10 @@ public class ProcessInstanceMapperProvider {
FROM(TABLE_NAME);
WHERE("process_definition_id=#{processDefinitionId} ");
if(parameter.get("startTime") != null && parameter.get("endTime") != null
){
WHERE("schedule_time between #{startTime} and #{endTime} " +
"or start_time between #{startTime} and #{endTime}");
WHERE("process_definition_id=#{processDefinitionId} and (schedule_time between #{startTime} and #{endTime} " +
"or start_time between #{startTime} and #{endTime})");
}
WHERE("`state` in (" + strStates.toString() + ")");
ORDER_BY("start_time desc limit 1");
......
......@@ -203,7 +203,9 @@ public class UserMapperProvider {
public String queryUserPaging(Map<String, Object> parameter) {
return new SQL() {
{
SELECT("u.*,t.tenant_name,q.queue_name");
SELECT("u.id,u.user_name,u.user_password,u.user_type,u.email,u.phone,u.tenant_id,u.create_time,u.update_time,t.tenant_name," +
"case when u.queue <> '' then u.queue else q.queue_name end as queue," +
"q.queue_name");
FROM(TABLE_NAME + " u ");
LEFT_OUTER_JOIN("t_escheduler_tenant t on u.tenant_id = t.id");
LEFT_OUTER_JOIN("t_escheduler_queue q on t.queue_id = q.id");
......@@ -228,7 +230,8 @@ public class UserMapperProvider {
public String queryDetailsById(Map<String, Object> parameter) {
return new SQL() {
{
SELECT("u.*,q.queue_name,t.tenant_name");
SELECT("u.*, t.tenant_name," +
"case when u.queue <> '' then u.queue else q.queue_name end as queue_name");
FROM(TABLE_NAME + " u,t_escheduler_tenant t,t_escheduler_queue q");
......
......@@ -422,8 +422,12 @@ public class TaskInstance {
if(this.isSubProcess()){
return false;
}
return (this.getState().typeIsFailure()
if(this.getState() == ExecutionStatus.NEED_FAULT_TOLERANCE){
return true;
}else {
return (this.getState().typeIsFailure()
&& this.getRetryTimes() < this.getMaxRetryTimes());
}
}
public void setDependency(String dependency) {
......
......@@ -4,7 +4,7 @@
<parent>
<artifactId>escheduler</artifactId>
<groupId>cn.analysys</groupId>
<version>1.0.2-SNAPSHOT</version>
<version>1.0.3-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
......
......@@ -3,7 +3,7 @@
<parent>
<artifactId>escheduler</artifactId>
<groupId>cn.analysys</groupId>
<version>1.0.2-SNAPSHOT</version>
<version>1.0.3-SNAPSHOT</version>
</parent>
<artifactId>escheduler-server</artifactId>
<name>escheduler-server</name>
......@@ -18,6 +18,10 @@
<groupId>cn.analysys</groupId>
<artifactId>escheduler-common</artifactId>
<exclusions>
<exclusion>
<artifactId>protobuf-java</artifactId>
<groupId>com.google.protobuf</groupId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
......
......@@ -869,7 +869,7 @@ public class MasterExecThread implements Runnable {
}
Date now = new Date();
long runningTime = DateUtils.differMs(now, processInstance.getStartTime());
long runningTime = DateUtils.diffMin(now, processInstance.getStartTime());
if(runningTime > processInstance.getTimeout()){
return true;
......
......@@ -31,6 +31,7 @@ import cn.escheduler.dao.model.TaskInstance;
import cn.escheduler.server.ResInfo;
import cn.escheduler.server.utils.ProcessUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.imps.CuratorFrameworkState;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
......@@ -111,21 +112,46 @@ public class ZKMasterClient extends AbstractZKClient {
// init dao
this.initDao();
// init system znode
this.initSystemZNode();
InterProcessMutex mutex = null;
try {
// create distributed lock with the root node path of the lock space as /escheduler/lock/failover/master
String znodeLock = getMasterStartUpLockPath();
mutex = new InterProcessMutex(zkClient, znodeLock);
mutex.acquire();
// init system znode
this.initSystemZNode();
// monitor master
this.listenerMaster();
// monitor master
this.listenerMaster();
// monitor worker
this.listenerWorker();
// monitor worker
this.listenerWorker();
// register master
this.registMaster();
// register master
this.registMaster();
// check if fault tolerance is required,failure and tolerance
if (getActiveMasterNum() == 1) {
processDao.masterStartupFaultTolerant();
}
}catch (Exception e){
logger.error("master start up exception : " + e.getMessage(),e);
}finally {
if (mutex != null){
try {
mutex.release();
} catch (Exception e) {
if(e.getMessage().equals("instance must be started before calling this method")){
logger.warn("lock release");
}else{
logger.error("lock release failed : " + e.getMessage(),e);
}
// check if fault tolerance is required,failure and tolerance
if (getActiveMasterNum() == 1) {
processDao.selfFaultTolerant(ExecutionStatus.RUNNING_EXEUTION.ordinal(),ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal());
}
}
}
}
......@@ -417,6 +443,14 @@ public class ZKMasterClient extends AbstractZKClient {
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_MASTERS);
}
/**
* get master start up lock path
* @return
*/
public String getMasterStartUpLockPath(){
return conf.getString(Constants.ZOOKEEPER_ESCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS);
}
/**
* get master failover lock path
* @return
......
# Backend API address
API_BASE = http://192.168.xx.xx:12345
......
.hljs{display:block;overflow-x:auto;padding:0.5em;background:white;color:black}.hljs-comment,.hljs-quote,.hljs-variable{color:#008000}.hljs-keyword,.hljs-selector-tag,.hljs-built_in,.hljs-name,.hljs-tag{color:#00f}.hljs-string,.hljs-title,.hljs-section,.hljs-attribute,.hljs-literal,.hljs-template-tag,.hljs-template-variable,.hljs-type,.hljs-addition{color:#a31515}.hljs-deletion,.hljs-selector-attr,.hljs-selector-pseudo,.hljs-meta{color:#2b91af}.hljs-doctag{color:#808080}.hljs-attr{color:#f00}.hljs-symbol,.hljs-bullet,.hljs-link{color:#00b0e8}.hljs-emphasis{font-style:italic}.hljs-strong{font-weight:bold}
.jtk-node{position:absolute}.jtk-group{position:absolute;overflow:visible}[jtk-group-content]{position:relative}.katavorio-clone-drag{pointer-events:none}.jtk-surface{overflow:hidden!important;position:relative;cursor:move;cursor:-moz-grab;cursor:-webkit-grab;touch-action:none}.jtk-surface-panning{cursor:-moz-grabbing;cursor:-webkit-grabbing;-webkit-touch-callout:none;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.jtk-surface-canvas{overflow:visible!important}.jtk-surface-droppable-node{touch-action:none}.jtk-surface-nopan{overflow:scroll!important;cursor:default}.jtk-surface-tile{border:none;outline:0;margin:0;-webkit-transition:opacity .3s ease .15s;-moz-transition:opacity .3s ease .15s;-o-transition:opacity .3s ease .15s;-ms-transition:opacity .3s ease .15s;transition:opacity .3s ease .15s}.jtk-lasso{border:2px solid #3177b8;background-color:#f5f5f5;opacity:.5;display:none;z-index:20000;position:absolute}.jtk-lasso-select-defeat *{-webkit-touch-callout:none;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.jtk-lasso-mask{position:fixed;z-index:20000;display:none;opacity:.5;background-color:#07234e;top:0;bottom:0;left:0;right:0}.jtk-surface-selected-element{border:2px dashed #f76258!important}.jtk-surface-pan{background-color:Azure;opacity:.4;text-align:center;cursor:pointer;z-index:2;-webkit-transition:background-color .15s ease-in;-moz-transition:background-color .15s ease-in;-o-transition:background-color .15s ease-in;transition:background-color .15s ease-in}.jtk-surface-pan-bottom,.jtk-surface-pan-top{width:100%;height:20px}.jtk-surface-pan-bottom:hover,.jtk-surface-pan-left:hover,.jtk-surface-pan-right:hover,.jtk-surface-pan-top:hover{opacity:.6;background-color:#3177b8;color:#fff;font-weight:700}.jtk-surface-pan-left,.jtk-surface-pan-right{width:20px;height:100%;line-height:40}.jtk-surface-pan-active,.jtk-surface-pan-active:hover{background-color:#f76258}.jtk-miniview{overflow:hidden!important;width:125px;height:125px;position:relative;background-color:#b2c9cd;border:1px solid #e2e6cd;border-radius:4px;opacity:.8}.jtk-miniview-panner{border:5px dotted #f5f5f5;opacity:.4;background-color:#4f6f7e;cursor:move;cursor:-moz-grab;cursor:-webkit-grab}.jtk-miniview-panning{cursor:-moz-grabbing;cursor:-webkit-grabbing}.jtk-miniview-element{background-color:#607a86;position:absolute}.jtk-miniview-group-element{background:0 0;border:2px solid #607a86}.jtk-miniview-collapse{color:#f5f5f5;position:absolute;font-size:18px;top:-1px;right:3px;cursor:pointer;font-weight:700}.jtk-miniview-collapse:before{content:"\2012"}.jtk-miniview-collapsed{background-color:#449ea6;border-radius:4px;height:22px;margin-right:0;padding:4px;width:21px}.jtk-miniview-collapsed .jtk-miniview-element,.jtk-miniview-collapsed .jtk-miniview-panner{visibility:hidden}.jtk-miniview-collapsed .jtk-miniview-collapse:before{content:"+"}.jtk-miniview-collapse:hover{color:#e4f013}.jtk-dialog-underlay{left:0;right:0;top:0;bottom:0;position:fixed;z-index:100000;opacity:.8;background-color:#ccc;display:none}.jtk-dialog-overlay{position:fixed;z-index:100001;display:none;background-color:#fff;font-family:"Open Sans",sans-serif;padding:7px;box-shadow:0 0 5px gray;overflow:hidden}.jtk-dialog-overlay-x{max-height:0;transition:max-height .5s ease-in;-moz-transition:max-height .5s ease-in;-ms-transition:max-height .5s ease-in;-o-transition:max-height .5s ease-in;-webkit-transition:max-height .5s 
ease-in}.jtk-dialog-overlay-y{max-width:0;transition:max-width .5s ease-in;-moz-transition:max-width .5s ease-in;-ms-transition:max-width .5s ease-in;-o-transition:max-width .5s ease-in;-webkit-transition:max-width .5s ease-in}.jtk-dialog-overlay-top{top:20px}.jtk-dialog-overlay-bottom{bottom:20px}.jtk-dialog-overlay-left{left:20px}.jtk-dialog-overlay-right{right:20px}.jtk-dialog-overlay-x.jtk-dialog-overlay-visible{max-height:1000px}.jtk-dialog-overlay-y.jtk-dialog-overlay-visible{max-width:1000px}.jtk-dialog-buttons{text-align:right;margin-top:5px}.jtk-dialog-button{border:none;cursor:pointer;margin-right:5px;min-width:56px;background-color:#fff;outline:1px solid #ccc}.jtk-dialog-button:hover{color:#fff;background-color:#234b5e}.jtk-dialog-title{text-align:left;font-size:14px;margin-bottom:9px}.jtk-dialog-content{font-size:12px;text-align:left;min-width:250px;margin:0 14px}.jtk-dialog-content ul{width:100%;padding-left:0}.jtk-dialog-content label{cursor:pointer;font-weight:inherit}.jtk-dialog-overlay input,.jtk-dialog-overlay textarea{background-color:#fff;border:1px solid #ccc;color:#333;font-size:14px;font-style:normal;outline:0;padding:6px 4px;margin-right:6px}.jtk-dialog-overlay input:focus,.jtk-dialog-overlay textarea:focus{background-color:#cbeae1;border:1px solid #83b8a8;color:#333;font-size:14px;font-style:normal;outline:0}.jtk-draw-skeleton{position:absolute;left:0;right:0;top:0;bottom:0;outline:2px solid #84acb3;opacity:.8}.jtk-draw-handle{position:absolute;width:7px;height:7px;background-color:#84acb3}.jtk-draw-handle-tl{left:0;top:0;cursor:nw-resize}.jtk-draw-handle-tr{right:0;top:0;cursor:ne-resize}.jtk-draw-handle-bl{left:0;bottom:0;cursor:sw-resize}.jtk-draw-handle-br{bottom:0;right:0;cursor:se-resize}.jtk-draw-drag{display:none;position:absolute;left:50%;top:50%;margin-left:-10px;margin-top:-10px;width:20px;height:20px;background-color:#84acb3;cursor:move}.jtk-drag-select-defeat *{-webkit-touch-callout:none;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}
.CodeMirror{font-family:monospace;height:300px;color:#000;direction:ltr}.CodeMirror-lines{padding:4px 0}.CodeMirror pre{padding:0 4px}.CodeMirror-gutter-filler,.CodeMirror-scrollbar-filler{background-color:#fff}.CodeMirror-gutters{border-right:1px solid #ddd;background-color:#f7f7f7;white-space:nowrap}.CodeMirror-linenumber{padding:0 3px 0 5px;min-width:20px;text-align:right;color:#999;white-space:nowrap}.CodeMirror-guttermarker{color:#000}.CodeMirror-guttermarker-subtle{color:#999}.CodeMirror-cursor{border-left:1px solid #000;border-right:none;width:0}.CodeMirror div.CodeMirror-secondarycursor{border-left:1px solid silver}.cm-fat-cursor .CodeMirror-cursor{width:auto;border:0!important;background:#7e7}.cm-fat-cursor div.CodeMirror-cursors{z-index:1}.cm-fat-cursor-mark{background-color:rgba(20,255,20,.5);-webkit-animation:blink 1.06s steps(1) infinite;-moz-animation:blink 1.06s steps(1) infinite;animation:blink 1.06s steps(1) infinite}.cm-animate-fat-cursor{width:auto;border:0;-webkit-animation:blink 1.06s steps(1) infinite;-moz-animation:blink 1.06s steps(1) infinite;animation:blink 1.06s steps(1) infinite;background-color:#7e7}@-moz-keyframes blink{50%{background-color:transparent}}@-webkit-keyframes blink{50%{background-color:transparent}}@keyframes blink{50%{background-color:transparent}}.cm-tab{display:inline-block;text-decoration:inherit}.CodeMirror-rulers{position:absolute;left:0;right:0;top:-50px;bottom:-20px;overflow:hidden}.CodeMirror-ruler{border-left:1px solid #ccc;top:0;bottom:0;position:absolute}.cm-s-default .cm-header{color:#00f}.cm-s-default .cm-quote{color:#090}.cm-negative{color:#d44}.cm-positive{color:#292}.cm-header,.cm-strong{font-weight:700}.cm-em{font-style:italic}.cm-link{text-decoration:underline}.cm-strikethrough{text-decoration:line-through}.cm-s-default .cm-keyword{color:#708}.cm-s-default .cm-atom{color:#219}.cm-s-default .cm-number{color:#164}.cm-s-default .cm-def{color:#00f}.cm-s-default .cm-variable-2{color:#05a}.cm-s-default .cm-type,.cm-s-default .cm-variable-3{color:#085}.cm-s-default .cm-comment{color:#a50}.cm-s-default .cm-string{color:#a11}.cm-s-default .cm-string-2{color:#f50}.cm-s-default .cm-meta{color:#555}.cm-s-default .cm-qualifier{color:#555}.cm-s-default .cm-builtin{color:#30a}.cm-s-default .cm-bracket{color:#997}.cm-s-default .cm-tag{color:#170}.cm-s-default .cm-attribute{color:#00c}.cm-s-default .cm-hr{color:#999}.cm-s-default .cm-link{color:#00c}.cm-s-default .cm-error{color:red}.cm-invalidchar{color:red}.CodeMirror-composing{border-bottom:2px solid}div.CodeMirror span.CodeMirror-matchingbracket{color:#0b0}div.CodeMirror span.CodeMirror-nonmatchingbracket{color:#a22}.CodeMirror-matchingtag{background:rgba(255,150,0,.3)}.CodeMirror-activeline-background{background:#e8f2ff}.CodeMirror{position:relative;overflow:hidden;background:#fff}.CodeMirror-scroll{overflow:scroll!important;margin-bottom:-30px;margin-right:-30px;padding-bottom:30px;height:100%;outline:0;position:relative}.CodeMirror-sizer{position:relative;border-right:30px solid 
transparent}.CodeMirror-gutter-filler,.CodeMirror-hscrollbar,.CodeMirror-scrollbar-filler,.CodeMirror-vscrollbar{position:absolute;z-index:6;display:none}.CodeMirror-vscrollbar{right:0;top:0;overflow-x:hidden;overflow-y:scroll}.CodeMirror-hscrollbar{bottom:0;left:0;overflow-y:hidden;overflow-x:scroll}.CodeMirror-scrollbar-filler{right:0;bottom:0}.CodeMirror-gutter-filler{left:0;bottom:0}.CodeMirror-gutters{position:absolute;left:0;top:0;min-height:100%;z-index:3}.CodeMirror-gutter{white-space:normal;height:100%;display:inline-block;vertical-align:top;margin-bottom:-30px}.CodeMirror-gutter-wrapper{position:absolute;z-index:4;background:0 0!important;border:none!important}.CodeMirror-gutter-background{position:absolute;top:0;bottom:0;z-index:4}.CodeMirror-gutter-elt{position:absolute;cursor:default;z-index:4}.CodeMirror-gutter-wrapper ::selection{background-color:transparent}.CodeMirror-gutter-wrapper ::-moz-selection{background-color:transparent}.CodeMirror-lines{cursor:text;min-height:1px}.CodeMirror pre{-moz-border-radius:0;-webkit-border-radius:0;border-radius:0;border-width:0;background:0 0;font-family:inherit;font-size:inherit;margin:0;white-space:pre;word-wrap:normal;line-height:inherit;color:inherit;z-index:2;position:relative;overflow:visible;-webkit-tap-highlight-color:transparent;-webkit-font-variant-ligatures:contextual;font-variant-ligatures:contextual}.CodeMirror-wrap pre{word-wrap:break-word;white-space:pre-wrap;word-break:normal}.CodeMirror-linebackground{position:absolute;left:0;right:0;top:0;bottom:0;z-index:0}.CodeMirror-linewidget{position:relative;z-index:2;padding:.1px}.CodeMirror-rtl pre{direction:rtl}.CodeMirror-code{outline:0}.CodeMirror-gutter,.CodeMirror-gutters,.CodeMirror-linenumber,.CodeMirror-scroll,.CodeMirror-sizer{-moz-box-sizing:content-box;box-sizing:content-box}.CodeMirror-measure{position:absolute;width:100%;height:0;overflow:hidden;visibility:hidden}.CodeMirror-cursor{position:absolute;pointer-events:none}.CodeMirror-measure pre{position:static}div.CodeMirror-cursors{visibility:hidden;position:relative;z-index:3}div.CodeMirror-dragcursors{visibility:visible}.CodeMirror-focused div.CodeMirror-cursors{visibility:visible}.CodeMirror-selected{background:#d9d9d9}.CodeMirror-focused .CodeMirror-selected{background:#d7d4f0}.CodeMirror-crosshair{cursor:crosshair}.CodeMirror-line::selection,.CodeMirror-line>span::selection,.CodeMirror-line>span>span::selection{background:#d7d4f0}.CodeMirror-line::-moz-selection,.CodeMirror-line>span::-moz-selection,.CodeMirror-line>span>span::-moz-selection{background:#d7d4f0}.cm-searching{background-color:#ffa;background-color:rgba(255,255,0,.4)}.cm-force-border{padding-right:.1px}@media print{.CodeMirror div.CodeMirror-cursors{visibility:hidden}}.cm-tab-wrap-hack:after{content:''}span.CodeMirror-selectedtext{background:0 0}
.cm-s-mdn-like.CodeMirror{color:#999;background-color:#fff}.cm-s-mdn-like .CodeMirror-line::selection,.cm-s-mdn-like .CodeMirror-line>span::selection,.cm-s-mdn-like .CodeMirror-line>span>span::selection,.cm-s-mdn-like div.CodeMirror-selected{background:#cfc}.cm-s-mdn-like .CodeMirror-line::-moz-selection,.cm-s-mdn-like .CodeMirror-line>span::-moz-selection,.cm-s-mdn-like .CodeMirror-line>span>span::-moz-selection{background:#cfc}.cm-s-mdn-like .CodeMirror-gutters{background:#f8f8f8;border-left:6px solid rgba(0,83,159,.65);color:#333}.cm-s-mdn-like .CodeMirror-linenumber{color:#aaa;padding-left:8px}.cm-s-mdn-like .CodeMirror-cursor{border-left:2px solid #222}.cm-s-mdn-like .cm-keyword{color:#6262FF}.cm-s-mdn-like .cm-atom{color:#F90}.cm-s-mdn-like .cm-number{color:#ca7841}.cm-s-mdn-like .cm-def{color:#8DA6CE}.cm-s-mdn-like span.cm-tag,.cm-s-mdn-like span.cm-variable-2{color:#690}.cm-s-mdn-like .cm-variable,.cm-s-mdn-like span.cm-def,.cm-s-mdn-like span.cm-variable-3{color:#07a}.cm-s-mdn-like .cm-property{color:#905}.cm-s-mdn-like .cm-qualifier{color:#690}.cm-s-mdn-like .cm-operator{color:#cda869}.cm-s-mdn-like .cm-comment{color:#777;font-weight:400}.cm-s-mdn-like .cm-string{color:#07a;font-style:italic}.cm-s-mdn-like .cm-string-2{color:#bd6b18}.cm-s-mdn-like .cm-meta{color:#000}.cm-s-mdn-like .cm-builtin{color:#9B7536}.cm-s-mdn-like .cm-tag{color:#997643}.cm-s-mdn-like .cm-attribute{color:#d6bb6d}.cm-s-mdn-like .cm-header{color:#FF6400}.cm-s-mdn-like .cm-hr{color:#AEAEAE}.cm-s-mdn-like .cm-link{color:#ad9361;font-style:italic;text-decoration:none}.cm-s-mdn-like .cm-error{border-bottom:1px solid red}div.cm-s-mdn-like .CodeMirror-activeline-background{background:#efefff}div.cm-s-mdn-like span.CodeMirror-matchingbracket{outline:grey solid 1px;color:inherit}.cm-s-mdn-like.CodeMirror{background-image:url(/~/codemirror/5.20.0/theme/mdn-like.min.css)}
.CodeMirror-hints{position:absolute;z-index:10;overflow:hidden;list-style:none;margin:0;padding:2px;-webkit-box-shadow:2px 3px 5px rgba(0,0,0,.2);-moz-box-shadow:2px 3px 5px rgba(0,0,0,.2);box-shadow:2px 3px 5px rgba(0,0,0,.2);border-radius:3px;border:1px solid silver;background:#fff;font-size:90%;font-family:monospace;max-height:20em;overflow-y:auto}.CodeMirror-hint{margin:0;padding:0 4px;border-radius:2px;white-space:pre;color:#000;cursor:pointer}li.CodeMirror-hint-active{background:#08f;color:#fff}