Commit de996377 authored by khadgarmage, committed by qiaozhanwei

reset pgsql (#1178)

Parent 035f4554
 FROM ubuntu:18.04
 ENV LANG=C.UTF-8
+ENV DEBIAN_FRONTEND=noninteractive
 ARG version
 ARG tar_version
@@ -52,52 +53,46 @@ RUN cd /opt && \
 ENV NODE_HOME=/opt/node
 ENV PATH $PATH:$NODE_HOME/bin
-#5,add dolphinscheduler source code to /opt/dolphinscheduler_source
+#5,install postgresql
+RUN apt-get update && \
+    apt-get install -y postgresql postgresql-contrib sudo && \
+    sed -i 's/localhost/*/g' /etc/postgresql/10/main/postgresql.conf
+#6,install nginx
+RUN apt-get update && \
+    apt-get install -y nginx && \
+    rm -rf /var/lib/apt/lists/* && \
+    echo "\ndaemon off;" >> /etc/nginx/nginx.conf && \
+    chown -R www-data:www-data /var/lib/nginx
+#7,install sudo,python,vim,ping and ssh command
+RUN apt-get update && \
+    apt-get -y install sudo && \
+    apt-get -y install python && \
+    apt-get -y install vim && \
+    apt-get -y install iputils-ping && \
+    apt-get -y install net-tools && \
+    apt-get -y install openssh-server && \
+    apt-get -y install python-pip && \
+    pip install kazoo
+#8,add dolphinscheduler source code to /opt/dolphinscheduler_source
 ADD . /opt/dolphinscheduler_source
-#5,backend compilation
+#9,backend compilation
 RUN cd /opt/dolphinscheduler_source && \
     mvn -U clean package assembly:assembly -Dmaven.test.skip=true
-#6,frontend compilation
+#10,frontend compilation
 RUN chmod -R 777 /opt/dolphinscheduler_source/dolphinscheduler-ui && \
     cd /opt/dolphinscheduler_source/dolphinscheduler-ui && \
     rm -rf /opt/dolphinscheduler_source/dolphinscheduler-ui/node_modules && \
     npm install node-sass --unsafe-perm && \
     npm install && \
     npm run build
-#7,install mysql
-RUN echo "deb http://cn.archive.ubuntu.com/ubuntu/ xenial main restricted universe multiverse" >> /etc/apt/sources.list
-RUN echo "mysql-server mysql-server/root_password password root" | debconf-set-selections
-RUN echo "mysql-server mysql-server/root_password_again password root" | debconf-set-selections
-RUN apt-get update && \
-    apt-get -y install mysql-server-5.7 && \
-    mkdir -p /var/lib/mysql && \
-    mkdir -p /var/run/mysqld && \
-    mkdir -p /var/log/mysql && \
-    chown -R mysql:mysql /var/lib/mysql && \
-    chown -R mysql:mysql /var/run/mysqld && \
-    chown -R mysql:mysql /var/log/mysql
-# UTF-8 and bind-address
-RUN sed -i -e "$ a [client]\n\n[mysql]\n\n[mysqld]" /etc/mysql/my.cnf && \
-    sed -i -e "s/\(\[client\]\)/\1\ndefault-character-set = utf8/g" /etc/mysql/my.cnf && \
-    sed -i -e "s/\(\[mysql\]\)/\1\ndefault-character-set = utf8/g" /etc/mysql/my.cnf && \
-    sed -i -e "s/\(\[mysqld\]\)/\1\ninit_connect='SET NAMES utf8'\ncharacter-set-server = utf8\ncollation-server=utf8_general_ci\nbind-address = 0.0.0.0/g" /etc/mysql/my.cnf
-#8,install nginx
-RUN apt-get update && \
-    apt-get install -y nginx && \
-    rm -rf /var/lib/apt/lists/* && \
-    echo "\ndaemon off;" >> /etc/nginx/nginx.conf && \
-    chown -R www-data:www-data /var/lib/nginx
-#9,modify dolphinscheduler configuration file
+#11,modify dolphinscheduler configuration file
 #backend configuration
 RUN mkdir -p /opt/dolphinscheduler && \
     tar -zxvf /opt/dolphinscheduler_source/target/dolphinscheduler-${tar_version}.tar.gz -C /opt/dolphinscheduler && \
@@ -106,22 +101,11 @@ ADD ./dockerfile/conf/dolphinscheduler/conf /opt/dolphinscheduler/conf
 #frontend nginx configuration
 ADD ./dockerfile/conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d
-#10,open port
+#12,open port
 EXPOSE 2181 2888 3888 3306 80 12345 8888
-#11,install sudo,python,vim,ping and ssh command
-RUN apt-get update && \
-    apt-get -y install sudo && \
-    apt-get -y install python && \
-    apt-get -y install vim && \
-    apt-get -y install iputils-ping && \
-    apt-get -y install net-tools && \
-    apt-get -y install openssh-server && \
-    apt-get -y install python-pip && \
-    pip install kazoo
 COPY ./dockerfile/startup.sh /root/startup.sh
-#12,modify permissions and set soft links
+#13,modify permissions and set soft links
 RUN chmod +x /root/startup.sh && \
     chmod +x /opt/dolphinscheduler/script/create-dolphinscheduler.sh && \
     chmod +x /opt/zookeeper/bin/zkServer.sh && \
......
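For orientation, a minimal sketch of how an image built from the Dockerfile above might be built and run; the tag name, the version/tar_version build-arg values, and the port mapping (which assumes the bundled nginx conf listens on 8888) are illustrative assumptions, not taken from this commit:

    # hypothetical tag and build-arg values; adjust to the release actually being packaged
    docker build --build-arg version=1.2.0 --build-arg tar_version=1.2.0-SNAPSHOT -t dolphinscheduler:pg .
    # publish the web UI (8888) and API server (12345) ports listed in EXPOSE
    docker run -d -p 8888:8888 -p 12345:12345 dolphinscheduler:pg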
-# mysql
-# url=jdbc:postgresql://192.168.220.154:5432/dolphinscheduler
 # base spring data source configuration
 spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
-#spring.datasource.driver-class-name=org.postgresql.Driver
-spring.datasource.driver-class-name=com.mysql.jdbc.Driver
-spring.datasource.url=jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
+# postgre
+spring.datasource.driver-class-name=org.postgresql.Driver
+spring.datasource.url=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
 spring.datasource.username=root
 spring.datasource.password=root@123
@@ -29,7 +27,7 @@ spring.datasource.timeBetweenConnectErrorMillis=60000
 spring.datasource.minEvictableIdleTimeMillis=300000
 #the SQL used to check whether the connection is valid requires a query statement. If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
-spring.datasource.validationQuery=SELECT 1 FROM DUAL
+spring.datasource.validationQuery=SELECT 1
 #check whether the connection is valid for timeout, in seconds
 spring.datasource.validationQueryTimeout=3
@@ -57,21 +55,21 @@ mybatis-plus.mapper-locations=classpath*:/org.apache.dolphinscheduler.dao.mapper
 mybatis-plus.typeEnumsPackage=org.apache.dolphinscheduler.*.enums
-#实体扫描,多个package用逗号或者分号分隔
+#Entity scan, where multiple packages are separated by a comma or semicolon
 mybatis-plus.typeAliasesPackage=org.apache.dolphinscheduler.dao.entity
-#主键类型 AUTO:"数据库ID自增", INPUT:"用户输入ID", ID_WORKER:"全局唯一ID (数字类型唯一ID)", UUID:"全局唯一ID UUID";
+#Primary key type AUTO:" database ID AUTO ", INPUT:" user INPUT ID", ID_WORKER:" global unique ID (numeric type unique ID)", UUID:" global unique ID UUID";
 mybatis-plus.global-config.db-config.id-type=AUTO
-#字段策略 IGNORED:"忽略判断",NOT_NULL:"非 NULL 判断"),NOT_EMPTY:"非空判断"
+#Field policy IGNORED:" ignore judgment ",NOT_NULL:" not NULL judgment "),NOT_EMPTY:" not NULL judgment"
 mybatis-plus.global-config.db-config.field-strategy=NOT_NULL
-#驼峰下划线转换
+#The hump underline is converted
 mybatis-plus.global-config.db-config.column-underline=true
 mybatis-plus.global-config.db-config.logic-delete-value=-1
 mybatis-plus.global-config.db-config.logic-not-delete-value=0
 mybatis-plus.global-config.db-config.banner=false
-#原生配置
+#The original configuration
 mybatis-plus.configuration.map-underscore-to-camel-case=true
 mybatis-plus.configuration.cache-enabled=false
 mybatis-plus.configuration.call-setters-on-nulls=true
@@ -80,9 +78,9 @@ mybatis-plus.configuration.jdbc-type-for-null=null
 # data quality analysis is not currently in use. please ignore the following configuration
 # task record flag
 task.record.flag=false
-task.record.datasource.url=jdbc:mysql://127.0.0.1:3306/etl?characterEncoding=UTF-8
-task.record.datasource.username=xx
-task.record.datasource.password=xx
+task.record.datasource.url=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
+task.record.datasource.username=root
+task.record.datasource.password=root@123
 # Logger Config
 logging.level.org.apache.dolphinscheduler.dao=debug
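The datasource block above can be sanity-checked from inside the container with the same credentials it declares; a small sketch, assuming the postgresql client installed by the Dockerfile is on the PATH:

    # mirrors spring.datasource.validationQuery; prints 1 when the user, password and database all line up
    PGPASSWORD='root@123' psql -h 127.0.0.1 -p 5432 -U root -d dolphinscheduler -tAc 'SELECT 1'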
@@ -9,7 +9,7 @@
         limit 1
     </select>
     <select id="countCommandState" resultType="org.apache.dolphinscheduler.dao.entity.CommandCount">
-        select cmd.command_type as state, count(1) as count
+        select cmd.command_type as command_type, count(1) as count
         from t_ds_command cmd, t_ds_process_definition process
         where cmd.process_definition_id = process.id
         <if test="projectIdArray != null and projectIdArray.length != 0">
......
@@ -2,7 +2,7 @@
 <!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
 <mapper namespace="org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper">
     <select id="countCommandState" resultType="org.apache.dolphinscheduler.dao.entity.CommandCount">
-        select cmd.command_type as commandType, count(1) as count
+        select cmd.command_type as command_type, count(1) as count
         from t_ds_error_command cmd, t_ds_process_definition process
         where cmd.process_definition_id = process.id
         <if test="projectIdArray != null and projectIdArray.length != 0">
......
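Both command mappers above switch the alias to command_type, which lines up with map-underscore-to-camel-case=true in the DAO properties above, so the column label can auto-map onto CommandCount's commandType field. A hedged, simplified way to see the label the database returns (the process-definition join and project filter are dropped here purely for brevity, assuming the schema from this commit is loaded):

    PGPASSWORD='root@123' psql -h 127.0.0.1 -U root -d dolphinscheduler \
      -c 'select cmd.command_type as command_type, count(1) as count from t_ds_command cmd group by cmd.command_type'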
@@ -12,10 +12,14 @@
         and pd.name = #{processDefinitionName}
     </select>
     <select id="queryDefineListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
-        SELECT td.*,sc.schedule_release_state
+        SELECT td.*,sc.schedule_release_state,tu.user_name
         FROM t_ds_process_definition td
         left join (select process_definition_id,release_state as schedule_release_state from t_ds_schedules group by process_definition_id,release_state) sc on sc.process_definition_id = td.id
+        left join t_ds_user tu on td.user_id = tu.id
         where td.project_id = #{projectId}
+        <if test=" isAdmin == false ">
+            and tu.user_type=1
+        </if>
         <if test=" searchVal != null and searchVal != ''">
             and td.name like concat('%', #{searchVal}, '%')
         </if>
@@ -24,12 +28,18 @@
         </if>
         order by sc.schedule_release_state desc,td.update_time desc
     </select>
     <select id="queryAllDefinitionList" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
         select *
         from t_ds_process_definition
         where project_id = #{projectId}
         order by create_time desc
     </select>
+    <select id="queryDefinitionListByTenant" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
+        select *
+        from t_ds_process_definition
+        where tenant_id = #{tenantId}
+    </select>
     <select id="queryDefinitionListByIdList" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
         select *
         from t_ds_process_definition
@@ -43,6 +53,9 @@
         FROM t_ds_process_definition td
         JOIN t_ds_user tu on tu.id=td.user_id
         where 1 = 1
+        <if test=" isAdmin == false ">
+            and tu.user_type=1
+        </if>
         <if test="projectIds != null and projectIds.length != 0">
             and td.project_id in
             <foreach collection="projectIds" index="index" item="i" open="(" separator="," close=")">
@@ -51,6 +64,16 @@
         </if>
         group by td.user_id,tu.user_name
     </select>
+    <select id="queryByDefineId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
+        SELECT
+            pd.*, u.user_name,
+            p.name AS project_name
+        FROM
+            t_ds_process_definition pd,
+            t_ds_user u,
+            t_ds_project p
+        WHERE
+            pd.user_id = u.id AND pd.project_id = p.id
+            AND pd.id = #{processDefineId}
+    </select>
 </mapper>
\ No newline at end of file
@@ -19,6 +19,35 @@
         </foreach>
         order by id asc
     </select>
+    <select id="queryByTenantIdAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
+        select *
+        from t_ds_process_instance
+        where 1=1
+        <if test="tenantId != -1">
+            and tenant_id =#{tenantId}
+        </if>
+        and state in
+        <foreach collection="states" item="i" open="(" close=")" separator=",">
+            #{i}
+        </foreach>
+        order by id asc
+    </select>
+    <select id="queryByWorkerGroupIdAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
+        select *
+        from t_ds_process_instance
+        where 1=1
+        <if test="workerGroupId != -1">
+            and worker_group_id =#{workerGroupId}
+        </if>
+        and state in
+        <foreach collection="states" item="i" open="(" close=")" separator=",">
+            #{i}
+        </foreach>
+        order by id asc
+    </select>
     <select id="queryProcessInstanceListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
         select instance.*
         from t_ds_process_instance instance
@@ -59,6 +88,19 @@
         set state = #{destState}
         where state = #{originState}
     </update>
+    <update id="updateProcessInstanceByTenantId">
+        update t_ds_process_instance
+        set tenant_id = #{destTenantId}
+        where tenant_id = #{originTenantId}
+    </update>
+    <update id="updateProcessInstanceByWorkerGroupId">
+        update t_ds_process_instance
+        set worker_group_id = #{destWorkerGroupId}
+        where worker_group_id = #{originWorkerGroupId}
+    </update>
     <select id="countInstanceStateByUser" resultType="org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount">
         select t.state, count(0) as count
         from t_ds_process_instance t
......
@@ -6,4 +6,10 @@
         from t_ds_session
         where user_id = #{userId}
     </select>
+    <select id="queryByUserIdAndIp" resultType="org.apache.dolphinscheduler.dao.entity.Session">
+        select *
+        from t_ds_session
+        where user_id = #{userId} AND ip = #{ip}
+    </select>
 </mapper>
\ No newline at end of file
@@ -50,7 +50,6 @@
             #{i}
         </foreach>
         </if>
-        and t.flag = 1
         <if test="startTime != null and endTime != null">
             and t.start_time > #{startTime} and t.start_time <![CDATA[ <= ]]> #{endTime}
         </if>
......
@@ -3,10 +3,10 @@
 <mapper namespace="org.apache.dolphinscheduler.dao.mapper.UDFUserMapper">
     <delete id="deleteByUserId">
         delete from t_ds_relation_udfs_user
-        where user_id=#{userId}
+        where user_id = #{userId}
     </delete>
     <delete id="deleteByUdfFuncId">
         delete from t_ds_relation_udfs_user
-        where udf_id=#{udfFuncId}
+        where udf_id = #{udfFuncId}
     </delete>
 </mapper>
\ No newline at end of file
@@ -12,7 +12,7 @@
         </foreach>
         </if>
         <if test="funcNames != null and funcNames != ''">
-            and func_name = #{funcName}
+            and func_name = #{funcNames}
         </if>
         order by id asc
     </select>
@@ -21,7 +21,7 @@
         from t_ds_udfs
         where 1=1
         <if test="searchVal!= null and searchVal != ''">
-            and name like concat('%', #{searchVal}, '%')
+            and func_name like concat('%', #{searchVal}, '%')
         </if>
         <if test="userId != 0">
             and id in (
......
 <?xml version="1.0" encoding="UTF-8" ?>
 <!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
 <mapper namespace="org.apache.dolphinscheduler.dao.mapper.UserMapper">
     <select id="queryAllGeneralUser" resultType="org.apache.dolphinscheduler.dao.entity.User">
         select * from t_ds_user
         where user_type=1;
@@ -14,7 +13,6 @@
         select * from t_ds_user
         where user_name=#{userName} and user_password = #{password}
     </select>
     <select id="queryUserPaging" resultType="org.apache.dolphinscheduler.dao.entity.User">
         select u.id,u.user_name,u.user_password,u.user_type,u.email,u.phone,u.tenant_id,u.create_time,
         u.update_time,t.tenant_name,
@@ -28,20 +26,22 @@
         </if>
         order by u.update_time desc
     </select>
     <select id="queryDetailsById" resultType="org.apache.dolphinscheduler.dao.entity.User">
         select u.*, t.tenant_name,
         case when u.queue <![CDATA[ <> ]]> '' then u.queue else q.queue_name end as queue_name
         from t_ds_user u,t_ds_tenant t,t_ds_queue q
         WHERE u.tenant_id = t.id and t.queue_id = q.id and u.id = #{userId}
     </select>
     <select id="queryUserListByAlertGroupId" resultType="org.apache.dolphinscheduler.dao.entity.User">
         select u.*
         from t_ds_user u, t_ds_relation_user_alertgroup rel
-        where u.id = rel.user_id AND u.user_type = 1 AND rel.alertgroup_id = #{alertgroupId}
+        where u.id = rel.user_id AND rel.alertgroup_id = #{alertgroupId}
+    </select>
+    <select id="queryUserListByTenant" resultType="org.apache.dolphinscheduler.dao.entity.User">
+        select *
+        from t_ds_user
+        where tenant_id = #{tenantId}
     </select>
     <select id="queryTenantCodeByUserId" resultType="org.apache.dolphinscheduler.dao.entity.User">
         SELECT u.*,t.tenant_code
         FROM t_ds_user u, t_ds_tenant t
......
@@ -18,7 +18,7 @@ org.quartz.threadPool.threadPriority = 5
 #============================================================================
 # Configure JobStore
 #============================================================================
 org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
 org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
 org.quartz.jobStore.tablePrefix = QRTZ_
@@ -28,12 +28,12 @@ org.quartz.jobStore.clusterCheckinInterval = 5000
 org.quartz.jobStore.dataSource = myDs
 #============================================================================
 # Configure Datasources
 #============================================================================
 org.quartz.dataSource.myDs.connectionProvider.class = org.apache.dolphinscheduler.server.quartz.DruidConnectionProvider
-org.quartz.dataSource.myDs.driver = com.mysql.jdbc.Driver
-org.quartz.dataSource.myDs.URL=jdbc:mysql://127.0.0.1:3306/dolphinscheduler?characterEncoding=utf8
+org.quartz.dataSource.myDs.driver = org.postgresql.Driver
+org.quartz.dataSource.myDs.URL=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
 org.quartz.dataSource.myDs.user=root
 org.quartz.dataSource.myDs.password=root@123
 org.quartz.dataSource.myDs.maxConnections = 10
 org.quartz.dataSource.myDs.validationQuery = select 1
\ No newline at end of file
 #! /bin/bash
 set -e
-if [ `netstat -anop|grep mysql|wc -l` -gt 0 ];then
-    echo "MySQL is Running."
-else
-    MYSQL_ROOT_PWD="root@123"
-    ESZ_DB="dolphinscheduler"
-    echo "start mysql service"
-    chown -R mysql:mysql /var/lib/mysql /var/run/mysqld
-    find /var/lib/mysql -type f -exec touch {} \; && service mysql restart $ sleep 10
-    if [ ! -f /nohup.out ];then
-        echo "setting mysql password"
-        mysql --user=root --password=root -e "UPDATE mysql.user set authentication_string=password('$MYSQL_ROOT_PWD') where user='root'; FLUSH PRIVILEGES;"
-        echo "setting mysql permission"
-        mysql --user=root --password=$MYSQL_ROOT_PWD -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '$MYSQL_ROOT_PWD' WITH GRANT OPTION; FLUSH PRIVILEGES;"
-        echo "create dolphinscheduler database"
-        mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;"
-        echo "import mysql data"
-        nohup /opt/dolphinscheduler/script/create-dolphinscheduler.sh &
-        sleep 90
-    fi
-    if [ `mysql --user=root --password=$MYSQL_ROOT_PWD -s -r -e "SELECT count(TABLE_NAME) FROM information_schema.TABLES WHERE TABLE_SCHEMA='dolphinscheduler';" | grep -v count` -eq 38 ];then
-        echo "\`$ESZ_DB\` the number of tables is correct"
-    else
-        echo "\`$ESZ_DB\` the number of tables is incorrect"
-        mysql --user=root --password=$MYSQL_ROOT_PWD -e "DROP DATABASE \`$ESZ_DB\`;"
-        echo "create dolphinscheduler database"
-        mysql --user=root --password=$MYSQL_ROOT_PWD -e "CREATE DATABASE IF NOT EXISTS \`$ESZ_DB\` CHARACTER SET utf8 COLLATE utf8_general_ci; FLUSH PRIVILEGES;"
-        echo "import mysql data"
-        nohup /opt/dolphinscheduler/script/create-dolphinscheduler.sh &
-        sleep 90
-    fi
-fi
+echo "start postgresql service"
+/etc/init.d/postgresql restart
+echo "create user and init db"
+sudo -u postgres psql <<'ENDSSH'
+create user root with password 'root@123';
+create database dolphinscheduler owner root;
+grant all privileges on database dolphinscheduler to root;
+\q
+ENDSSH
+echo "import sql data"
+/opt/dolphinscheduler/script/create-dolphinscheduler.sh
 /opt/zookeeper/bin/zkServer.sh restart
......
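A short sketch of how the new initialization in startup.sh could be verified from inside a running container; the paths and credentials are the ones used in the script and properties above, while the exact table set depends on the DolphinScheduler version, so no count is asserted here:

    # the role and database created by the psql here-document
    sudo -u postgres psql -c '\l dolphinscheduler'
    # the t_ds_* tables imported by create-dolphinscheduler.sh
    PGPASSWORD='root@123' psql -h 127.0.0.1 -U root -d dolphinscheduler -c '\dt t_ds_*'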