Commit 9135b142 authored by journey2018

add resource upload s3

Parent 947f3ade
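
Summary (editor's note): this commit makes the resource center's storage backend configurable. The boolean switch hdfs.startup.state is replaced by res.upload.startup.type, whose value (HDFS, S3, or NONE) is modeled by a new ResUploadType enum; the services guard resource operations with a new PropertyUtils.getResUploadStartupState() helper, and HadoopUtils gains an S3 branch that wires up the fs.s3a.* connector settings. The same guard replacement is applied across ResourcesService, TenantService, UdfFuncService, and UsersService.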
ResourcesService.java

@@ -86,8 +86,8 @@ public class ResourcesService extends BaseService {
         Result result = new Result();
 
         // if hdfs not startup
-        if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
-            logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
+        if (!PropertyUtils.getResUploadStartupState()){
+            logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
             putMsg(result, Status.HDFS_NOT_STARTUP);
             return result;
         }
@@ -185,9 +185,9 @@ public class ResourcesService extends BaseService {
                              ResourceType type) {
         Result result = new Result();
 
-        // if hdfs not startup
-        if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
-            logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
+        // if resource upload startup
+        if (!PropertyUtils.getResUploadStartupState()){
+            logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
             putMsg(result, Status.HDFS_NOT_STARTUP);
             return result;
         }
@@ -386,9 +386,9 @@ public class ResourcesService extends BaseService {
     public Result delete(User loginUser, int resourceId) throws Exception {
         Result result = new Result();
 
-        // if hdfs not startup
-        if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
-            logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
+        // if resource upload startup
+        if (!PropertyUtils.getResUploadStartupState()){
+            logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
             putMsg(result, Status.HDFS_NOT_STARTUP);
             return result;
         }
@@ -449,9 +449,9 @@ public class ResourcesService extends BaseService {
     public Result readResource(int resourceId, int skipLineNum, int limit) {
         Result result = new Result();
 
-        // if hdfs not startup
-        if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
-            logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
+        // if resource upload startup
+        if (!PropertyUtils.getResUploadStartupState()){
+            logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
             putMsg(result, Status.HDFS_NOT_STARTUP);
             return result;
         }
@@ -510,9 +510,9 @@ public class ResourcesService extends BaseService {
     @Transactional(value = "TransactionManager",rollbackFor = Exception.class)
     public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content) {
         Result result = new Result();
-        // if hdfs not startup
-        if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
-            logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
+        // if resource upload startup
+        if (!PropertyUtils.getResUploadStartupState()){
+            logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
             putMsg(result, Status.HDFS_NOT_STARTUP);
             return result;
         }
@@ -573,9 +573,9 @@ public class ResourcesService extends BaseService {
     public Result updateResourceContent(int resourceId, String content) {
         Result result = new Result();
 
-        // if hdfs not startup
-        if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
-            logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
+        // if resource upload startup
+        if (!PropertyUtils.getResUploadStartupState()){
+            logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
             putMsg(result, Status.HDFS_NOT_STARTUP);
             return result;
         }
@@ -663,9 +663,9 @@ public class ResourcesService extends BaseService {
      * @return
      */
    public org.springframework.core.io.Resource downloadResource(int resourceId) throws Exception {
-        // if hdfs not startup
-        if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
-            logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
+        // if resource upload startup
+        if (!PropertyUtils.getResUploadStartupState()){
+            logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
             throw new RuntimeException("hdfs not startup");
         }
TenantService.java

@@ -96,7 +96,7 @@ public class TenantService extends BaseService{
             tenantMapper.insert(tenant);
 
             // if hdfs startup
-            if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
+            if (PropertyUtils.getResUploadStartupState()){
                 String resourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + tenantCode + "/resources";
                 String udfsPath = HadoopUtils.getHdfsUdfDir(tenantCode);
                 /**
@@ -178,7 +178,7 @@ public class TenantService extends BaseService{
         Tenant newTenant = tenantMapper.queryByTenantCode(tenantCode);
         if (newTenant == null){
             // if hdfs startup
-            if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
+            if (PropertyUtils.getResUploadStartupState()){
                 String resourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + tenantCode + "/resources";
                 String udfsPath = HadoopUtils.getHdfsUdfDir(tenantCode);
                 //init hdfs resource
UdfFuncService.java

@@ -80,9 +80,9 @@ public class UdfFuncService extends BaseService{
                               int resourceId) {
         Result result = new Result();
 
-        // if hdfs not startup
-        if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
-            logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
+        // if resource upload startup
+        if (!PropertyUtils.getResUploadStartupState()){
+            logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
             putMsg(result, Status.HDFS_NOT_STARTUP);
             return result;
         }
@@ -167,9 +167,9 @@ public class UdfFuncService extends BaseService{
         // verify udfFunc is exist
         UdfFunc udf = udfFuncMapper.queryUdfById(udfFuncId);
 
-        // if hdfs not startup
-        if (!PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
-            logger.error("hdfs startup state: {}", PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE));
+        // if resource upload startup
+        if (!PropertyUtils.getResUploadStartupState()){
+            logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
             putMsg(result, Status.HDFS_NOT_STARTUP);
             return result;
         }
UsersService.java

@@ -125,7 +125,7 @@ public class UsersService extends BaseService {
         Tenant tenant = tenantMapper.queryById(tenantId);
 
         // if hdfs startup
-        if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
+        if (PropertyUtils.getResUploadStartupState()){
             String userPath = HadoopUtils.getHdfsDataBasePath() + "/" + tenant.getTenantCode() + "/home/" + user.getId();
 
             HadoopUtils.getInstance().mkdir(userPath);
@@ -245,7 +245,7 @@ public class UsersService extends BaseService {
             Tenant newTenant = tenantMapper.queryById(tenantId);
             if (newTenant != null) {
                 // if hdfs startup
-                if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
+                if (PropertyUtils.getResUploadStartupState()){
                     String newTenantCode = newTenant.getTenantCode();
                     String oldResourcePath = HadoopUtils.getHdfsDataBasePath() + "/" + oldTenant.getTenantCode() + "/resources";
                     String oldUdfsPath = HadoopUtils.getHdfsUdfDir(oldTenant.getTenantCode());
@@ -308,7 +308,7 @@ public class UsersService extends BaseService {
         User user = userMapper.queryTenantCodeByUserId(id);
 
-        if (PropertyUtils.getBoolean(cn.escheduler.common.Constants.HDFS_STARTUP_STATE)){
+        if (PropertyUtils.getResUploadStartupState()){
             String userPath = HadoopUtils.getHdfsDataBasePath() + "/" + user.getTenantCode() + "/home/" + id;
 
             HadoopUtils.getInstance().delete(userPath, true);
Constants.java (cn.escheduler.common)

@@ -60,6 +60,23 @@ public final class Constants {
      */
     public static final String FS_DEFAULTFS = "fs.defaultFS";
 
+    /**
+     * fs s3a endpoint
+     */
+    public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint";
+
+    /**
+     * fs s3a access key
+     */
+    public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key";
+
+    /**
+     * fs s3a secret key
+     */
+    public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key";
+
     /**
      * yarn.resourcemanager.ha.rm.idsfs.defaultFS
      */
@@ -123,9 +140,9 @@ public final class Constants {
     public static final String DEVELOPMENT_STATE = "development.state";
 
     /**
-     * hdfs.startup.state
+     * res.upload.startup.type
      */
-    public static final String HDFS_STARTUP_STATE = "hdfs.startup.state";
+    public static final String RES_UPLOAD_STARTUP_TYPE = "res.upload.startup.type";
 
     /**
      * zookeeper quorum
ResUploadType.java (new file, cn.escheduler.common.enums)

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package cn.escheduler.common.enums;

/**
 * resource upload types
 */
public enum ResUploadType {
    /**
     * 0 hdfs
     * 1 s3
     * 2 none
     */
    HDFS, S3, NONE
}
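
For illustration only (not part of the commit), here is how the enum is meant to be consumed, mirroring the branching added to HadoopUtils below; ResUploadTypeDemo is a hypothetical class, while PropertyUtils and Constants are the classes this commit touches:

import cn.escheduler.common.Constants;
import cn.escheduler.common.enums.ResUploadType;
import cn.escheduler.common.utils.PropertyUtils;

public class ResUploadTypeDemo {
    public static String describe() {
        // valueOf is case-sensitive: the property value must be exactly HDFS, S3 or NONE
        String raw = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
        switch (ResUploadType.valueOf(raw)) {
            case HDFS: return "resources go to HDFS (optionally kerberized)";
            case S3:   return "resources go to S3 via the fs.s3a.* connector";
            default:   return "resource upload disabled";
        }
    }
}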
HadoopUtils.java

@@ -18,6 +18,7 @@ package cn.escheduler.common.utils;
 
 import cn.escheduler.common.Constants;
 import cn.escheduler.common.enums.ExecutionStatus;
+import cn.escheduler.common.enums.ResUploadType;
 import com.alibaba.fastjson.JSON;
 import com.alibaba.fastjson.JSONException;
 import com.alibaba.fastjson.JSONObject;
@@ -40,6 +41,7 @@ import java.util.stream.Stream;
 
 import static cn.escheduler.common.Constants.*;
 import static cn.escheduler.common.utils.PropertyUtils.*;
+import static cn.escheduler.common.utils.PropertyUtils.getString;
 
 /**
  * hadoop utils
@@ -94,48 +96,61 @@ public class HadoopUtils implements Closeable {
         try {
             configuration = new Configuration();
 
-            if (getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE)){
-                System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF,
-                        getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH));
-                configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
-                UserGroupInformation.setConfiguration(configuration);
-                UserGroupInformation.loginUserFromKeytab(getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
-                        getString(Constants.LOGIN_USER_KEY_TAB_PATH));
-            }
-
-            String defaultFS = configuration.get(FS_DEFAULTFS);
-            //first get key from core-site.xml hdfs-site.xml ,if null ,then try to get from properties file
-            // the default is the local file system
-            if(defaultFS.startsWith("file")){
-                String defaultFSProp = getString(FS_DEFAULTFS);
-                if(StringUtils.isNotBlank(defaultFSProp)){
-                    Map<String, String> fsRelatedProps = getPrefixedProperties("fs.");
-                    configuration.set(FS_DEFAULTFS,defaultFSProp);
-                    fsRelatedProps.entrySet().stream().forEach(entry -> configuration.set(entry.getKey(), entry.getValue()));
-                }else{
-                    logger.error("property:{} can not to be empty, please set!");
-                    throw new RuntimeException("property:{} can not to be empty, please set!");
-                }
-            }else{
-                logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", FS_DEFAULTFS, defaultFS);
-            }
-
-            if (fs == null) {
-                if(StringUtils.isNotEmpty(hdfsUser)){
-                    //UserGroupInformation ugi = UserGroupInformation.createProxyUser(hdfsUser,UserGroupInformation.getLoginUser());
-                    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
-                    ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
-                        @Override
-                        public Boolean run() throws Exception {
-                            fs = FileSystem.get(configuration);
-                            return true;
-                        }
-                    });
-                }else{
-                    logger.warn("hdfs.root.user is not set value!");
-                    fs = FileSystem.get(configuration);
-                }
-            }
+            String resUploadStartupType = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
+            ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
+
+            if (resUploadType == ResUploadType.HDFS){
+                if (getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE)){
+                    System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF,
+                            getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH));
+                    configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
+                    UserGroupInformation.setConfiguration(configuration);
+                    UserGroupInformation.loginUserFromKeytab(getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
+                            getString(Constants.LOGIN_USER_KEY_TAB_PATH));
+                }
+
+                String defaultFS = configuration.get(FS_DEFAULTFS);
+                //first get key from core-site.xml hdfs-site.xml ,if null ,then try to get from properties file
+                // the default is the local file system
+                if(defaultFS.startsWith("file")){
+                    String defaultFSProp = getString(FS_DEFAULTFS);
+                    if(StringUtils.isNotBlank(defaultFSProp)){
+                        Map<String, String> fsRelatedProps = getPrefixedProperties("fs.");
+                        configuration.set(FS_DEFAULTFS,defaultFSProp);
+                        fsRelatedProps.entrySet().stream().forEach(entry -> configuration.set(entry.getKey(), entry.getValue()));
+                    }else{
+                        logger.error("property:{} can not to be empty, please set!");
+                        throw new RuntimeException("property:{} can not to be empty, please set!");
+                    }
+                }else{
+                    logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", FS_DEFAULTFS, defaultFS);
+                }
+
+                if (fs == null) {
+                    if(StringUtils.isNotEmpty(hdfsUser)){
+                        //UserGroupInformation ugi = UserGroupInformation.createProxyUser(hdfsUser,UserGroupInformation.getLoginUser());
+                        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
+                        ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
+                            @Override
+                            public Boolean run() throws Exception {
+                                fs = FileSystem.get(configuration);
+                                return true;
+                            }
+                        });
+                    }else{
+                        logger.warn("hdfs.root.user is not set value!");
+                        fs = FileSystem.get(configuration);
+                    }
+                }
+            }else if (resUploadType == ResUploadType.S3){
+                configuration.set(FS_DEFAULTFS,getString(FS_DEFAULTFS));
+                configuration.set(FS_S3A_ENDPOINT,getString(FS_S3A_ENDPOINT));
+                configuration.set(FS_S3A_ACCESS_KEY,getString(FS_S3A_ACCESS_KEY));
+                configuration.set(FS_S3A_SECRET_KEY,getString(FS_S3A_SECRET_KEY));
+                fs = FileSystem.get(configuration);
+            }
+
             String rmHaIds = getString(YARN_RESOURCEMANAGER_HA_RM_IDS);
             String appAddress = getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
             if (!StringUtils.isEmpty(rmHaIds)) {
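
A minimal, self-contained sketch of what the new S3 branch configures, assuming the hadoop-aws module (which provides the s3a:// filesystem) and its AWS SDK dependency are on the classpath; the bucket, endpoint, and keys below are placeholders, not values from this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class S3FsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "s3a://escheduler");            // bucket root
        conf.set("fs.s3a.endpoint", "http://s3.example.com");    // placeholder endpoint
        conf.set("fs.s3a.access.key", "ACCESS_KEY");             // placeholder credential
        conf.set("fs.s3a.secret.key", "SECRET_KEY");             // placeholder credential

        // FileSystem.get resolves fs.defaultFS, so this returns an S3A-backed
        // FileSystem; resource upload then reuses the same API as with HDFS.
        FileSystem fs = FileSystem.get(conf);
        fs.mkdirs(new Path("/escheduler/demo/resources"));
    }
}

This is why the services above need no S3-specific code: once HadoopUtils hands back a FileSystem, mkdir/delete/copy calls are backend-agnostic.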
PropertyUtils.java

@@ -16,6 +16,8 @@
  */
 package cn.escheduler.common.utils;
 
+import cn.escheduler.common.Constants;
+import cn.escheduler.common.enums.ResUploadType;
 import org.apache.commons.io.IOUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -65,11 +67,15 @@ public class PropertyUtils {
         }
     }
 
-    /*
-    public static PropertyUtils getInstance(){
-        return propertyUtils;
-    }
-    */
+    /**
+     * judge whether resource upload startup
+     * @return
+     */
+    public static Boolean getResUploadStartupState(){
+        String resUploadStartupType = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
+        ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
+        return resUploadType == ResUploadType.HDFS || resUploadType == ResUploadType.S3;
+    }
 
     /**
      * get property value
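
One consequence of the helper above: ResUploadType.valueOf fails fast on a missing or misspelled property (NullPointerException for null, IllegalArgumentException for anything that is not exactly HDFS, S3, or NONE) rather than returning false. A defensive variant that defaults to NONE, purely hypothetical and not part of this commit, could sit next to getResUploadStartupState in PropertyUtils:

    // Hypothetical, not in the commit: tolerate a missing/invalid property
    // by treating it as NONE instead of throwing.
    public static Boolean getResUploadStartupStateSafe() {
        String type = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
        ResUploadType resUploadType;
        try {
            resUploadType = ResUploadType.valueOf(type == null ? "NONE" : type.trim().toUpperCase());
        } catch (IllegalArgumentException e) {
            resUploadType = ResUploadType.NONE;
        }
        return resUploadType == ResUploadType.HDFS || resUploadType == ResUploadType.S3;
    }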
conf/common/common.properties

@@ -16,8 +16,8 @@ hdfs.root.user=hdfs
 # data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/escheduler" is recommended
 data.store2hdfs.basepath=/escheduler
 
-# whether hdfs starts
-hdfs.startup.state=false
+# resource upload startup type : HDFS,S3,NONE
+res.upload.startup.type=NONE
 
 # whether kerberos starts
 hadoop.security.authentication.startup.state=false
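
Note (editor's addition): the value of res.upload.startup.type must match the ResUploadType constants exactly (HDFS, S3, or NONE); see the parsing caveat under PropertyUtils above.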
conf/common/hadoop/hadoop.properties

-# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml to the conf directory
+# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml
+# to the conf directory,support s3,for example : s3a://escheduler
 fs.defaultFS=hdfs://mycluster:8020
 
+# s3 need,s3 endpoint
+fs.s3a.endpoint=http://192.168.199.91:9010
+
+# s3 need,s3 access key
+fs.s3a.access.key=A3DXS30FO22544RE
+
+# s3 need,s3 secret key
+fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
+
 #resourcemanager ha note this need ips , this empty if single
 yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
install.sh

@@ -110,14 +110,17 @@ xlsFilePath="/tmp/xls"
 # whether to start the monitoring self-start script
 monitorServerState="false"
 
-# hadoop configuration
-# whether to enable hdfs: if enabled, set to true and configure the hadoop parameters below;
-# if disabled, set to false; when false the following settings need no changes
-# note: if hdfs is enabled, you must create the hdfs root path yourself, i.e. hdfsPath in install.sh
-hdfsStartupSate="false"
+# storage type for resource center uploads: HDFS,S3,NONE
+resUploadStartupType="NONE"
 
-# namenode address; HA is supported, in which case core-site.xml and hdfs-site.xml must be placed in the conf directory
-namenodeFs="hdfs://mycluster:8020"
+# if resUploadStartupType is HDFS, set defaultFS to the namenode address; HA is supported, in which case core-site.xml and hdfs-site.xml must be placed in the conf directory
+# if S3, set the S3 address, e.g. s3a://escheduler; note that the root directory /escheduler must be created in advance
+defaultFS="hdfs://mycluster:8020"
+
+# if S3 is configured, the following settings are required
+s3Endpoint="http://192.168.199.91:9010"
+s3AccessKey="A3DXS30FO22544RE"
+s3SecretKey="OloCLq3n+8+sdPHUhJ21XrSxTC+JK"
 
 # resourcemanager HA configuration; leave empty for a single resourcemanager
 yarnHaIps="192.168.xx.xx,192.168.xx.xx"
@@ -273,7 +276,10 @@
 sed -i ${txt} "s#org.quartz.dataSource.myDs.password.*#org.quartz.dataSource.myDs.password=${mysqlPassword}#g" conf/quartz.properties
 
-sed -i ${txt} "s#fs.defaultFS.*#fs.defaultFS=${namenodeFs}#g" conf/common/hadoop/hadoop.properties
+sed -i ${txt} "s#fs.defaultFS.*#fs.defaultFS=${defaultFS}#g" conf/common/hadoop/hadoop.properties
+sed -i ${txt} "s#fs.s3a.endpoint.*#fs.s3a.endpoint=${s3Endpoint}#g" conf/common/hadoop/hadoop.properties
+sed -i ${txt} "s#fs.s3a.access.key.*#fs.s3a.access.key=${s3AccessKey}#g" conf/common/hadoop/hadoop.properties
+sed -i ${txt} "s#fs.s3a.secret.key.*#fs.s3a.secret.key=${s3SecretKey}#g" conf/common/hadoop/hadoop.properties
 sed -i ${txt} "s#yarn.resourcemanager.ha.rm.ids.*#yarn.resourcemanager.ha.rm.ids=${yarnHaIps}#g" conf/common/hadoop/hadoop.properties
 sed -i ${txt} "s#yarn.application.status.address.*#yarn.application.status.address=http://${singleYarnIp}:8088/ws/v1/cluster/apps/%s#g" conf/common/hadoop/hadoop.properties
@@ -283,7 +289,7 @@
 sed -i ${txt} "s#process.exec.basepath.*#process.exec.basepath=${execPath}#g" conf/common/common.properties
 sed -i ${txt} "s#hdfs.root.user.*#hdfs.root.user=${hdfsRootUser}#g" conf/common/common.properties
 sed -i ${txt} "s#data.store2hdfs.basepath.*#data.store2hdfs.basepath=${hdfsPath}#g" conf/common/common.properties
-sed -i ${txt} "s#hdfs.startup.state.*#hdfs.startup.state=${hdfsStartupSate}#g" conf/common/common.properties
+sed -i ${txt} "s#res.upload.startup.type.*#res.upload.startup.type=${resUploadStartupType}#g" conf/common/common.properties
 sed -i ${txt} "s#escheduler.env.path.*#escheduler.env.path=${shellEnvPath}#g" conf/common/common.properties
 sed -i ${txt} "s#resource.view.suffixs.*#resource.view.suffixs=${resSuffixs}#g" conf/common/common.properties
 sed -i ${txt} "s#development.state.*#development.state=${devState}#g" conf/common/common.properties