Commit 0583d321 authored by zhourui

storage: add hadoop

Parent f8234f44
@@ -366,6 +366,10 @@
<groupId>com.github.whvcse</groupId>
<artifactId>easy-captcha</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
</dependency>
</dependencies>
<build>
@@ -410,7 +414,7 @@
</compilerArgs>
</configuration>
</plugin>
<!--This plugin's configuration is used to store Eclipse m2e settings
only. It has no influence on the Maven build itself. -->
<plugin>
<groupId>org.eclipse.m2e</groupId>
@@ -854,7 +858,7 @@
<artifactId>javapoet</artifactId>
<version>1.11.1</version>
</dependency>
<!--dependency> <groupId>javax.visrec</groupId> <artifactId>visrec-api</artifactId>
<version>20200316</version> </dependency -->
<dependency>
<groupId>com.github.neuroph</groupId>
@@ -886,7 +890,7 @@
<artifactId>slf4j-api</artifactId>
<version>1.7.25</version>
</dependency>
<!-- dependency> <groupId>cglib</groupId> <artifactId>cglib</artifactId>
<version>3.2.0</version> </dependency -->
<dependency>
<groupId>com.alibaba</groupId>
@@ -1093,6 +1097,11 @@
<artifactId>easy-captcha</artifactId>
<version>1.6.2</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>3.3.1</version>
</dependency>
</dependencies>
</dependencyManagement>
<repositories>
......
@@ -2,8 +2,10 @@ package com.x.base.core.entity;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.FileNotFoundException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.Date;
import java.util.Objects;
@@ -33,18 +35,18 @@ import com.x.base.core.project.tools.DefaultCharset;
@MappedSuperclass
public abstract class StorageObject extends SliceJpaObject {
private static FileSystemManager fileSystemManagerInstance;
private synchronized FileSystemManager getFileSystemManager() throws FileSystemException {
if (fileSystemManagerInstance == null) {
StandardFileSystemManager fs = new StandardFileSystemManager();
fs.setFilesCache(new NullFilesCache());
fs.setCacheStrategy(CacheStrategy.ON_RESOLVE);
fs.init();
fileSystemManagerInstance = fs;
}
return fileSystemManagerInstance;
}
private static final long serialVersionUID = 7823729771901802653L;
@@ -53,33 +55,35 @@ public abstract class StorageObject extends SliceJpaObject {
public static final String DELETE_OPERATE = "delete";
public abstract String path() throws Exception;
public abstract String getStorage();
public abstract void setStorage(String storage);
public abstract Long getLength();
public abstract void setLength(Long length);
public abstract String getName();
public abstract void setName(String name);
public abstract String getExtension();
public abstract void setExtension(String extension);
public abstract Date getLastUpdateTime();
public abstract void setLastUpdateTime(Date lastUpdateTime);
public abstract Boolean getDeepPath();
public abstract void setDeepPath(Boolean deepPath);
public String path(String operate) throws Exception {
return this.path();
}
@Transient
private byte[] bytes;
@@ -148,14 +152,24 @@ public abstract class StorageObject extends SliceJpaObject {
/** Update the content. */
public Long updateContent(StorageMapping mapping, byte[] bytes) throws Exception {
if (Objects.equals(StorageProtocol.hdfs, mapping.getProtocol())) {
return this.hdfsUpdateContent(mapping, bytes);
} else {
return this.vfsUpdateContent(mapping, bytes);
}
}
private Long vfsUpdateContent(StorageMapping mapping, byte[] bytes) throws Exception {
String prefix = this.getPrefix(mapping);
String path = this.path();
if (StringUtils.isEmpty(path)) {
throw new IllegalStateException("path can not be empty.");
}
FileSystemOptions options = this.getOptions(mapping);
long length = -1L;
FileSystemManager manager = this.getFileSystemManager();
/*
* The check needs to be made twice: when the front end dispatches requests through nginx,
* several files may be uploaded at once, and concurrent uploads may try to create the same
* storage directory at the same time, which fails on the server with
* org.apache.commons.vfs2.FileSystemException: Could not create folder
@@ -185,6 +199,23 @@ public abstract class StorageObject extends SliceJpaObject {
return length;
}
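// Write data via HDFS; an existing file at the target path is deleted and recreated.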
private long hdfsUpdateContent(StorageMapping mapping, byte[] bytes) throws Exception {
try (org.apache.hadoop.fs.FileSystem fileSystem = org.apache.hadoop.fs.FileSystem
.get(hdfsConfiguration(mapping))) {
org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path(getPrefix(mapping), this.path());
if (fileSystem.exists(path)) {
fileSystem.delete(path, false);
}
try (org.apache.hadoop.fs.FSDataOutputStream out = fileSystem.create(path)) {
out.write(bytes);
this.setStorage(mapping.getName());
this.setLastUpdateTime(new Date());
this.setLength((long) bytes.length);
}
}
return bytes.length;
}
/** Read the content. */
public byte[] readContent(StorageMapping mapping) throws Exception {
try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
@@ -195,6 +226,15 @@ public abstract class StorageObject extends SliceJpaObject {
/** Stream the content to output. */
public Long readContent(StorageMapping mapping, OutputStream output) throws Exception {
if (Objects.equals(mapping.getProtocol(), StorageProtocol.hdfs)) {
return hdfsReadContent(mapping, output);
} else {
return vfsReadContent(mapping, output);
}
}
// Read data via VFS.
private Long vfsReadContent(StorageMapping mapping, OutputStream output) throws Exception {
long length = -1L;
FileSystemManager manager = this.getFileSystemManager();
String prefix = this.getPrefix(mapping);
@@ -206,9 +246,9 @@ public abstract class StorageObject extends SliceJpaObject {
length = IOUtils.copyLarge(input, output);
}
} else {
throw new FileNotFoundException(
fo.getPublicURIString() + " does not exist, object:" + this.toString() + ".");
}
if (!Objects.equals(StorageProtocol.webdav, mapping.getProtocol())) {
/* Closing the webdav file system would also try to close commons.httpClient. */
manager.closeFileSystem(fo.getFileSystem());
@@ -217,22 +257,74 @@ public abstract class StorageObject extends SliceJpaObject {
return length;
}
// Read data via HDFS.
private Long hdfsReadContent(StorageMapping mapping, OutputStream output) throws Exception {
long length = -1L;
try (org.apache.hadoop.fs.FileSystem fileSystem = org.apache.hadoop.fs.FileSystem
.get(hdfsConfiguration(mapping))) {
org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path(getPrefix(mapping), this.path());
if (fileSystem.exists(path)) {
try (org.apache.hadoop.fs.FSDataInputStream inputStream = fileSystem.open(path)) {
length = IOUtils.copyLarge(inputStream, output);
}
} else {
throw new FileNotFoundException(path + " does not exist, object:" + this.toString() + ".");
}
}
return length;
}
/** Check whether content exists. */
public boolean existContent(StorageMapping mapping) throws Exception {
if (Objects.equals(mapping.getProtocol(), StorageProtocol.hdfs)) {
return hdfsExistContent(mapping);
} else {
return vfsExistContent(mapping);
}
}
private boolean vfsExistContent(StorageMapping mapping) throws Exception {
FileSystemManager manager = this.getFileSystemManager();
String prefix = this.getPrefix(mapping);
String path = this.path();
FileSystemOptions options = this.getOptions(mapping);
try (FileObject fo = manager.resolveFile(prefix + PATHSEPARATOR + path, options)) {
return (fo.exists() && fo.isFile());
}
}
private boolean hdfsExistContent(StorageMapping mapping) throws Exception {
try (org.apache.hadoop.fs.FileSystem fileSystem = org.apache.hadoop.fs.FileSystem
.get(hdfsConfiguration(mapping))) {
org.apache.hadoop.fs.Path path = fileSystem.getHomeDirectory();
if (StringUtils.isNotEmpty(mapping.getPrefix())) {
path = new org.apache.hadoop.fs.Path(path, mapping.getPrefix());
}
path = new org.apache.hadoop.fs.Path(path, this.path());
return fileSystem.exists(path);
}
}
/** Delete the content; if the parent directory (checked one level only) is then empty, delete the parent directory as well. */
public void deleteContent(StorageMapping mapping) throws Exception {
if (Objects.equals(mapping.getProtocol(), StorageProtocol.hdfs)) {
hdfsDeleteContent(mapping);
} else {
vfsDeleteContent(mapping);
}
}
private void hdfsDeleteContent(StorageMapping mapping) throws Exception {
try (org.apache.hadoop.fs.FileSystem fileSystem = org.apache.hadoop.fs.FileSystem
.get(hdfsConfiguration(mapping))) {
org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path(getPrefix(mapping), this.path());
if (fileSystem.exists(path)) {
fileSystem.delete(path, false);
}
}
}
// Delete the content; if the parent directory (checked one level only) is empty, delete it as well.
private void vfsDeleteContent(StorageMapping mapping) throws Exception {
FileSystemManager manager = this.getFileSystemManager();
String prefix = this.getPrefix(mapping);
String path = this.path(DELETE_OPERATE);
@@ -242,64 +334,57 @@ public abstract class StorageObject extends SliceJpaObject {
fo.delete();
if ((!StringUtils.startsWith(path, PATHSEPARATOR)) && (StringUtils.contains(path, PATHSEPARATOR))) {
FileObject parent = fo.getParent();
if ((null != parent) && parent.exists() && parent.isFolder()
&& (parent.getChildren().length == 0)) {
parent.delete();
}
}
}
if (!Objects.equals(StorageProtocol.webdav, mapping.getProtocol())) {
// Closing the webdav file system would also try to close commons.httpClient.
manager.closeFileSystem(fo.getFileSystem());
}
}
}
// Build the leading part of the full access path.
private String getPrefix(StorageMapping mapping) throws IllegalStateException, UnsupportedEncodingException {
String prefix = "";
if (null == mapping.getProtocol()) {
throw new IllegalStateException("storage protocol is null.");
}
switch (mapping.getProtocol()) {
case ftp:
prefix = "ftp://" + URLEncoder.encode(mapping.getUsername(), DefaultCharset.name) + ":"
+ URLEncoder.encode(mapping.getPassword(), DefaultCharset.name) + "@" + mapping.getHost() + ":"
+ mapping.getPort();
break;
case ftps:
prefix = "ftps://" + URLEncoder.encode(mapping.getUsername(), DefaultCharset.name) + ":"
+ URLEncoder.encode(mapping.getPassword(), DefaultCharset.name) + "@" + mapping.getHost() + ":"
+ mapping.getPort();
break;
case sftp:
prefix = "sftp://" + URLEncoder.encode(mapping.getUsername(), DefaultCharset.name) + ":"
+ URLEncoder.encode(mapping.getPassword(), DefaultCharset.name) + "@" + mapping.getHost() + ":"
+ mapping.getPort();
break;
case cifs:
prefix = "smb://" + URLEncoder.encode(mapping.getUsername(), DefaultCharset.name) + ":"
+ URLEncoder.encode(mapping.getPassword(), DefaultCharset.name) + "@" + mapping.getHost() + ":"
+ mapping.getPort();
break;
case webdav:
prefix = "webdav://" + URLEncoder.encode(mapping.getUsername(), DefaultCharset.name) + ":"
+ URLEncoder.encode(mapping.getPassword(), DefaultCharset.name) + "@" + mapping.getHost() + ":"
+ mapping.getPort();
break;
case file:
prefix = "file://";
break;
case hdfs:
// Do not use the per-user home directory; return the prefix directly.
return StringUtils.isEmpty(mapping.getPrefix()) ? "/" : ("/" + mapping.getPrefix());
default:
break;
}
@@ -309,11 +394,9 @@ public abstract class StorageObject extends SliceJpaObject {
private FileSystemOptions getOptions(StorageMapping mapping) throws Exception {
FileSystemOptions opts = new FileSystemOptions();
if (null == mapping.getProtocol()) {
throw new IllegalStateException("storage protocol is null.");
}
switch (mapping.getProtocol()) {
case sftp:
FtpFileSystemConfigBuilder sftpBuilder = FtpFileSystemConfigBuilder.getInstance();
sftpBuilder.setPassiveMode(opts, Config.vfs().getSftp().getPassive());
@@ -341,9 +424,10 @@ public abstract class StorageObject extends SliceJpaObject {
* java.net.Socket.connect(Socket.java:589)
*/
ftpBuilder.setPassiveMode(opts, Config.vfs().getFtp().getPassive());
/*
* builder.setPassiveMode(opts, false);
*/
// Force remote verification off.
ftpBuilder.setRemoteVerification(opts, false);
// FtpFileType.BINARY is the default
ftpBuilder.setFileType(opts, FtpFileType.BINARY);
@@ -354,7 +438,7 @@ public abstract class StorageObject extends SliceJpaObject {
case ftps:
FtpsFileSystemConfigBuilder ftpsBuilder = FtpsFileSystemConfigBuilder.getInstance();
ftpsBuilder.setPassiveMode(opts, Config.vfs().getFtp().getPassive());
// Force remote verification off.
ftpsBuilder.setRemoteVerification(opts, false);
// FtpFileType.BINARY is the default
ftpsBuilder.setFileType(opts, FtpFileType.BINARY);
@@ -373,7 +457,6 @@ public abstract class StorageObject extends SliceJpaObject {
webdavBuilder.setMaxConnectionsPerHost(opts, 200);
webdavBuilder.setMaxTotalConnections(opts, 200);
webdavBuilder.setFollowRedirect(opts, true);
break;
case file:
break;
@@ -383,4 +466,16 @@ public abstract class StorageObject extends SliceJpaObject {
return opts;
}
private org.apache.hadoop.conf.Configuration hdfsConfiguration(StorageMapping mapping) {
if ((!StringUtils.equals(System.getProperty("HADOOP_USER_NAME"), mapping.getUsername()))
&& StringUtils.isNotBlank(mapping.getUsername())) {
System.setProperty("HADOOP_USER_NAME", mapping.getUsername());
}
org.apache.hadoop.conf.Configuration configuration = new org.apache.hadoop.conf.Configuration();
configuration.set("fs.default.name",
StorageProtocol.hdfs + "://" + mapping.getHost() + ":" + mapping.getPort());
configuration.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
return configuration;
}
}
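For orientation, below is a minimal, self-contained sketch of the Hadoop client calls that the hdfs* methods above build on (FileSystem.get, create, open, exists, delete). The namenode address, prefix, and file path are hypothetical placeholders, not values taken from this commit:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsRoundTripSketch {
	public static void main(String[] args) throws Exception {
		// Mirrors hdfsConfiguration(mapping): point the client at the namenode.
		Configuration conf = new Configuration();
		conf.set("fs.default.name", "hdfs://127.0.0.1:9000"); // hypothetical namenode address
		conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
		try (FileSystem fs = FileSystem.get(conf)) {
			Path path = new Path("/o2oa", "sample/demo.txt"); // hypothetical prefix and path
			// Write, as in hdfsUpdateContent; create() overwrites by default.
			try (FSDataOutputStream out = fs.create(path)) {
				out.write("hello".getBytes(StandardCharsets.UTF_8));
			}
			// Read, as in hdfsReadContent.
			if (fs.exists(path)) {
				try (FSDataInputStream in = fs.open(path)) {
					byte[] buffer = new byte[(int) fs.getFileStatus(path).getLen()];
					in.readFully(buffer);
					System.out.println(new String(buffer, StandardCharsets.UTF_8));
				}
			}
			// Delete non-recursively, as in hdfsDeleteContent.
			fs.delete(path, false);
		}
	}
}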
package com.x.base.core.entity;
public enum StorageProtocol {
ftp, ftps, webdav, cifs, file, sftp, hdfs;
public static final int length = JpaObject.length_16B;
}
@@ -106,6 +106,14 @@ public class Config {
public static final String DIR_COMMONS_EXT = "commons/ext";
public static final String DIR_COMMONS_LANGUAGE = "commons/language";
public static final String DIR_COMMONS_FONTS = "commons/fonts";
public static final String DIR_COMMONS_HADOOP = DIR_COMMONS + "/hadoop";
public static final String DIR_COMMONS_HADOOP_WINDOWS = DIR_COMMONS_HADOOP + "/" + OS_WINDOWS;
public static final String DIR_COMMONS_HADOOP_AIX = DIR_COMMONS_HADOOP + "/" + OS_AIX;
public static final String DIR_COMMONS_HADOOP_LINUX = DIR_COMMONS_HADOOP + "/" + OS_LINUX;
public static final String DIR_COMMONS_HADOOP_MACOS = DIR_COMMONS_HADOOP + "/" + OS_MACOS;
public static final String DIR_COMMONS_HADOOP_RASPI = DIR_COMMONS_HADOOP + "/" + OS_RASPI;
public static final String DIR_COMMONS_HADOOP_ARM = DIR_COMMONS_HADOOP + "/" + OS_ARM;
public static final String DIR_COMMONS_HADOOP_MIPS = DIR_COMMONS_HADOOP + "/" + OS_MIPS;
public static final String DIR_CONFIG = "config"; public static final String DIR_CONFIG = "config";
public static final String DIR_CONFIG_COVERTOWEBSERVER = "config/coverToWebServer"; public static final String DIR_CONFIG_COVERTOWEBSERVER = "config/coverToWebServer";
public static final String DIR_CONFIGSAMPLE = "configSample"; public static final String DIR_CONFIGSAMPLE = "configSample";
...@@ -114,11 +122,11 @@ public class Config { ...@@ -114,11 +122,11 @@ public class Config {
public static final String DIR_DYNAMIC = "dynamic"; public static final String DIR_DYNAMIC = "dynamic";
public static final String DIR_DYNAMIC_JARS = "dynamic/jars"; public static final String DIR_DYNAMIC_JARS = "dynamic/jars";
public static final String DIR_JVM = "jvm"; public static final String DIR_JVM = "jvm";
public static final String DIR_JVM_AIX = "jvm/aix"; // public static final String DIR_JVM_AIX = "jvm/aix";
public static final String DIR_JVM_LINUX = "jvm/linux"; // public static final String DIR_JVM_LINUX = "jvm/linux";
public static final String DIR_JVM_MACOS = "jvm/macos"; // public static final String DIR_JVM_MACOS = "jvm/macos";
public static final String DIR_JVM_WINDOWS = "jvm/windows"; // public static final String DIR_JVM_WINDOWS = "jvm/windows";
public static final String DIR_JVM_NEOKYLIN_LOONGSON = "jvm/neokylin_loongson"; // public static final String DIR_JVM_NEOKYLIN_LOONGSON = "jvm/neokylin_loongson";
public static final String DIR_LOCAL = "local"; public static final String DIR_LOCAL = "local";
public static final String DIR_LOCAL_BACKUP = "local/backup"; public static final String DIR_LOCAL_BACKUP = "local/backup";
public static final String DIR_LOCAL_UPDATE = "local/update"; public static final String DIR_LOCAL_UPDATE = "local/update";
@@ -1339,4 +1347,116 @@ public class Config {
initialContext().rebind(RESOURCE_NODE_PROCESSPLATFORMEXECUTORS, executorServices);
}
public static boolean isWindowsJava8() throws Exception {
return command_java_path().startsWith(dir_jvm().toPath().resolve(OS_WINDOWS));
}
public static boolean isLinuxJava8() throws Exception {
return command_java_path().startsWith(dir_jvm().toPath().resolve(OS_LINUX));
}
public static boolean isRaspiJava8() throws Exception {
return command_java_path().startsWith(dir_jvm().toPath().resolve(OS_RASPI));
}
public static boolean isArmJava8() throws Exception {
return command_java_path().startsWith(dir_jvm().toPath().resolve(OS_ARM));
}
public static boolean isMipsJava8() throws Exception {
return command_java_path().startsWith(dir_jvm().toPath().resolve(OS_MIPS));
}
public static boolean isAixJava8() throws Exception {
return command_java_path().startsWith(dir_jvm().toPath().resolve(OS_AIX));
}
public static boolean isMacosJava8() throws Exception {
return command_java_path().startsWith(dir_jvm().toPath().resolve(OS_MACOS));
}
public static boolean isWindowsJava11() throws Exception {
return command_java_path().startsWith(dir_jvm().toPath().resolve(OS_WINDOWS + "_" + JAVAVERSION_JAVA11));
}
public static boolean isLinuxJava11() throws Exception {
return command_java_path().startsWith(dir_jvm().toPath().resolve(OS_LINUX + "_" + JAVAVERSION_JAVA11));
}
public static boolean isRaspiJava11() throws Exception {
return command_java_path().startsWith(dir_jvm().toPath().resolve(OS_RASPI + "_" + JAVAVERSION_JAVA11));
}
public static boolean isArmJava11() throws Exception {
return command_java_path().startsWith(dir_jvm().toPath().resolve(OS_ARM + "_" + JAVAVERSION_JAVA11));
}
public static boolean isMipsJava11() throws Exception {
return command_java_path().startsWith(dir_jvm().toPath().resolve(OS_MIPS + "_" + JAVAVERSION_JAVA11));
}
public static boolean isAixJava11() throws Exception {
return command_java_path().startsWith(dir_jvm().toPath().resolve(OS_AIX + "_" + JAVAVERSION_JAVA11));
}
public static boolean isMacosJava11() throws Exception {
return command_java_path().startsWith(dir_jvm().toPath().resolve(OS_MACOS + "_" + JAVAVERSION_JAVA11));
}
public static Path path_commons_hadoop_windows(boolean force) throws Exception {
Path path = Paths.get(base(), DIR_COMMONS_HADOOP_WINDOWS);
if ((!Files.exists(path)) && force) {
Files.createDirectories(path);
}
return path;
}
public static Path path_commons_hadoop_linux(boolean force) throws Exception {
Path path = Paths.get(base(), DIR_COMMONS_HADOOP_LINUX);
if ((!Files.exists(path)) && force) {
Files.createDirectories(path);
}
return path;
}
public static Path path_commons_hadoop_aix(boolean force) throws Exception {
Path path = Paths.get(base(), DIR_COMMONS_HADOOP_AIX);
if ((!Files.exists(path)) && force) {
Files.createDirectories(path);
}
return path;
}
public static Path path_commons_hadoop_macos(boolean force) throws Exception {
Path path = Paths.get(base(), DIR_COMMONS_HADOOP_MACOS);
if ((!Files.exists(path)) && force) {
Files.createDirectories(path);
}
return path;
}
public static Path path_commons_hadoop_raspi(boolean force) throws Exception {
Path path = Paths.get(base(), DIR_COMMONS_HADOOP_RASPI);
if ((!Files.exists(path)) && force) {
Files.createDirectories(path);
}
return path;
}
public static Path path_commons_hadoop_arm(boolean force) throws Exception {
Path path = Paths.get(base(), DIR_COMMONS_HADOOP_ARM);
if ((!Files.exists(path)) && force) {
Files.createDirectories(path);
}
return path;
}
public static Path path_commons_hadoop_mips(boolean force) throws Exception {
Path path = Paths.get(base(), DIR_COMMONS_HADOOP_MIPS);
if ((!Files.exists(path)) && force) {
Files.createDirectories(path);
}
return path;
}
}
@@ -9,20 +9,22 @@ import com.x.base.core.project.tools.Crypto;
public class ExternalStorageSource extends ConfigObject {
private static final long serialVersionUID = 5926439816241094368L;
// Not persisted.
private transient String _password;
public ExternalStorageSource() {
this.protocol = DEFAULT_PROTOCOL;
this.username = DEFAULT_USERNAME;
this.password = DEFAULT_PASSWORD;
this.host = DEFAULT_HOST;
this.port = DEFAULT_PORT;
this.prefix = DEFAULT_PREFIX;
this.enable = DEFAULT_ENABLE;
this.weight = DEFAULT_WEIGHT;
this.name = DEFAULT_NAME;
this.deepPath = DEFAULT_DEEPPATH;
}
public static ExternalStorageSource defaultInstance() {
@@ -30,18 +32,18 @@ public class ExternalStorageSource extends ConfigObject {
}
public static final StorageProtocol DEFAULT_PROTOCOL = StorageProtocol.webdav;
public static final String DEFAULT_USERNAME = "admin";
public static final String DEFAULT_PASSWORD = "admin";
public static final String DEFAULT_HOST = "127.0.0.1";
public static final Integer DEFAULT_PORT = 8080;
public static final String DEFAULT_PREFIX = "";
public static final Integer DEFAULT_WEIGHT = 100;
public static final Boolean DEFAULT_ENABLE = true;
public static final String DEFAULT_NAME = "251";
public static final Boolean DEFAULT_DEEPPATH = false;
@FieldDescribe("Protocol; allowed values include ftp, webdav...")
private StorageProtocol protocol;
@FieldDescribe("Login username.")
private String username;
......
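With hdfs now available in StorageProtocol, an external storage source can point at an HDFS namenode. A rough sketch of such a source follows; the setter names and the port value are assumptions for illustration and do not appear in this commit:

ExternalStorageSource source = ExternalStorageSource.defaultInstance();
source.setProtocol(StorageProtocol.hdfs); // assumed setter names throughout
source.setHost("127.0.0.1"); // namenode host (hypothetical)
source.setPort(9000); // namenode RPC port (hypothetical)
source.setUsername("hadoop"); // becomes HADOOP_USER_NAME in hdfsConfiguration
source.setPrefix("o2oa"); // content is stored under /o2oa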
package com.x.server.console;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.Properties;
import com.x.base.core.project.config.Config;
public class Hadoop {
private Hadoop() {
// nothing
}
public static void init() throws Exception {
setHomeDir();
}
private static void setHomeDir() throws Exception {
if (Config.isWindowsJava11()) {
System.setProperty("hadoop.home.dir", Config.path_commons_hadoop_windows(false).toString());
// [qtp680227777-226] WARN org.apache.hadoop.util.NativeCodeLoader - Unable to
// load native-hadoop library for your platform... using builtin-java classes
// where applicable
// System.setProperty("log4j.logger.org.apache.hadoop.util.NativeCodeLoader",
// "ERROR");
System.setProperty("java.library.path", Config.path_commons_hadoop_windows(false).resolve("bin").toString()
+ System.getProperty("path.separator") + System.getProperty("java.library.path"));
}
}
}
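Hadoop.init() currently wires up only the Windows case, where hadoop.home.dir must point at a directory whose bin folder holds the winutils binaries. A sketch of how setHomeDir could be extended to other platforms using the Config helpers added above (an illustration, not part of this commit):

private static void setHomeDir() throws Exception {
	if (Config.isWindowsJava11()) {
		System.setProperty("hadoop.home.dir", Config.path_commons_hadoop_windows(false).toString());
	} else if (Config.isLinuxJava11()) {
		System.setProperty("hadoop.home.dir", Config.path_commons_hadoop_linux(false).toString());
	} else if (Config.isMacosJava11()) {
		System.setProperty("hadoop.home.dir", Config.path_commons_hadoop_macos(false).toString());
	}
}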
@@ -109,6 +109,8 @@ public class Main {
LogTools.setSlf4jSimple();
ResourceFactory.bind();
CommandFactory.printStartHelp();
// Initialize the hadoop environment.
Hadoop.init();
}
public static void main(String[] args) throws Exception {
......