Commit 6c009d86, authored by Zesong Sun, committed by Jiang Tian

[IOTDB-189] Support compatibility of HDFS File and Java File (#365)

* [IOTDB-189] Support compatibility of HDFS file and java file

* Fix "input line is too long" in windows

* Use IoTDBFileFactory to create File and HdfsFile
Parent f0077d9f
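
The pattern applied throughout the server module below is mechanical: direct `new File(...)` calls are routed through the new `FileFactory` enum, which hands back either a plain `java.io.File` or an `HdfsFile` depending on the configured storage file system. A minimal before/after sketch of a call site (the path string is a hypothetical example):

```java
import java.io.File;
import org.apache.iotdb.db.engine.fileSystem.FileFactory;

// Before this commit: always a local file
File localOnly = new File("data/sequence/root.sg1/0-101-0.tsfile");      // hypothetical path

// After this commit: plain File or HdfsFile, chosen by the storage_fs setting
File fsAware = FileFactory.INSTANCE.getFile("data/sequence/root.sg1/0-101-0.tsfile");
```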
@@ -37,16 +37,9 @@ set JAVA_OPTS=-ea^
 -DIOTDB_CLI_HOME=%IOTDB_CLI_HOME%
 
 REM For each jar in the IOTDB_CLI_HOME lib directory call append to build the CLASSPATH variable.
-for %%i in ("%IOTDB_CLI_HOME%\lib\*.jar") do call :append "%%i"
-goto okClasspath
-:append
-set CLASSPATH=%CLASSPATH%;%1
-goto :eof
+set CLASSPATH=%IOTDB_CLI_HOME%\lib\*
 
 REM -----------------------------------------------------------------------------
-:okClasspath
 
 set PARAMETERS=%*
 if "%PARAMETERS%" == "" set PARAMETERS=-h 127.0.0.1 -p 6667 -u root -pw root
......
@@ -37,19 +37,9 @@ set JAVA_OPTS=-ea^
 -DIOTDB_CLI_HOME=%IOTDB_CLI_HOME%
 
 @REM ***** CLASSPATH library setting *****
-@REM Ensure that any user defined CLASSPATH variables are not used on startup
-set CLASSPATH=""
-REM For each jar in the IOTDB_CLI_HOME lib directory call append to build the CLASSPATH variable.
-for %%i in ("%IOTDB_CLI_HOME%\lib\*.jar") do call :append "%%i"
-goto okClasspath
-:append
-set CLASSPATH=%CLASSPATH%;%1
-goto :eof
+set CLASSPATH=%IOTDB_CLI_HOME%\lib\*
 
 REM -----------------------------------------------------------------------------
-:okClasspath
 
 "%JAVA_HOME%\bin\java" -DIOTDB_CLI_HOME=%IOTDB_CLI_HOME% %JAVA_OPTS% -cp %CLASSPATH% %MAIN_CLASS% %*
......
@@ -37,19 +37,9 @@ set JAVA_OPTS=-ea^
 -DIOTDB_CLI_HOME=%IOTDB_CLI_HOME%
 
 @REM ***** CLASSPATH library setting *****
-@REM Ensure that any user defined CLASSPATH variables are not used on startup
-set CLASSPATH=""
-REM For each jar in the IOTDB_CLI_HOME lib directory call append to build the CLASSPATH variable.
-for %%i in ("%IOTDB_CLI_HOME%\lib\*.jar") do call :append "%%i"
-goto okClasspath
-:append
-set CLASSPATH=%CLASSPATH%;%1
-goto :eof
+set CLASSPATH=%IOTDB_CLI_HOME%\lib\*
 
 REM -----------------------------------------------------------------------------
-:okClasspath
 
 "%JAVA_HOME%\bin\java" -DIOTDB_CLI_HOME=%IOTDB_CLI_HOME% %JAVA_OPTS% -cp %CLASSPATH% %MAIN_CLASS% %*
......
@@ -55,12 +55,7 @@ public abstract class AbstractScript {
       throw new RuntimeException("user.dir " + userDir.getAbsolutePath() + " doesn't exist.");
     }
     File targetDir = new File(userDir, "target");
-    File[] files = targetDir.listFiles(new FileFilter() {
-      @Override
-      public boolean accept(File pathname) {
-        return pathname.isDirectory() && pathname.getName().startsWith("iotdb-client-");
-      }
-    });
+    File[] files = targetDir.listFiles(pathname -> pathname.isDirectory() && pathname.getName().startsWith("iotdb-client-"));
     if(files.length != 1) {
       throw new RuntimeException(
           "Exactly one directory starting with 'iotdb-client-' should have been found, but was " + files.length);
......
@@ -19,9 +19,10 @@
 package org.apache.iotdb.tsfile;
 
+import java.io.File;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
-import java.io.File;
 import org.apache.iotdb.tsfile.write.TsFileWriter;
 import org.apache.iotdb.tsfile.write.schema.Schema;
 import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
......
@@ -85,6 +85,25 @@
       <artifactId>powermock-api-mockito2</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.httpcomponents</groupId>
+          <artifactId>httpclient</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.httpcomponents</groupId>
+          <artifactId>httpcore</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
   </dependencies>
   <build>
     <plugins>
......
@@ -121,6 +121,10 @@ timestamp_precision=ms
 # If its prefix is "/", then the path is absolute. Otherwise, it is relative.
 # wal_dir=data/wal
 
+# Storage file system. Currently, TsFile can be stored in the LOCAL file system or in HDFS.
+storage_fs=LOCAL
+
 ####################
 ### Memory Control Configuration
 ####################
......
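Since the value is parsed with `FSType.valueOf`, it must match one of the enum constants added in this commit. A hypothetical non-default setting for an HDFS-backed deployment (HDFS connection details are outside this change) would be:

```properties
# hypothetical value; must match an FSType constant (LOCAL or HDFS)
storage_fs=HDFS
```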
@@ -18,6 +18,7 @@
  */
 package org.apache.iotdb.db.conf;
 
+import org.apache.iotdb.db.engine.fileSystem.FSType;
 import java.io.File;
 import java.time.ZoneId;
 import java.util.ArrayList;
@@ -272,7 +273,7 @@ public class IoTDBConfig {
    * Watermark method and parameters
    */
   private String watermarkMethod = "GroupBasedLSBMethod(embed_row_cycle=5,embed_lsb_num=5)";
 
   /**
    * How much memory (in byte) can be used by a single merge task.
    */
@@ -324,6 +325,11 @@ public class IoTDBConfig {
   private MergeFileStrategy mergeFileStrategy = MergeFileStrategy.MAX_SERIES_NUM;
 
+  /**
+   * Default storage is in local file system
+   */
+  private FSType storageFs = FSType.LOCAL;
+
   public IoTDBConfig() {
     // empty constructor
   }
@@ -880,4 +886,12 @@ public class IoTDBConfig {
     }
     return null;
   }
+
+  public FSType getStorageFs() {
+    return storageFs;
+  }
+
+  public void setStorageFs(String storageFs) {
+    this.storageFs = FSType.valueOf(storageFs);
+  }
 }
@@ -18,17 +18,14 @@
  */
 package org.apache.iotdb.db.conf;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.time.ZoneId;
-import java.util.Properties;
 import org.apache.iotdb.db.utils.FilePathUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
+import java.io.*;
+import java.time.ZoneId;
+import java.util.Properties;
 
 public class IoTDBDescriptor {
 
   private static final Logger logger = LoggerFactory.getLogger(IoTDBDescriptor.class);
@@ -252,14 +249,14 @@ public class IoTDBDescriptor {
       conf.setPerformanceStatMemoryInKB(Integer
           .parseInt(properties.getProperty("performance_stat_memory_in_kb",
               Integer.toString(conf.getPerformanceStatMemoryInKB())).trim()));
 
       int maxConcurrentClientNum = Integer.parseInt(properties.
           getProperty("rpc_max_concurrent_client_num",
               Integer.toString(conf.getRpcMaxConcurrentClientNum()).trim()));
       if (maxConcurrentClientNum <= 0) {
         maxConcurrentClientNum = 65535;
       }
 
       conf.setEnableWatermark(Boolean.parseBoolean(properties.getProperty("watermark_module_opened",
           Boolean.toString(conf.isEnableWatermark()).trim())));
       conf.setWatermarkSecretKey(
@@ -270,7 +267,8 @@ public class IoTDBDescriptor {
           properties.getProperty("watermark_method", conf.getWatermarkMethod()));
 
       conf.setRpcMaxConcurrentClientNum(maxConcurrentClientNum);
+      conf.setStorageFs(properties.getProperty("storage_fs"));
+
     } catch (IOException e) {
       logger.warn("Cannot load config file because, use default configuration", e);
     } catch (Exception e) {
......
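One caveat about the new `conf.setStorageFs(properties.getProperty("storage_fs"))` line: `getProperty` returns null when the key is absent, and `FSType.valueOf(null)` throws a NullPointerException, so an existing config file without the new key would fail to load. A sketch of a more forgiving read, mirroring the default-value idiom the surrounding properties use (not part of this commit):

```java
// Sketch only: fall back to the already-configured default instead of passing null.
conf.setStorageFs(properties.getProperty("storage_fs",
    conf.getStorageFs().toString()).trim());
```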
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.engine.fileSystem;
public enum FSType {
LOCAL, HDFS
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.engine.fileSystem;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import java.io.File;
import java.net.URI;
public enum FileFactory {
INSTANCE;
  // File system type read from the storage_fs setting; decides whether plain
  // java.io.File or HdfsFile instances are handed out.
  private static FSType fsType = IoTDBDescriptor.getInstance().getConfig().getStorageFs();

  public File getFile(String pathname) {
    if (fsType.equals(FSType.HDFS)) {
      return new HdfsFile(pathname);
    } else {
      return new File(pathname);
    }
  }

  public File getFile(String parent, String child) {
    if (fsType.equals(FSType.HDFS)) {
      return new HdfsFile(parent, child);
    } else {
      return new File(parent, child);
    }
  }

  public File getFile(File parent, String child) {
    if (fsType.equals(FSType.HDFS)) {
      return new HdfsFile(parent, child);
    } else {
      return new File(parent, child);
    }
  }

  public File getFile(URI uri) {
    if (fsType.equals(FSType.HDFS)) {
      return new HdfsFile(uri);
    } else {
      return new File(uri);
    }
  }
}
\ No newline at end of file
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.engine.fileSystem;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileFilter;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
public class HdfsFile extends File {

  // Path of this file inside the Hadoop file system; every constructor must set it.
  private Path hdfsPath;
  private static final Logger logger = LoggerFactory.getLogger(HdfsFile.class);
public HdfsFile(String pathname) {
super(pathname);
hdfsPath = new Path(pathname);
}
  public HdfsFile(String parent, String child) {
    super(parent, child);
    hdfsPath = new Path(parent, child);
  }

  public HdfsFile(File parent, String child) {
    super(parent, child);
    hdfsPath = new Path(parent.getPath(), child);
  }

  public HdfsFile(URI uri) {
    super(uri);
    hdfsPath = new Path(uri);
  }
@Override
public String getAbsolutePath() {
return hdfsPath.toUri().toString();
}
@Override
public long length() {
try {
FileSystem fs = hdfsPath.getFileSystem(new Configuration());
return fs.getFileStatus(hdfsPath).getLen();
} catch (IOException e) {
logger.error("Fail to get length of the file. ", e);
return 0;
}
}
@Override
public boolean exists() {
try {
FileSystem fs = hdfsPath.getFileSystem(new Configuration());
return fs.exists(hdfsPath);
} catch (IOException e) {
logger.error("Fail to check whether the file or directory exists. ", e);
return false;
}
}
@Override
public File[] listFiles() {
ArrayList<HdfsFile> files = new ArrayList<>();
try {
FileSystem fs = hdfsPath.getFileSystem(new Configuration());
RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(hdfsPath, true);
while (iterator.hasNext()) {
LocatedFileStatus fileStatus = iterator.next();
Path fullPath = fileStatus.getPath();
files.add(new HdfsFile(fullPath.toUri()));
}
return files.toArray(new HdfsFile[files.size()]);
} catch (IOException e) {
logger.error("Fail to list files. ", e);
return null;
}
}
@Override
public File[] listFiles(FileFilter filter) {
ArrayList<HdfsFile> files = new ArrayList<>();
try {
PathFilter pathFilter = new GlobFilter(filter.toString()); // TODO
FileSystem fs = hdfsPath.getFileSystem(new Configuration());
RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(hdfsPath, true);
while (iterator.hasNext()) {
LocatedFileStatus fileStatus = iterator.next();
Path fullPath = fileStatus.getPath();
if (pathFilter.accept(fullPath)) {
files.add(new HdfsFile(fullPath.toUri()));
}
}
return files.toArray(new HdfsFile[files.size()]);
} catch (IOException e) {
logger.error("Fail to list files. ", e);
return null;
}
}
@Override
public File getParentFile() {
return new HdfsFile(hdfsPath.getParent().toUri());
}
@Override
public boolean createNewFile() throws IOException {
FileSystem fs = hdfsPath.getFileSystem(new Configuration());
return fs.createNewFile(hdfsPath);
}
@Override
public boolean delete() {
try {
FileSystem fs = hdfsPath.getFileSystem(new Configuration());
return fs.delete(hdfsPath, true);
} catch (IOException e) {
logger.error("Fail to delete file. ", e);
return false;
}
}
@Override
public boolean mkdirs() {
try {
FileSystem fs = hdfsPath.getFileSystem(new Configuration());
return fs.mkdirs(hdfsPath);
} catch (IOException e) {
logger.error("Fail to create directory. ", e);
return false;
}
}
}
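
The `listFiles(FileFilter)` override above builds a Hadoop `GlobFilter` from `filter.toString()`, which the TODO already marks as a placeholder. A possible sketch that applies the caller's filter directly, constructing `HdfsFile` from the path string because `java.io.File(URI)` only accepts `file:` URIs, could look like the following (an illustration, not part of this commit):

```java
@Override
public File[] listFiles(FileFilter filter) {
  ArrayList<HdfsFile> files = new ArrayList<>();
  try {
    FileSystem fs = hdfsPath.getFileSystem(new Configuration());
    RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(hdfsPath, true);
    while (iterator.hasNext()) {
      Path fullPath = iterator.next().getPath();
      // Wrap the remote path so the caller's java.io.FileFilter can inspect its name, etc.
      HdfsFile candidate = new HdfsFile(fullPath.toString());
      if (filter.accept(candidate)) {
        files.add(candidate);
      }
    }
    return files.toArray(new HdfsFile[0]);
  } catch (IOException e) {
    logger.error("Fail to list files. ", e);
    return null;
  }
}
```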
@@ -19,19 +19,6 @@
 package org.apache.iotdb.db.engine.merge.manage;
 
-import static org.apache.iotdb.db.engine.merge.task.MergeTask.MERGE_SUFFIX;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.stream.Collectors;
 import org.apache.iotdb.db.engine.modification.Modification;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
 import org.apache.iotdb.db.query.reader.IPointReader;
@@ -40,6 +27,7 @@ import org.apache.iotdb.db.utils.MergeUtils;
 import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.db.engine.fileSystem.FileFactory;
 import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
 import org.apache.iotdb.tsfile.read.common.Chunk;
 import org.apache.iotdb.tsfile.read.common.Path;
@@ -51,6 +39,14 @@ import org.apache.iotdb.tsfile.write.writer.RestorableTsFileIOWriter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.Collectors;
+
+import static org.apache.iotdb.db.engine.merge.task.MergeTask.MERGE_SUFFIX;
+
 /**
  * MergeResource manages files and caches of readers, writers, MeasurementSchemas and
  * modifications to avoid unnecessary object creations and file openings.
@@ -102,7 +98,8 @@ public class MergeResource {
   public RestorableTsFileIOWriter getMergeFileWriter(TsFileResource resource) throws IOException {
     RestorableTsFileIOWriter writer = fileWriterCache.get(resource);
     if (writer == null) {
-      writer = new RestorableTsFileIOWriter(new File(resource.getFile().getPath() + MERGE_SUFFIX));
+      writer = new RestorableTsFileIOWriter(FileFactory.INSTANCE
+          .getFile(resource.getFile().getPath() + MERGE_SUFFIX));
       fileWriterCache.put(resource, writer);
     }
     return writer;
......
@@ -74,6 +74,7 @@ import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
 import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.iotdb.db.engine.fileSystem.FileFactory;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.utils.Pair;
 import org.apache.iotdb.tsfile.write.schema.Schema;
@@ -239,7 +240,7 @@ public class StorageGroupProcessor {
   private List<TsFileResource> getAllFiles(List<String> folders) throws IOException {
     List<File> tsFiles = new ArrayList<>();
     for (String baseDir : folders) {
-      File fileFolder = new File(baseDir, storageGroupName);
+      File fileFolder = FileFactory.INSTANCE.getFile(baseDir, storageGroupName);
       if (!fileFolder.exists()) {
         continue;
       }
@@ -494,11 +495,11 @@ public class StorageGroupProcessor {
         + TSFILE_SUFFIX;
     if (sequence) {
-      return new TsFileProcessor(storageGroupName, new File(filePath),
+      return new TsFileProcessor(storageGroupName, FileFactory.INSTANCE.getFile(filePath),
           schema, versionController, this::closeUnsealedTsFileProcessor,
           this::updateLatestFlushTimeCallback, sequence);
     } else {
-      return new TsFileProcessor(storageGroupName, new File(filePath),
+      return new TsFileProcessor(storageGroupName, FileFactory.INSTANCE.getFile(filePath),
          schema, versionController, this::closeUnsealedTsFileProcessor,
          () -> true, sequence);
     }
......
@@ -18,14 +18,7 @@
  */
 package org.apache.iotdb.db.engine.storagegroup;
 
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
+import java.io.*;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -37,6 +30,7 @@ import org.apache.commons.io.FileUtils;
 import org.apache.iotdb.db.engine.modification.ModificationFile;
 import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
+import org.apache.iotdb.db.engine.fileSystem.FileFactory;
 import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
 
 public class TsFileResource {
@@ -124,8 +118,8 @@ public class TsFileResource {
         ReadWriteIOUtils.write(entry.getValue(), outputStream);
       }
     }
-    File src = new File(file + RESOURCE_SUFFIX + TEMP_SUFFIX);
-    File dest = new File(file + RESOURCE_SUFFIX);
+    File src = FileFactory.INSTANCE.getFile(file + RESOURCE_SUFFIX + TEMP_SUFFIX);
+    File dest = FileFactory.INSTANCE.getFile(file + RESOURCE_SUFFIX);
     dest.delete();
     FileUtils.moveFile(src, dest);
   }
@@ -167,7 +161,7 @@ public class TsFileResource {
   }
 
   public boolean fileExists() {
-    return new File(file+RESOURCE_SUFFIX).exists();
+    return FileFactory.INSTANCE.getFile(file + RESOURCE_SUFFIX).exists();
   }
 
   public void forceUpdateEndTime(String device, long time) {
@@ -242,8 +236,8 @@ public class TsFileResource {
   public void remove() {
     file.delete();
-    new File(file.getPath() + RESOURCE_SUFFIX).delete();
-    new File(file.getPath() + ModificationFile.FILE_SUFFIX).delete();
+    FileFactory.INSTANCE.getFile(file.getPath() + RESOURCE_SUFFIX).delete();
+    FileFactory.INSTANCE.getFile(file.getPath() + ModificationFile.FILE_SUFFIX).delete();
   }
 
   @Override
......
@@ -18,12 +18,7 @@
  */
 package org.apache.iotdb.db.sync.receiver;
 
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
+import java.io.*;
 import java.math.BigInteger;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
@@ -66,6 +61,7 @@ import org.apache.iotdb.tsfile.file.metadata.TsDeviceMetadataIndex;
 import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.iotdb.db.engine.fileSystem.FileFactory;
 import org.apache.iotdb.tsfile.read.ReadOnlyTsFile;
 import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
 import org.apache.iotdb.tsfile.read.common.Field;
@@ -149,7 +145,7 @@ public class SyncServiceImpl implements SyncService.Iface {
     fileNodeStartTime.set(new HashMap<>());
     fileNodeEndTime.set(new HashMap<>());
     try {
-      FileUtils.deleteDirectory(new File(syncDataPath));
+      FileUtils.deleteDirectory(FileFactory.INSTANCE.getFile(syncDataPath));
     } catch (IOException e) {
       logger.error("cannot delete directory {} ", syncFolderPath);
       return false;
@@ -157,7 +153,7 @@ public class SyncServiceImpl implements SyncService.Iface {
     for (String bufferWritePath : bufferWritePaths) {
       bufferWritePath = FilePathUtils.regularizePath(bufferWritePath);
       String backupPath = bufferWritePath + SYNC_SERVER + File.separator;
-      File backupDirectory = new File(backupPath, this.uuid.get());
+      File backupDirectory = FileFactory.INSTANCE.getFile(backupPath, this.uuid.get());
       if (backupDirectory.exists() && backupDirectory.list().length != 0) {
         try {
           FileUtils.deleteDirectory(backupDirectory);
@@ -206,7 +202,7 @@ public class SyncServiceImpl implements SyncService.Iface {
       /** sync metadata, include storage group and timeseries **/
       return Boolean.toString(loadMetadata());
     } else if (status == SyncDataStatus.PROCESSING_STATUS) {
-      File file = new File(schemaFromSenderPath.get());
+      File file = FileFactory.INSTANCE.getFile(schemaFromSenderPath.get());
       if (!file.getParentFile().exists()) {
         try {
           file.getParentFile().mkdirs();
@@ -233,7 +229,7 @@ public class SyncServiceImpl implements SyncService.Iface {
         }
         md5OfReceiver = (new BigInteger(1, md.digest())).toString(16);
         if (!md5.equals(md5OfReceiver)) {
-          FileUtils.forceDelete(new File(schemaFromSenderPath.get()));
+          FileUtils.forceDelete(FileFactory.INSTANCE.getFile(schemaFromSenderPath.get()));
         }
       } catch (Exception e) {
         logger.error("Receiver cannot generate md5 {}", schemaFromSenderPath.get(), e);
@@ -246,7 +242,7 @@ public class SyncServiceImpl implements SyncService.Iface {
    * Load metadata from sender
    */
   private boolean loadMetadata() {
-    if (new File(schemaFromSenderPath.get()).exists()) {
+    if (FileFactory.INSTANCE.getFile(schemaFromSenderPath.get()).exists()) {
       try (BufferedReader br = new BufferedReader(
           new java.io.FileReader(schemaFromSenderPath.get()))) {
         String metadataOperation;
@@ -331,7 +327,7 @@ public class SyncServiceImpl implements SyncService.Iface {
     syncDataPath = FilePathUtils.regularizePath(syncDataPath);
     filePath = syncDataPath + filePath;
     if (status == SyncDataStatus.PROCESSING_STATUS) { // there are still data stream to add
-      File file = new File(filePath);
+      File file = FileFactory.INSTANCE.getFile(filePath);
      if (!file.getParentFile().exists()) {
        try {
          file.getParentFile().mkdirs();
@@ -363,7 +359,7 @@ public class SyncServiceImpl implements SyncService.Iface {
          logger.info(String.format("Receiver has received %d files from sender", fileNum.get()));
        } else {
-          FileUtils.forceDelete(new File(filePath));
+          FileUtils.forceDelete(FileFactory.INSTANCE.getFile(filePath));
        }
      } catch (Exception e) {
        logger.error("Receiver cannot generate md5 {}", filePath, e);
@@ -389,7 +385,7 @@ public class SyncServiceImpl implements SyncService.Iface {
    * Get all tsfiles' info which are sent from sender, it is preparing for merging these data
    */
   public void getFileNodeInfo() throws IOException {
-    File dataFileRoot = new File(syncDataPath);
+    File dataFileRoot = FileFactory.INSTANCE.getFile(syncDataPath);
     File[] files = dataFileRoot.listFiles();
     int processedNum = 0;
     for (File storageGroupPB : files) {
@@ -469,7 +465,7 @@ public class SyncServiceImpl implements SyncService.Iface {
         String header = syncDataPath;
         String relativePath = path.substring(header.length());
         TsFileResource fileNode = new TsFileResource(
-            new File(DirectoryManager.getInstance().getNextFolderIndexForSequenceFile() +
+            FileFactory.INSTANCE.getFile(DirectoryManager.getInstance().getNextFolderIndexForSequenceFile() +
                 File.separator + relativePath), startTimeMap, endTimeMap
         );
         // call interface of load external file
@@ -718,7 +714,7 @@ public class SyncServiceImpl implements SyncService.Iface {
     fileNodeEndTime.remove();
     schemaFromSenderPath.remove();
     try {
-      FileUtils.deleteDirectory(new File(syncFolderPath));
+      FileUtils.deleteDirectory(FileFactory.INSTANCE.getFile(syncFolderPath));
     } catch (IOException e) {
       logger.error("can not delete directory {}", syncFolderPath, e);
     }
......
@@ -22,8 +22,11 @@ package org.apache.iotdb.db.tools;
 import java.io.File;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Comparator;
 
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
 import org.apache.iotdb.db.qp.constant.DatetimeUtils;
+import org.apache.iotdb.db.engine.fileSystem.FileFactory;
 
 /**
  * this tool can analyze the tsfile.resource files from a folder.
@@ -36,10 +39,9 @@ public class TsFileResourcePrinter {
     if (args.length >= 1) {
       folder = args[0];
     }
-    File folderFile = new File(folder);
-    File[] files = folderFile.listFiles((dir, name) -> name.endsWith(".tsfile.resource"));
-    Arrays.sort(files, (x, y) -> Long
-        .compare(Long.valueOf(x.getName().split("-")[0]), Long.valueOf(y.getName().split("-")[0])));
+    File folderFile = FileFactory.INSTANCE.getFile(folder);
+    File[] files = folderFile.listFiles(file -> file.getName().endsWith(".tsfile.resource"));
+    Arrays.sort(files, Comparator.comparingLong(x -> Long.valueOf(x.getName().split("-")[0])));
 
     for (File file : files) {
       printResource(file.getAbsolutePath());
@@ -49,7 +51,7 @@ public class TsFileResourcePrinter {
   public static void printResource(String filename) throws IOException {
     filename = filename.substring(0, filename.length() - 9);
-    TsFileResource resource = new TsFileResource(new File(filename));
+    TsFileResource resource = new TsFileResource(FileFactory.INSTANCE.getFile(filename));
     System.err.println(String.format("analyzing %s ...", filename));
     resource.deSerialize();
......
@@ -38,6 +38,7 @@ import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
 import org.apache.iotdb.tsfile.file.metadata.TsDeviceMetadata;
 import org.apache.iotdb.tsfile.file.metadata.TsDeviceMetadataIndex;
 import org.apache.iotdb.tsfile.file.metadata.TsFileMetaData;
+import org.apache.iotdb.db.engine.fileSystem.FileFactory;
 import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
 import org.apache.iotdb.tsfile.write.schema.Schema;
 import org.apache.iotdb.tsfile.write.writer.RestorableTsFileIOWriter;
@@ -81,7 +82,7 @@ public class TsFileRecoverPerformer {
     this.logReplayer = new LogReplayer(logNodePrefix, insertFilePath, tsFileResource.getModFile(),
         versionController,
         tsFileResource, schema, recoverMemTable, acceptUnseq);
-    File insertFile = new File(insertFilePath);
+    File insertFile = FileFactory.INSTANCE.getFile(insertFilePath);
     if (!insertFile.exists()) {
       logger.error("TsFile {} is missing, will skip its recovery.", insertFilePath);
       return;
@@ -141,7 +142,7 @@ public class TsFileRecoverPerformer {
     // clean logs
     try {
       MultiFileLogNodeManager.getInstance()
-          .deleteNode(logNodePrefix + new File(insertFilePath).getName());
+          .deleteNode(logNodePrefix + FileFactory.INSTANCE.getFile(insertFilePath).getName());
     } catch (IOException e) {
       throw new ProcessorException(e);
     }
......
@@ -21,12 +21,12 @@ package org.apache.iotdb.db.engine.memtable;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
-import java.io.File;
 import java.util.concurrent.ExecutionException;
 import org.apache.iotdb.db.engine.MetadataManagerHelper;
 import org.apache.iotdb.db.engine.flush.MemTableFlushTask;
 import org.apache.iotdb.db.utils.EnvironmentUtils;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
+import org.apache.iotdb.db.engine.fileSystem.FileFactory;
 import org.apache.iotdb.tsfile.write.writer.RestorableTsFileIOWriter;
 import org.junit.After;
 import org.junit.Before;
@@ -45,7 +45,7 @@ public class MemTableFlushTaskTest {
   public void setUp() throws Exception {
     MetadataManagerHelper.initMetadata();
     EnvironmentUtils.envSetUp();
-    writer = new RestorableTsFileIOWriter(new File(filePath));
+    writer = new RestorableTsFileIOWriter(FileFactory.INSTANCE.getFile(filePath));
     memTable = new PrimitiveMemTable();
   }
......
@@ -40,6 +40,7 @@ import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
 import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.iotdb.db.engine.fileSystem.FileFactory;
 import org.apache.iotdb.tsfile.write.TsFileWriter;
 import org.apache.iotdb.tsfile.write.record.TSRecord;
 import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
@@ -114,18 +115,18 @@ abstract class MergeTest {
   private void prepareFiles(int seqFileNum, int unseqFileNum) throws IOException, WriteProcessException {
     for (int i = 0; i < seqFileNum; i++) {
-      File file = new File(i + "seq.tsfile");
+      File file = FileFactory.INSTANCE.getFile(i + "seq.tsfile");
       TsFileResource tsFileResource = new TsFileResource(file);
       seqResources.add(tsFileResource);
       prepareFile(tsFileResource, i * ptNum, ptNum, 0);
     }
     for (int i = 0; i < unseqFileNum; i++) {
-      File file = new File(i + "unseq.tsfile");
+      File file = FileFactory.INSTANCE.getFile(i + "unseq.tsfile");
       TsFileResource tsFileResource = new TsFileResource(file);
       unseqResources.add(tsFileResource);
       prepareFile(tsFileResource, i * ptNum, ptNum * (i + 1) / unseqFileNum, 10000);
     }
-    File file = new File(unseqFileNum + "unseq.tsfile");
+    File file = FileFactory.INSTANCE.getFile(unseqFileNum + "unseq.tsfile");
     TsFileResource tsFileResource = new TsFileResource(file);
     unseqResources.add(tsFileResource);
     prepareFile(tsFileResource, 0, ptNum * unseqFileNum, 20000);
......
@@ -22,7 +22,6 @@ import static junit.framework.TestCase.assertTrue;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.Iterator;
@@ -42,6 +41,7 @@ import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
 import org.apache.iotdb.tsfile.file.metadata.ChunkGroupMetaData;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
+import org.apache.iotdb.db.engine.fileSystem.FileFactory;
 import org.apache.iotdb.tsfile.utils.Pair;
 import org.apache.iotdb.tsfile.write.record.TSRecord;
 import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
@@ -76,7 +76,7 @@ public class TsFileProcessorTest {
   @Test
   public void testWriteAndFlush()
       throws WriteProcessException, IOException, TsFileProcessorException {
-    processor = new TsFileProcessor(storageGroup, new File(filePath),
+    processor = new TsFileProcessor(storageGroup, FileFactory.INSTANCE.getFile(filePath),
         SchemaUtils.constructSchema(deviceId), SysTimeVersionController.INSTANCE, x -> {
     },
         () -> true, true);
@@ -123,7 +123,7 @@ public class TsFileProcessorTest {
   @Test
   public void testWriteAndRestoreMetadata()
       throws IOException {
-    processor = new TsFileProcessor(storageGroup, new File(filePath),
+    processor = new TsFileProcessor(storageGroup, FileFactory.INSTANCE.getFile(filePath),
         SchemaUtils.constructSchema(deviceId), SysTimeVersionController.INSTANCE, x -> {
     },
         () -> true, true);
@@ -168,7 +168,7 @@ public class TsFileProcessorTest {
     RestorableTsFileIOWriter tsFileIOWriter = processor.getWriter();
     List<ChunkGroupMetaData> chunkGroupMetaDataList = tsFileIOWriter.getChunkGroupMetaDatas();
     RestorableTsFileIOWriter restorableTsFileIOWriter = new RestorableTsFileIOWriter(
-        new File(filePath));
+        FileFactory.INSTANCE.getFile(filePath));
     List<ChunkGroupMetaData> restoredChunkGroupMetaDataList = restorableTsFileIOWriter
         .getChunkGroupMetaDatas();
     assertEquals(chunkGroupMetaDataList.size(), restoredChunkGroupMetaDataList.size());
@@ -190,7 +190,7 @@ public class TsFileProcessorTest {
   @Test
   public void testMultiFlush()
       throws WriteProcessException, IOException, TsFileProcessorException {
-    processor = new TsFileProcessor(storageGroup, new File(filePath),
+    processor = new TsFileProcessor(storageGroup, FileFactory.INSTANCE.getFile(filePath),
         SchemaUtils.constructSchema(deviceId), SysTimeVersionController.INSTANCE, x -> {
     },
         () -> true, true);
@@ -226,7 +226,7 @@ public class TsFileProcessorTest {
   @Test
   public void testWriteAndClose()
       throws WriteProcessException, IOException {
-    processor = new TsFileProcessor(storageGroup, new File(filePath),
+    processor = new TsFileProcessor(storageGroup, FileFactory.INSTANCE.getFile(filePath),
        SchemaUtils.constructSchema(deviceId), SysTimeVersionController.INSTANCE,
        unsealedTsFileProcessor -> {
          TsFileResource resource = unsealedTsFileProcessor.getTsFileResource();
......
@@ -22,10 +22,11 @@ import static org.junit.Assert.fail;
 import java.io.File;
 import java.io.IOException;
-import java.nio.file.Files;
 
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
+import org.apache.iotdb.db.engine.fileSystem.FileFactory;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -60,7 +61,7 @@ public class FileReaderManagerTest {
     TsFileResource[] tsFileResources = new TsFileResource[MAX_FILE_SIZE + 1];
 
     for (int i = 1; i <= MAX_FILE_SIZE; i++) {
-      File file = new File(filePath + i);
+      File file = FileFactory.INSTANCE.getFile(filePath + i);
       file.createNewFile();
       tsFileResources[i] = new TsFileResource(file);
     }
@@ -115,7 +116,7 @@ public class FileReaderManagerTest {
     t2.join();
 
     for (int i = 1; i <= MAX_FILE_SIZE; i++) {
-      TsFileResource tsFile = new TsFileResource(new File(filePath + i));
+      TsFileResource tsFile = new TsFileResource(FileFactory.INSTANCE.getFile(filePath + i));
       Assert.assertTrue(manager.contains(tsFile, false));
     }
@@ -135,8 +136,8 @@ public class FileReaderManagerTest {
     FileReaderManager.getInstance().closeAndRemoveAllOpenedReaders();
 
     for (int i = 1; i < MAX_FILE_SIZE; i++) {
-      File file = new File(filePath + i);
-      boolean result = Files.deleteIfExists(file.toPath());
+      File file = FileFactory.INSTANCE.getFile(filePath + i);
+      boolean result = !file.exists() || file.delete();
       if (!result) {
         fail();
       }
......
@@ -43,6 +43,7 @@ import org.apache.iotdb.db.writelog.manager.MultiFileLogNodeManager;
 import org.apache.iotdb.db.writelog.node.WriteLogNode;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.iotdb.db.engine.fileSystem.FileFactory;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.write.schema.Schema;
 import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
@@ -53,8 +54,8 @@ public class LogReplayerTest {
   @Test
   public void test() throws IOException, ProcessorException {
     String logNodePrefix = "testLogNode";
-    File tsFile = new File("temp", "test.ts");
-    File modF = new File("test.mod");
+    File tsFile = FileFactory.INSTANCE.getFile("temp", "test.ts");
+    File modF = FileFactory.INSTANCE.getFile("test.mod");
     ModificationFile modFile = new ModificationFile(modF.getPath());
     VersionController versionController = new VersionController() {
       @Override
......
@@ -35,6 +35,7 @@ import org.apache.iotdb.db.writelog.node.WriteLogNode;
 import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.iotdb.db.engine.fileSystem.FileFactory;
 import org.apache.iotdb.tsfile.read.ReadOnlyTsFile;
 import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
 import org.apache.iotdb.tsfile.read.common.Field;
@@ -76,7 +77,7 @@ public class SeqTsFileRecoverTest {
 
   @Before
   public void setup() throws IOException, WriteProcessException {
-    tsF = new File("temp", "test.ts");
+    tsF = FileFactory.INSTANCE.getFile("temp", "test.ts");
     tsF.getParentFile().mkdirs();
     schema = new Schema();
......
@@ -37,6 +37,7 @@ import org.apache.iotdb.tsfile.exception.write.WriteProcessException;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetaData;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.iotdb.db.engine.fileSystem.FileFactory;
 import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
 import org.apache.iotdb.tsfile.read.common.Chunk;
 import org.apache.iotdb.tsfile.read.common.Path;
@@ -79,7 +80,7 @@ public class UnseqTsFileRecoverTest {
 
   @Before
   public void setup() throws IOException, WriteProcessException {
-    tsF = new File("temp", "test.ts");
+    tsF = FileFactory.INSTANCE.getFile("temp", "test.ts");
     tsF.getParentFile().mkdirs();
     schema = new Schema();
......
@@ -20,9 +20,7 @@ package org.apache.iotdb.tsfile.write.writer;
 
 import java.io.File;
 import java.io.IOException;
-import java.util.HashMap;
 import java.util.Map;
-import java.util.Map.Entry;
 import org.apache.iotdb.tsfile.exception.write.TsFileNotCompleteException;
 import org.apache.iotdb.tsfile.file.metadata.TsDeviceMetadata;
 import org.apache.iotdb.tsfile.file.metadata.TsDeviceMetadataIndex;
......
@@ -230,7 +230,7 @@ public class TsFileIOWriter {
     out.write(chunk.getData());
     endChunk(chunkMetadata.getNumOfPoints());
   }
 
   /**
    * end chunk and write some log.
    *
@@ -376,12 +376,8 @@ public class TsFileIOWriter {
   }
 
   /**
-<<<<<<< HEAD
-   * close the outputStream or file channel without writing FileMetadata.
-=======
    * close the outputStream or file channel without writing FileMetadata. This is just used for
    * Testing.
->>>>>>> master
    */
   public void close() throws IOException {
     canWrite = false;
......