Commit 3e41ef82 authored by Jiang Tian, committed by Jialin Qiao

[IOTDB-104] Fix MManager being incorrectly recovered when the system reboots (#192)

* Fix MManager being incorrectly recovered when the system reboots

* Clean up overflow files when a storage group is deleted
Parent commit: 36484ac3
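For orientation, here is a minimal caller-side sketch (not part of the commit) of the reworked deletion contract introduced below: MManager.deletePathsFromMTree now returns a pair of storage-group sets, one to close and one to delete, which OverflowQPExecutor consumes further down in this diff (the real executor also deletes the series data first via deleteDataOfTimeSeries). The wrapper class and method names are illustrative.

```java
import java.util.Collections;
import java.util.List;
import java.util.Set;
import org.apache.iotdb.db.engine.filenode.FileNodeManager;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.metadata.MManager;
import org.apache.iotdb.tsfile.read.common.Path;
import org.apache.iotdb.tsfile.utils.Pair;

// Sketch only: shows how the new MManager.deletePathsFromMTree return value is meant
// to be consumed, under the assumptions stated above.
public final class DeleteTimeseriesSketch {

  public static void deleteTimeseries(String fullPath)
      throws MetadataErrorException, FileNodeManagerException {
    List<Path> toDelete = Collections.singletonList(new Path(fullPath));
    Pair<Set<String>, Set<String>> affected =
        MManager.getInstance().deletePathsFromMTree(toDelete);
    // left: storage groups that still hold timeseries; close them so the deletion is persisted.
    for (String storageGroup : affected.left) {
      FileNodeManager.getInstance().closeOneFileNode(storageGroup);
    }
    // right: storage groups left empty; remove their files (buffer-write and, with this
    // commit, overflow data as well).
    for (String storageGroup : affected.right) {
      FileNodeManager.getInstance().deleteOneFileNode(storageGroup);
    }
  }
}
```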
......@@ -45,6 +45,7 @@ import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
import org.apache.iotdb.db.exception.BufferWriteProcessorException;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.FileNodeProcessorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.metadata.MManager;
......@@ -243,7 +244,7 @@ public class FileNodeManager implements IStatistic, IService {
List<String> filenodeNames = null;
try {
filenodeNames = MManager.getInstance().getAllFileNames();
} catch (PathErrorException e) {
} catch (MetadataErrorException e) {
LOGGER.error("Restoring all FileNodes failed.", e);
return;
}
......@@ -780,7 +781,7 @@ public class FileNodeManager implements IStatistic, IService {
List<String> allFileNodeNames;
try {
allFileNodeNames = MManager.getInstance().getAllFileNames();
} catch (PathErrorException e) {
} catch (MetadataErrorException e) {
LOGGER.error("Get all storage group seriesPath error,", e);
throw new FileNodeManagerException(e);
}
......@@ -884,6 +885,7 @@ public class FileNodeManager implements IStatistic, IService {
FileUtils.deleteDirectory(new File(fileNodePath));
cleanBufferWrite(processorName);
cleanOverflow(processorName);
MultiFileLogNodeManager.getInstance()
.deleteNode(processorName + IoTDBConstant.BUFFERWRITE_LOG_NODE_SUFFIX);
......@@ -897,6 +899,24 @@ public class FileNodeManager implements IStatistic, IService {
}
}
private void cleanOverflow(String processorName) throws IOException {
String overflowDirPath = TsFileDBConf.getOverflowDataDir();
String overflowPath = standardizeDir(overflowDirPath) + processorName;
File overflowDir = new File(overflowPath);
// free and close the streams under this overflow directory
if (!overflowDir.exists()) {
return;
}
File[] overflowSubDirs = overflowDir.listFiles();
if (overflowSubDirs == null) {
return;
}
for (File overflowSubDir : overflowSubDirs) {
closeAndRemoveReader(overflowSubDir);
}
FileUtils.deleteDirectory(overflowDir);
}
private void cleanBufferWrite(String processorName) throws IOException {
List<String> bufferwritePathList = directories.getAllTsFileFolders();
for (String bufferwritePath : bufferwritePathList) {
......@@ -906,16 +926,20 @@ public class FileNodeManager implements IStatistic, IService {
if (!bufferDir.exists()) {
continue;
}
File[] bufferFiles = bufferDir.listFiles();
if (bufferFiles != null) {
for (File bufferFile : bufferFiles) {
FileReaderManager.getInstance().closeFileAndRemoveReader(bufferFile.getPath());
}
}
closeAndRemoveReader(bufferDir);
FileUtils.deleteDirectory(new File(bufferwritePath));
}
}
private void closeAndRemoveReader(File folder) throws IOException {
File[] files = folder.listFiles();
if (files != null) {
for (File file : files) {
FileReaderManager.getInstance().closeFileAndRemoveReader(file.getPath());
}
}
}
private void deleteFileNodeBlocked(String processorName) throws FileNodeManagerException {
LOGGER.info("Forced to delete the filenode processor {}", processorName);
FileNodeProcessor processor = processorMap.get(processorName);
......
......@@ -26,4 +26,11 @@ public class ArgsErrorException extends Exception {
super(msg);
}
public ArgsErrorException(String message, Throwable cause) {
super(message, cause);
}
public ArgsErrorException(Throwable cause) {
super(cause);
}
}
......@@ -24,12 +24,19 @@ package org.apache.iotdb.db.exception;
*
* @author kangrong
*/
public class MetadataArgsErrorException extends ArgsErrorException {
public class MetadataErrorException extends Exception {
private static final long serialVersionUID = 3415275599091623570L;
public MetadataArgsErrorException(String msg) {
public MetadataErrorException(String msg) {
super(msg);
}
public MetadataErrorException(String message, Throwable cause) {
super(message, cause);
}
public MetadataErrorException(Throwable cause) {
super(cause);
}
}
......@@ -25,7 +25,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
......@@ -52,9 +52,9 @@ public class MGraph implements Serializable {
/**
* Add a {@code PTree} to current {@code MGraph}.
*/
public void addAPTree(String ptreeRootName) throws MetadataArgsErrorException {
public void addAPTree(String ptreeRootName) throws MetadataErrorException {
if (MetadataConstant.ROOT.equalsIgnoreCase(ptreeRootName)) {
throw new MetadataArgsErrorException("Property Tree's root name should not be 'root'");
throw new MetadataErrorException("Property Tree's root name should not be 'root'");
}
PTree ptree = new PTree(ptreeRootName, mtree);
ptreeMap.put(ptreeRootName, ptree);
......@@ -89,7 +89,7 @@ public class MGraph implements Serializable {
/**
* Add a seriesPath to {@code PTree}.
*/
public void addPathToPTree(String path) throws PathErrorException, MetadataArgsErrorException {
public void addPathToPTree(String path) throws PathErrorException {
String[] nodes = path.trim().split(DOUB_SEPARATOR);
if (nodes.length == 0) {
throw new PathErrorException("Timeseries is null.");
......
......@@ -31,13 +31,15 @@ import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.monitor.MonitorConstants;
import org.apache.iotdb.db.utils.RandomDeleteCache;
import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
import org.apache.iotdb.tsfile.exception.cache.CacheException;
......@@ -45,9 +47,10 @@ import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
import org.apache.iotdb.tsfile.read.common.Path;
import org.apache.iotdb.tsfile.utils.Pair;
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
/**
* This class takes the responsibility of serializing all the metadata info and persisting it
......@@ -141,7 +144,7 @@ public class MManager {
}
logWriter = new BufferedWriter(new FileWriter(logFile, true));
writeToLog = true;
} catch (PathErrorException | MetadataArgsErrorException
} catch (PathErrorException | MetadataErrorException
| ClassNotFoundException | IOException e) {
mgraph = new MGraph(ROOT_NAME);
LOGGER.error("Cannot read MGraph from file, using an empty new one");
......@@ -160,7 +163,7 @@ public class MManager {
}
private void initFromLog(File logFile)
throws IOException, PathErrorException, MetadataArgsErrorException {
throws IOException, PathErrorException, MetadataErrorException {
// init the metadata from the operation log
mgraph = new MGraph(ROOT_NAME);
if (logFile.exists()) {
......@@ -187,32 +190,28 @@ public class MManager {
}
private void operation(String cmd)
throws PathErrorException, IOException, MetadataArgsErrorException {
throws PathErrorException, IOException, MetadataErrorException {
//see addPathToMTree() to get the detailed format of the cmd
String[] args = cmd.trim().split(",");
switch (args[0]) {
case MetadataOperationType.ADD_PATH_TO_MTREE:
String[] leftArgs;
Map<String, String> props = null;
if (args.length > 5) {
String[] kv = new String[2];
String[] kv;
props = new HashMap<>(args.length - 5 + 1, 1);
leftArgs = new String[args.length - 5];
for (int k = 5; k < args.length; k++) {
kv = args[k].split("=");
props.put(kv[0], kv[1]);
}
} else {
//when ????
leftArgs = new String[0];
}
addPathToMTree(args[1], TSDataType.deserialize(Short.valueOf(args[2])),
addPathToMTree(new Path(args[1]), TSDataType.deserialize(Short.valueOf(args[2])),
TSEncoding.deserialize(Short.valueOf(args[3])),
CompressionType.deserialize(Short.valueOf(args[4])),
props);
break;
case MetadataOperationType.DELETE_PATH_FROM_MTREE:
deletePathFromMTree(args[1]);
deletePathsFromMTree(Collections.singletonList(new Path(args[1])));
break;
case MetadataOperationType.SET_STORAGE_LEVEL_TO_MTREE:
setStorageLevelToMTree(args[1]);
......@@ -250,19 +249,85 @@ public class MManager {
}
}
public boolean addPathToMTree(String path, TSDataType dataType, TSEncoding encoding,
CompressionType compressor, Map<String, String> props)
throws MetadataErrorException {
return addPathToMTree(new Path(path), dataType, encoding, compressor, props);
}
/**
* <p> Add one timeseries to metadata. Must invoke the<code>pathExist</code> and
* <code>getFileNameByPath</code> method first to check timeseries. </p>
* <p> Add one timeseries to metadata.
*
* @param path the timeseries seriesPath
* @param dataType the data type {@code DataType} for the timeseries
* @param encoding the encoding function {@code Encoding} for the timeseries
* @param compressor the compressor function {@code Compressor} for the time series
* @return whether the measurement occurs for the first time in this storage group (if true,
* the measurement should be registered to the StorageGroupManager too)
*/
public void addPathToMTree(String path, TSDataType dataType, TSEncoding encoding,
public boolean addPathToMTree(Path path, TSDataType dataType, TSEncoding encoding,
CompressionType compressor, Map<String, String> props)
throws MetadataErrorException {
if (pathExist(path.getFullPath())) {
throw new MetadataErrorException(
String.format("Timeseries %s already exist", path.getFullPath()));
}
if (!checkFileNameByPath(path.getFullPath())) {
throw new MetadataErrorException("Storage group should be created first");
}
// optimize the speed of adding timeseries
String fileNodePath;
try {
fileNodePath = getFileNameByPath(path.getFullPath());
} catch (PathErrorException e) {
throw new MetadataErrorException(e);
}
// the two maps are stored in the storage group node
Map<String, MeasurementSchema> schemaMap = getSchemaMapForOneFileNode(fileNodePath);
Map<String, Integer> numSchemaMap = getNumSchemaMapForOneFileNode(fileNodePath);
String lastNode = path.getMeasurement();
boolean isNewMeasurement = true;
// Thread safety: just one thread can access/modify the schemaMap
synchronized (schemaMap) {
if (schemaMap.containsKey(lastNode)) {
isNewMeasurement = false;
MeasurementSchema columnSchema = schemaMap.get(lastNode);
if (!columnSchema.getType().equals(dataType)
|| !columnSchema.getEncodingType().equals(encoding)) {
throw new MetadataErrorException(String.format(
"The resultDataType or encoding of the last node %s is conflicting "
+ "in the storage group %s", lastNode, fileNodePath));
}
try {
addPathToMTreeInternal(path.getFullPath(), dataType, encoding, compressor, props);
} catch (IOException | PathErrorException e) {
throw new MetadataErrorException(e);
}
numSchemaMap.put(lastNode, numSchemaMap.get(lastNode) + 1);
} else {
try {
addPathToMTreeInternal(path.getFullPath(), dataType, encoding, compressor, props);
} catch (PathErrorException | IOException e) {
throw new MetadataErrorException(e);
}
MeasurementSchema columnSchema;
try {
columnSchema = getSchemaForOnePath(path.toString());
} catch (PathErrorException e) {
throw new MetadataErrorException(e);
}
schemaMap.put(lastNode, columnSchema);
numSchemaMap.put(lastNode, 1);
}
return isNewMeasurement;
}
}
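A short, hedged sketch of the boolean contract documented above (measurement name and enum choices are illustrative; it mirrors the simplified ADD_PATH branch in OverflowQPExecutor later in this diff, and assumes the caller handles MetadataErrorException and FileNodeManagerException):

```java
// Assumes the MManager/FileNodeManager singletons shown elsewhere in this diff, plus
// TSDataType, TSEncoding, CompressionType and TSFileConfig from the tsfile module.
Path series = new Path("root.vehicle.d0.s0");
boolean isNewMeasurement = MManager.getInstance().addPathToMTree(
    series, TSDataType.INT32, TSEncoding.RLE,
    CompressionType.valueOf(TSFileConfig.compressor), Collections.emptyMap());
if (isNewMeasurement) {
  // Only a measurement that is new in its storage group needs to be registered
  // with the storage engine.
  FileNodeManager.getInstance().addTimeSeries(
      series, TSDataType.INT32, TSEncoding.RLE,
      CompressionType.valueOf(TSFileConfig.compressor), Collections.emptyMap());
}
```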
private void addPathToMTreeInternal(String path, TSDataType dataType, TSEncoding encoding,
CompressionType compressor, Map<String, String> props)
throws PathErrorException, IOException {
lock.writeLock().lock();
try {
mgraph.addPathToMTree(path, dataType, encoding, compressor, props);
......@@ -287,7 +352,7 @@ public class MManager {
* <p> Add one timeseries to metadata. Must invoke the<code>pathExist</code> and
* <code>getFileNameByPath</code> method first to check timeseries. </p>
*
* this is just for compatibility
* this is just for compatibility TEST ONLY
*
* @param path the timeseries seriesPath
* @param dataType the data type {@code DataType} for the timeseries
......@@ -298,13 +363,117 @@ public class MManager {
TSDataType tsDataType = TSDataType.valueOf(dataType);
TSEncoding tsEncoding = TSEncoding.valueOf(encoding);
CompressionType type = CompressionType.valueOf(TSFileConfig.compressor);
addPathToMTree(path, tsDataType, tsEncoding, type, Collections.emptyMap());
addPathToMTreeInternal(path, tsDataType, tsEncoding, type, Collections.emptyMap());
}
private List<String> collectPaths(List<Path> paths) throws MetadataErrorException {
Set<String> pathSet = new HashSet<>();
// Attention: Monitor storage group seriesPath is not allowed to be deleted
for (Path p : paths) {
List<String> subPaths;
subPaths = getPaths(p.getFullPath());
if (subPaths.isEmpty()) {
throw new MetadataErrorException(String
.format("There are no timeseries in the prefix of %s seriesPath",
p.getFullPath()));
}
List<String> newSubPaths = new ArrayList<>();
for (String eachSubPath : subPaths) {
String filenodeName;
try {
filenodeName = getFileNameByPath(eachSubPath);
} catch (PathErrorException e) {
throw new MetadataErrorException(e);
}
if (MonitorConstants.STAT_STORAGE_GROUP_PREFIX.equals(filenodeName)) {
continue;
}
newSubPaths.add(eachSubPath);
}
pathSet.addAll(newSubPaths);
}
for (String p : pathSet) {
if (!pathExist(p)) {
throw new MetadataErrorException(String.format(
"Timeseries %s does not exist and cannot be deleted", p));
}
}
return new ArrayList<>(pathSet);
}
public Pair<Set<String>, Set<String>> deletePathFromMTree(String deletePath)
throws MetadataErrorException {
return deletePathFromMTree(new Path(deletePath));
}
public Pair<Set<String>, Set<String>> deletePathFromMTree(Path deletePath)
throws MetadataErrorException {
return deletePathsFromMTree(Collections.singletonList(deletePath));
}
/**
* delete given paths from metadata and data.
* @param deletePathList list of paths to be deleted
* @return the first set contains StorageGroups that are affected by this deletion but
* still have remaining timeseries, so these StorageGroups should be closed to make sure the data
* deletion is persisted; the second set contains StorageGroups that contain no more timeseries
* after this deletion and files of such StorageGroups should be deleted to reclaim disk space.
* @throws MetadataErrorException
*/
public Pair<Set<String>, Set<String>> deletePathsFromMTree(List<Path> deletePathList)
throws MetadataErrorException {
if (deletePathList != null && !deletePathList.isEmpty()) {
List<String> fullPath = collectPaths(deletePathList);
Set<String> closeFileNodes = new HashSet<>();
Set<String> deleteFileNodes = new HashSet<>();
for (String p : fullPath) {
String filenode;
try {
filenode = getFileNameByPath(p);
} catch (PathErrorException e) {
throw new MetadataErrorException(e);
}
closeFileNodes.add(filenode);
// the two maps are stored in the storage group node
Map<String, MeasurementSchema> schemaMap = getSchemaMapForOneFileNode(filenode);
Map<String, Integer> numSchemaMap = getNumSchemaMapForOneFileNode(filenode);
// Thread safety: just one thread can access/modify the schemaMap
synchronized (schemaMap) {
// TODO: don't delete the storage group seriesPath recursively
Path path = new Path(p);
String measurementId = path.getMeasurement();
if (numSchemaMap.get(measurementId) == 1) {
numSchemaMap.remove(measurementId);
schemaMap.remove(measurementId);
} else {
numSchemaMap.put(measurementId, numSchemaMap.get(measurementId) - 1);
}
String deleteNameSpacePath;
try {
deleteNameSpacePath = deletePathFromMTreeInternal(p);
} catch (PathErrorException | IOException e) {
throw new MetadataErrorException(e);
}
if (deleteNameSpacePath != null) {
deleteFileNodes.add(deleteNameSpacePath);
}
}
}
closeFileNodes.removeAll(deleteFileNodes);
return new Pair<>(closeFileNodes, deleteFileNodes);
}
return new Pair<>(Collections.emptySet(), Collections.emptySet());
}
/**
* function for deleting a given path from mTree.
*
* @return the related storage group name if there is no path in the storage group anymore;
* otherwise null
*/
public String deletePathFromMTree(String path) throws PathErrorException, IOException {
private String deletePathFromMTreeInternal(String path) throws PathErrorException, IOException {
lock.writeLock().lock();
try {
checkAndGetDataTypeCache.clear();
......@@ -325,7 +494,7 @@ public class MManager {
/**
* function for setting storage level of the given path to mTree.
*/
public void setStorageLevelToMTree(String path) throws PathErrorException, IOException {
public void setStorageLevelToMTree(String path) throws MetadataErrorException {
lock.writeLock().lock();
try {
......@@ -338,7 +507,9 @@ public class MManager {
logWriter.newLine();
logWriter.flush();
}
} finally {
} catch (IOException | PathErrorException e) {
throw new MetadataErrorException(e);
} finally {
lock.writeLock().unlock();
}
}
......@@ -359,7 +530,7 @@ public class MManager {
/**
* function for adding a pTree.
*/
public void addAPTree(String ptreeRootName) throws IOException, MetadataArgsErrorException {
public void addAPTree(String ptreeRootName) throws IOException, MetadataErrorException {
lock.writeLock().lock();
try {
......@@ -379,7 +550,7 @@ public class MManager {
* function for adding a given path to pTree.
*/
public void addPathToPTree(String path)
throws PathErrorException, IOException, MetadataArgsErrorException {
throws PathErrorException, IOException {
lock.writeLock().lock();
try {
......@@ -674,7 +845,7 @@ public class MManager {
/**
* function for getting all file names.
*/
public List<String> getAllFileNames() throws PathErrorException {
public List<String> getAllFileNames() throws MetadataErrorException {
lock.readLock().lock();
try {
......@@ -690,13 +861,13 @@ public class MManager {
*
* @return List of String represented all file names
*/
public List<String> getAllFileNamesByPath(String path) throws PathErrorException {
public List<String> getAllFileNamesByPath(String path) throws MetadataErrorException {
lock.readLock().lock();
try {
return mgraph.getAllFileNamesByPath(path);
} catch (PathErrorException e) {
throw new PathErrorException(e);
throw new MetadataErrorException(e);
} finally {
lock.readLock().unlock();
}
......@@ -706,10 +877,12 @@ public class MManager {
* return a HashMap contains all the paths separated by File Name.
*/
public Map<String, ArrayList<String>> getAllPathGroupByFileName(String path)
throws PathErrorException {
throws MetadataErrorException {
lock.readLock().lock();
try {
return mgraph.getAllPathGroupByFilename(path);
} catch (PathErrorException e) {
throw new MetadataErrorException(e);
} finally {
lock.readLock().unlock();
}
......@@ -719,7 +892,7 @@ public class MManager {
* Return all paths for given seriesPath if the seriesPath is abstract. Or return the seriesPath
* itself.
*/
public List<String> getPaths(String path) throws PathErrorException {
public List<String> getPaths(String path) throws MetadataErrorException {
lock.readLock().lock();
try {
......
......@@ -33,8 +33,7 @@ import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.engine.filenode.FileNodeManager;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.StartupException;
import org.apache.iotdb.db.metadata.MManager;
import org.apache.iotdb.db.monitor.MonitorConstants.FileNodeManagerStatConstants;
......@@ -46,6 +45,7 @@ import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
import org.apache.iotdb.tsfile.read.common.Path;
import org.apache.iotdb.tsfile.write.record.TSRecord;
import org.apache.iotdb.tsfile.write.record.datapoint.LongDataPoint;
import org.slf4j.Logger;
......@@ -88,7 +88,7 @@ public class StatMonitor implements IService {
if (!mmanager.pathExist(prefix)) {
mmanager.setStorageLevelToMTree(prefix);
}
} catch (PathErrorException | IOException e) {
} catch (MetadataErrorException e) {
LOGGER.error("MManager cannot set storage level to MTree.", e);
}
}
......@@ -164,12 +164,12 @@ public class StatMonitor implements IService {
}
if (!mManager.pathExist(entry.getKey())) {
mManager.addPathToMTree(entry.getKey(), TSDataType.valueOf(entry.getValue()),
mManager.addPathToMTree(new Path(entry.getKey()), TSDataType.valueOf(entry.getValue()),
TSEncoding.valueOf("RLE"), CompressionType.valueOf(TSFileConfig.compressor),
Collections.emptyMap());
}
}
} catch (IOException | PathErrorException e) {
} catch (MetadataErrorException e) {
LOGGER.error("Initialize the metadata error.", e);
}
}
......
......@@ -22,6 +22,7 @@ import java.time.ZoneId;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.exception.ArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.exception.qp.IllegalASTFormatException;
import org.apache.iotdb.db.exception.qp.LogicalOperatorException;
......@@ -61,13 +62,15 @@ public class QueryProcessor {
}
public PhysicalPlan parseSQLToPhysicalPlan(String sqlStr)
throws QueryProcessorException, ArgsErrorException, ProcessorException {
throws QueryProcessorException, ArgsErrorException, ProcessorException,
MetadataErrorException {
IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
return parseSQLToPhysicalPlan(sqlStr, config.getZoneID());
}
public PhysicalPlan parseSQLToPhysicalPlan(String sqlStr, ZoneId zoneId)
throws QueryProcessorException, ArgsErrorException, ProcessorException {
throws QueryProcessorException, ArgsErrorException, ProcessorException,
MetadataErrorException {
AstNode astNode = parseSQLToAST(sqlStr);
Operator operator = parseASTToOperator(astNode, zoneId);
operator = logicalOptimize(operator, executor);
......@@ -86,7 +89,7 @@ public class QueryProcessor {
* @throws ArgsErrorException
*/
private RootOperator parseASTToOperator(AstNode astNode, ZoneId zoneId)
throws QueryProcessorException, ArgsErrorException {
throws QueryProcessorException, ArgsErrorException, MetadataErrorException {
LogicalGenerator generator = new LogicalGenerator(zoneId);
return generator.getLogicalPlan(astNode);
}
......
......@@ -22,6 +22,7 @@ import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.qp.physical.PhysicalPlan;
......@@ -137,7 +138,7 @@ public interface IQueryProcessExecutor {
/**
* Get all paths of a full path
*/
List<String> getAllPaths(String originPath) throws PathErrorException;
List<String> getAllPaths(String originPath) throws MetadataErrorException;
int getFetchSize();
......
......@@ -24,7 +24,6 @@ import static org.apache.iotdb.db.conf.IoTDBConstant.USER;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
......@@ -38,11 +37,11 @@ import org.apache.iotdb.db.auth.entity.User;
import org.apache.iotdb.db.engine.filenode.FileNodeManager;
import org.apache.iotdb.db.exception.ArgsErrorException;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.metadata.MManager;
import org.apache.iotdb.db.metadata.MNode;
import org.apache.iotdb.db.monitor.MonitorConstants;
import org.apache.iotdb.db.qp.constant.SQLConstant;
import org.apache.iotdb.db.qp.logical.sys.AuthorOperator;
import org.apache.iotdb.db.qp.logical.sys.AuthorOperator.AuthorType;
......@@ -58,7 +57,6 @@ import org.apache.iotdb.db.qp.physical.sys.MetadataPlan;
import org.apache.iotdb.db.qp.physical.sys.PropertyPlan;
import org.apache.iotdb.db.query.context.QueryContext;
import org.apache.iotdb.db.query.dataset.AuthDataSet;
import org.apache.iotdb.db.query.executor.EngineQueryRouter;
import org.apache.iotdb.db.query.fill.IFill;
import org.apache.iotdb.db.utils.AuthUtils;
import org.apache.iotdb.db.utils.LoadDataUtils;
......@@ -75,7 +73,6 @@ import org.apache.iotdb.tsfile.utils.Binary;
import org.apache.iotdb.tsfile.utils.Pair;
import org.apache.iotdb.tsfile.write.record.TSRecord;
import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
......@@ -311,7 +308,7 @@ public class OverflowQPExecutor extends QueryProcessExecutor {
}
@Override
public List<String> getAllPaths(String originPath) throws PathErrorException {
public List<String> getAllPaths(String originPath) throws MetadataErrorException {
return MManager.getInstance().getPaths(originPath);
}
......@@ -421,126 +418,21 @@ public class OverflowQPExecutor extends QueryProcessExecutor {
try {
switch (namespaceType) {
case ADD_PATH:
if (mManager.pathExist(path.getFullPath())) {
throw new ProcessorException(
String.format("Timeseries %s already exist", path.getFullPath()));
}
if (!mManager.checkFileNameByPath(path.getFullPath())) {
throw new ProcessorException("Storage group should be created first");
}
// optimize the speed of adding timeseries
String fileNodePath = mManager.getFileNameByPath(path.getFullPath());
// the two map is stored in the storage group node
Map<String, MeasurementSchema> schemaMap = mManager
.getSchemaMapForOneFileNode(fileNodePath);
Map<String, Integer> numSchemaMap = mManager.getNumSchemaMapForOneFileNode(fileNodePath);
String lastNode = path.getMeasurement();
boolean isNewMeasurement = true;
// Thread safety: just one thread can access/modify the schemaMap
synchronized (schemaMap) {
if (schemaMap.containsKey(lastNode)) {
isNewMeasurement = false;
MeasurementSchema columnSchema = schemaMap.get(lastNode);
if (!columnSchema.getType().equals(dataType)
|| !columnSchema.getEncodingType().equals(encoding)) {
throw new ProcessorException(String.format(
"The resultDataType or encoding of the last node %s is conflicting "
+ "in the storage group %s", lastNode, fileNodePath));
}
mManager.addPathToMTree(path.getFullPath(), dataType, encoding, compressor, props);
numSchemaMap.put(lastNode, numSchemaMap.get(lastNode) + 1);
} else {
mManager.addPathToMTree(path.getFullPath(), dataType, encoding, compressor, props);
MeasurementSchema columnSchema = mManager.getSchemaForOnePath(path.toString());
schemaMap.put(lastNode, columnSchema);
numSchemaMap.put(lastNode, 1);
}
try {
if (isNewMeasurement) {
// add time series to schema
fileNodeManager.addTimeSeries(path, dataType, encoding, compressor, props);
//TODO fileNodeManager.addTimeSeries(
//TODO path, resultDataType, encoding, compressor, encodingArgs);
}
// fileNodeManager.closeOneFileNode(namespacePath);
} catch (FileNodeManagerException e) {
throw new ProcessorException(e);
}
boolean isNewMeasurement = mManager.addPathToMTree(path, dataType, encoding, compressor,
props);
if (isNewMeasurement) {
fileNodeManager.addTimeSeries(path, dataType, encoding, compressor, props);
}
break;
case DELETE_PATH:
if (deletePathList != null && !deletePathList.isEmpty()) {
Set<String> pathSet = new HashSet<>();
// Attention: Monitor storage group seriesPath is not allowed to be deleted
for (Path p : deletePathList) {
List<String> subPaths = mManager.getPaths(p.getFullPath());
if (subPaths.isEmpty()) {
throw new ProcessorException(String
.format("There are no timeseries in the prefix of %s seriesPath",
p.getFullPath()));
}
List<String> newSubPaths = new ArrayList<>();
for (String eachSubPath : subPaths) {
String filenodeName = mManager.getFileNameByPath(eachSubPath);
if (MonitorConstants.STAT_STORAGE_GROUP_PREFIX.equals(filenodeName)) {
continue;
}
newSubPaths.add(eachSubPath);
}
pathSet.addAll(newSubPaths);
}
for (String p : pathSet) {
if (!mManager.pathExist(p)) {
throw new ProcessorException(String.format(
"Timeseries %s does not exist and cannot be delete its metadata and data", p));
}
}
List<String> fullPath = new ArrayList<>(pathSet);
try {
deleteDataOfTimeSeries(fullPath);
} catch (ProcessorException e) {
throw new ProcessorException(e);
}
Set<String> closeFileNodes = new HashSet<>();
Set<String> deleteFielNodes = new HashSet<>();
for (String p : fullPath) {
String nameSpacePath = null;
try {
nameSpacePath = mManager.getFileNameByPath(p);
} catch (PathErrorException e) {
throw new ProcessorException(e);
}
closeFileNodes.add(nameSpacePath);
// the two map is stored in the storage group node
schemaMap = mManager.getSchemaMapForOneFileNode(nameSpacePath);
numSchemaMap = mManager.getNumSchemaMapForOneFileNode(nameSpacePath);
// Thread safety: just one thread can access/modify the schemaMap
synchronized (schemaMap) {
// TODO: don't delete the storage group seriesPath
// recursively
path = new Path(p);
String measurementId = path.getMeasurement();
if (numSchemaMap.get(measurementId) == 1) {
numSchemaMap.remove(measurementId);
schemaMap.remove(measurementId);
} else {
numSchemaMap.put(measurementId, numSchemaMap.get(measurementId) - 1);
}
String deleteNameSpacePath = mManager.deletePathFromMTree(p);
if (deleteNameSpacePath != null) {
deleteFielNodes.add(deleteNameSpacePath);
}
}
}
closeFileNodes.removeAll(deleteFielNodes);
for (String deleteFileNode : deleteFielNodes) {
// close processor
fileNodeManager.deleteOneFileNode(deleteFileNode);
}
for (String closeFileNode : closeFileNodes) {
fileNodeManager.closeOneFileNode(closeFileNode);
}
deleteDataOfTimeSeries(deletePathList);
Pair<Set<String>, Set<String>> closeDeletedStorageGroupPair =
mManager.deletePathsFromMTree(deletePathList);
for (String closeStorageGroup : closeDeletedStorageGroupPair.left) {
fileNodeManager.closeOneFileNode(closeStorageGroup);
}
for (String deleteStorageGroup : closeDeletedStorageGroupPair.right) {
fileNodeManager.deleteOneFileNode(deleteStorageGroup);
}
break;
case SET_FILE_LEVEL:
......@@ -549,8 +441,8 @@ public class OverflowQPExecutor extends QueryProcessExecutor {
default:
throw new ProcessorException("unknown namespace type:" + namespaceType);
}
} catch (PathErrorException | IOException | FileNodeManagerException e) {
throw new ProcessorException(e.getMessage());
} catch (FileNodeManagerException | MetadataErrorException e) {
throw new ProcessorException(e);
}
return true;
}
......@@ -560,10 +452,10 @@ public class OverflowQPExecutor extends QueryProcessExecutor {
*
* @param pathList deleted paths
*/
private void deleteDataOfTimeSeries(List<String> pathList) throws ProcessorException {
for (String p : pathList) {
private void deleteDataOfTimeSeries(List<Path> pathList) throws ProcessorException {
for (Path p : pathList) {
DeletePlan deletePlan = new DeletePlan();
deletePlan.addPath(new Path(p));
deletePlan.addPath(p);
deletePlan.setDeleteTime(Long.MAX_VALUE);
processNonQuery(deletePlan);
}
......@@ -594,7 +486,7 @@ public class OverflowQPExecutor extends QueryProcessExecutor {
default:
throw new ProcessorException("unknown namespace type:" + propertyType);
}
} catch (PathErrorException | IOException | ArgsErrorException e) {
} catch (PathErrorException | IOException | MetadataErrorException e) {
throw new ProcessorException("meet error in " + propertyType + " . " + e.getMessage());
}
return true;
......
......@@ -24,6 +24,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.metadata.MManager;
......@@ -124,7 +125,7 @@ public abstract class QueryProcessExecutor implements IQueryProcessExecutor {
result &= delete(new Path(path), deleteTime);
}
return result;
} catch (PathErrorException e) {
} catch (MetadataErrorException e) {
throw new ProcessorException(e);
}
}
......
......@@ -28,7 +28,7 @@ import java.util.List;
import java.util.Map;
import org.antlr.runtime.Token;
import org.apache.iotdb.db.exception.ArgsErrorException;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.qp.IllegalASTFormatException;
import org.apache.iotdb.db.exception.qp.LogicalOperatorException;
import org.apache.iotdb.db.exception.qp.QueryProcessorException;
......@@ -80,7 +80,7 @@ public class LogicalGenerator {
}
public RootOperator getLogicalPlan(AstNode astNode)
throws QueryProcessorException, ArgsErrorException {
throws QueryProcessorException, ArgsErrorException, MetadataErrorException {
analyze(astNode);
return initializedOperator;
}
......@@ -91,7 +91,8 @@ public class LogicalGenerator {
* @throws QueryProcessorException exception in query process
* @throws ArgsErrorException args error
*/
private void analyze(AstNode astNode) throws QueryProcessorException, ArgsErrorException {
private void analyze(AstNode astNode)
throws QueryProcessorException, ArgsErrorException, MetadataErrorException {
Token token = astNode.getToken();
if (token == null) {
throw new QueryProcessorException("given token is null");
......@@ -368,7 +369,7 @@ public class LogicalGenerator {
initializedOperator = propertyOperator;
}
private void analyzeMetadataCreate(AstNode astNode) throws MetadataArgsErrorException {
private void analyzeMetadataCreate(AstNode astNode) throws MetadataErrorException {
Path series = parsePath(astNode.getChild(0).getChild(0));
AstNode paramNode = astNode.getChild(1);
String dataType = paramNode.getChild(0).getChild(0).getText();
......@@ -1066,7 +1067,7 @@ public class LogicalGenerator {
}
private void checkMetadataArgs(String dataType, String encoding, String compressor)
throws MetadataArgsErrorException {
throws MetadataErrorException {
// final String rle = "RLE";
// final String plain = "PLAIN";
// final String ts2Diff = "TS_2DIFF";
......@@ -1075,29 +1076,29 @@ public class LogicalGenerator {
TSDataType tsDataType;
TSEncoding tsEncoding;
if (dataType == null) {
throw new MetadataArgsErrorException("data type cannot be null");
throw new MetadataErrorException("data type cannot be null");
}
try {
tsDataType = TSDataType.valueOf(dataType);
} catch (Exception e) {
throw new MetadataArgsErrorException(String.format("data type %s not support", dataType));
throw new MetadataErrorException(String.format("data type %s not support", dataType));
}
if (encoding == null) {
throw new MetadataArgsErrorException("encoding type cannot be null");
throw new MetadataErrorException("encoding type cannot be null");
}
try {
tsEncoding = TSEncoding.valueOf(encoding);
} catch (Exception e) {
throw new MetadataArgsErrorException(String.format("encoding %s is not support", encoding));
throw new MetadataErrorException(String.format("encoding %s is not support", encoding));
}
try {
CompressionType.valueOf(compressor);
} catch (Exception e) {
throw new MetadataArgsErrorException(String.format("compressor %s is not support", compressor));
throw new MetadataErrorException(String.format("compressor %s is not support", compressor));
}
boolean throwExp = false;
switch (tsDataType) {
......@@ -1129,7 +1130,7 @@ public class LogicalGenerator {
throwExp = true;
}
if (throwExp) {
throw new MetadataArgsErrorException(
throw new MetadataErrorException(
String.format("encoding %s does not support %s", encoding, dataType));
}
}
......
......@@ -22,6 +22,7 @@ import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.qp.LogicalOperatorException;
import org.apache.iotdb.db.exception.qp.LogicalOptimizeException;
......@@ -334,7 +335,7 @@ public class ConcatPathOptimizer implements ILogicalOptimizer {
for (String pathStr : pathMap.keySet()) {
retPaths.add(new Path(pathStr));
}
} catch (PathErrorException e) {
} catch (MetadataErrorException e) {
throw new LogicalOptimizeException("error when remove star: ", e);
}
return retPaths;
......@@ -353,7 +354,7 @@ public class ConcatPathOptimizer implements ILogicalOptimizer {
newAggregations.add(afterConcatAggregations.get(i));
}
}
} catch (PathErrorException e) {
} catch (MetadataErrorException e) {
throw new LogicalOptimizeException("error when remove star: ", e);
}
}
......
......@@ -27,11 +27,13 @@ import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.engine.filenode.FileNodeManager;
import org.apache.iotdb.db.engine.memcontrol.BasicMemController;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.RecoverException;
import org.apache.iotdb.db.exception.StartupException;
import org.apache.iotdb.db.exception.builder.ExceptionBuilder;
import org.apache.iotdb.db.metadata.MManager;
import org.apache.iotdb.db.metadata.Metadata;
import org.apache.iotdb.db.monitor.StatMonitor;
import org.apache.iotdb.db.sync.receiver.SyncServiceManager;
import org.apache.iotdb.db.writelog.manager.MultiFileLogNodeManager;
......@@ -145,7 +147,7 @@ public class IoTDB implements IoTDBMBean {
List<String> filenodeNames = null;
try {
filenodeNames = MManager.getInstance().getAllFileNames();
} catch (PathErrorException e) {
} catch (MetadataErrorException e) {
throw new RecoverException(e);
}
for (String filenodeName : filenodeNames) {
......
......@@ -42,6 +42,7 @@ import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.engine.filenode.FileNodeManager;
import org.apache.iotdb.db.exception.ArgsErrorException;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.exception.qp.IllegalASTFormatException;
......@@ -345,7 +346,7 @@ public class TSServiceImpl implements TSIService.Iface, ServerContext {
case "ALL_COLUMNS":
try {
resp.setColumnsList(getPaths(req.getColumnPath()));
} catch (PathErrorException | InterruptedException | ProcessorException e) {
} catch (InterruptedException | ProcessorException | MetadataErrorException e) {
status = getErrorStatus(String
.format("Failed to fetch %s's all columns because: %s", req.getColumnPath(), e));
resp.setStatus(status);
......@@ -394,7 +395,7 @@ public class TSServiceImpl implements TSIService.Iface, ServerContext {
}
protected List<String> getPaths(String path)
throws PathErrorException, InterruptedException, ProcessorException {
throws MetadataErrorException, InterruptedException, ProcessorException {
return MManager.getInstance().getPaths(path);
}
......@@ -831,7 +832,7 @@ public class TSServiceImpl implements TSIService.Iface, ServerContext {
try {
physicalPlan = processor.parseSQLToPhysicalPlan(statement, zoneIds.get());
physicalPlan.setProposer(username.get());
} catch (QueryProcessorException | ArgsErrorException e) {
} catch (QueryProcessorException | ArgsErrorException | MetadataErrorException e) {
LOGGER.error("meet error while parsing SQL to physical plan!", e);
return getTSExecuteStatementResp(TS_StatusCode.ERROR_STATUS, e.getMessage());
}
......
......@@ -47,7 +47,7 @@ import org.apache.iotdb.db.engine.filenode.FileNodeManager;
import org.apache.iotdb.db.engine.filenode.OverflowChangeType;
import org.apache.iotdb.db.engine.filenode.TsFileResource;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.metadata.MManager;
......@@ -274,7 +274,7 @@ public class SyncServiceImpl implements SyncService.Iface {
* @param cmd metadata operation
*/
private void operation(String cmd)
throws PathErrorException, IOException, MetadataArgsErrorException {
throws PathErrorException, IOException, MetadataErrorException {
String[] args = cmd.trim().split(",");
switch (args[0]) {
case MetadataOperationType.ADD_PATH_TO_MTREE:
......@@ -285,13 +285,13 @@ public class SyncServiceImpl implements SyncService.Iface {
kv = args[k].split("=");
props.put(kv[0], kv[1]);
}
metadataManger.addPathToMTree(args[1], TSDataType.deserialize(Short.valueOf(args[2])),
metadataManger.addPathToMTree(new Path(args[1]), TSDataType.deserialize(Short.valueOf(args[2])),
TSEncoding.deserialize(Short.valueOf(args[3])),
CompressionType.deserialize(Short.valueOf(args[4])),
props);
break;
case MetadataOperationType.DELETE_PATH_FROM_MTREE:
metadataManger.deletePathFromMTree(args[1]);
metadataManger.deletePathsFromMTree(Collections.singletonList(new Path(args[1])));
break;
case MetadataOperationType.SET_STORAGE_LEVEL_TO_MTREE:
metadataManger.setStorageLevelToMTree(args[1]);
......
......@@ -24,6 +24,7 @@ import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
import org.apache.iotdb.tsfile.read.common.Path;
/**
* @author liukun
......@@ -43,49 +44,49 @@ public class MetadataManagerHelper {
CompressionType compressionType =CompressionType.valueOf(TSFileConfig.compressor);
mmanager.addPathToMTree("root.vehicle.d0.s0", TSDataType.valueOf("INT32"),
mmanager.addPathToMTree(new Path("root.vehicle.d0.s0"), TSDataType.valueOf("INT32"),
TSEncoding.valueOf("RLE"), compressionType,
Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d0.s1", TSDataType.valueOf("INT64"),
mmanager.addPathToMTree(new Path("root.vehicle.d0.s1"), TSDataType.valueOf("INT64"),
TSEncoding.valueOf("RLE"), compressionType,
Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d0.s2", TSDataType.valueOf("FLOAT"),
mmanager.addPathToMTree(new Path("root.vehicle.d0.s2"), TSDataType.valueOf("FLOAT"),
TSEncoding.valueOf("RLE"), compressionType,
Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d0.s3", TSDataType.valueOf("DOUBLE"),
mmanager.addPathToMTree(new Path("root.vehicle.d0.s3"), TSDataType.valueOf("DOUBLE"),
TSEncoding.valueOf("RLE"), compressionType,
Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d0.s4", TSDataType.valueOf("BOOLEAN"),
mmanager.addPathToMTree(new Path("root.vehicle.d0.s4"), TSDataType.valueOf("BOOLEAN"),
TSEncoding.valueOf("PLAIN"), compressionType,
Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d0.s5", TSDataType.valueOf("TEXT"),
mmanager.addPathToMTree(new Path("root.vehicle.d0.s5"), TSDataType.valueOf("TEXT"),
TSEncoding.valueOf("PLAIN"), compressionType,
Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d1.s0", TSDataType.valueOf("INT32"),
mmanager.addPathToMTree(new Path("root.vehicle.d1.s0"), TSDataType.valueOf("INT32"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d1.s1", TSDataType.valueOf("INT64"),
mmanager.addPathToMTree(new Path("root.vehicle.d1.s1"), TSDataType.valueOf("INT64"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d1.s2", TSDataType.valueOf("FLOAT"),
mmanager.addPathToMTree(new Path("root.vehicle.d1.s2"), TSDataType.valueOf("FLOAT"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d1.s3", TSDataType.valueOf("DOUBLE"),
mmanager.addPathToMTree(new Path("root.vehicle.d1.s3"), TSDataType.valueOf("DOUBLE"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d1.s4", TSDataType.valueOf("BOOLEAN"),
mmanager.addPathToMTree(new Path("root.vehicle.d1.s4"), TSDataType.valueOf("BOOLEAN"),
TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d1.s5", TSDataType.valueOf("TEXT"),
mmanager.addPathToMTree(new Path("root.vehicle.d1.s5"), TSDataType.valueOf("TEXT"),
TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d2.s0", TSDataType.valueOf("INT32"),
mmanager.addPathToMTree(new Path("root.vehicle.d2.s0"), TSDataType.valueOf("INT32"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d2.s1", TSDataType.valueOf("INT64"),
mmanager.addPathToMTree(new Path("root.vehicle.d2.s1"), TSDataType.valueOf("INT64"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d2.s2", TSDataType.valueOf("FLOAT"),
mmanager.addPathToMTree(new Path("root.vehicle.d2.s2"), TSDataType.valueOf("FLOAT"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d2.s3", TSDataType.valueOf("DOUBLE"),
mmanager.addPathToMTree(new Path("root.vehicle.d2.s3"), TSDataType.valueOf("DOUBLE"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d2.s4", TSDataType.valueOf("BOOLEAN"),
mmanager.addPathToMTree(new Path("root.vehicle.d2.s4"), TSDataType.valueOf("BOOLEAN"),
TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d2.s5", TSDataType.valueOf("TEXT"),
mmanager.addPathToMTree(new Path("root.vehicle.d2.s5"), TSDataType.valueOf("TEXT"),
TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
} catch (Exception e) {
......@@ -101,26 +102,26 @@ public class MetadataManagerHelper {
mmanager.setStorageLevelToMTree("root.vehicle");
CompressionType compressionType =CompressionType.valueOf(TSFileConfig.compressor);
mmanager.addPathToMTree("root.vehicle.d0.s0", TSDataType.valueOf("INT32"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d0.s1", TSDataType.valueOf("INT64"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d0.s2", TSDataType.valueOf("FLOAT"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d0.s3", TSDataType.valueOf("DOUBLE"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d0.s4", TSDataType.valueOf("BOOLEAN"), TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d0.s5", TSDataType.valueOf("TEXT"), TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d1.s0", TSDataType.valueOf("INT32"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d1.s1", TSDataType.valueOf("INT64"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d1.s2", TSDataType.valueOf("FLOAT"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d1.s3", TSDataType.valueOf("DOUBLE"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d1.s4", TSDataType.valueOf("BOOLEAN"), TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d1.s5", TSDataType.valueOf("TEXT"), TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d2.s0", TSDataType.valueOf("INT32"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d2.s1", TSDataType.valueOf("INT64"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d2.s2", TSDataType.valueOf("FLOAT"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d2.s3", TSDataType.valueOf("DOUBLE"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d2.s4", TSDataType.valueOf("BOOLEAN"), TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree("root.vehicle.d2.s5", TSDataType.valueOf("TEXT"), TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d0.s0"), TSDataType.valueOf("INT32"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d0.s1"), TSDataType.valueOf("INT64"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d0.s2"), TSDataType.valueOf("FLOAT"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d0.s3"), TSDataType.valueOf("DOUBLE"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d0.s4"), TSDataType.valueOf("BOOLEAN"), TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d0.s5"), TSDataType.valueOf("TEXT"), TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d1.s0"), TSDataType.valueOf("INT32"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d1.s1"), TSDataType.valueOf("INT64"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d1.s2"), TSDataType.valueOf("FLOAT"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d1.s3"), TSDataType.valueOf("DOUBLE"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d1.s4"), TSDataType.valueOf("BOOLEAN"), TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d1.s5"), TSDataType.valueOf("TEXT"), TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d2.s0"), TSDataType.valueOf("INT32"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d2.s1"), TSDataType.valueOf("INT64"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d2.s2"), TSDataType.valueOf("FLOAT"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d2.s3"), TSDataType.valueOf("DOUBLE"), TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d2.s4"), TSDataType.valueOf("BOOLEAN"), TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
mmanager.addPathToMTree(new Path("root.vehicle.d2.s5"), TSDataType.valueOf("TEXT"), TSEncoding.valueOf("PLAIN"), compressionType, Collections.emptyMap());
} catch (Exception e) {
......
......@@ -23,7 +23,7 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.iotdb.db.engine.filenode.FileNodeManager;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.metadata.MManager;
import org.apache.iotdb.db.sync.test.RandomNum;
......@@ -61,7 +61,7 @@ public class FileNodeManagerBenchmark {
}
}
private static void prepare() throws MetadataArgsErrorException, PathErrorException, IOException {
private static void prepare() throws MetadataErrorException, PathErrorException, IOException {
MManager manager = MManager.getInstance();
manager.setStorageLevelToMTree(prefix);
for (String device : devices) {
......@@ -77,7 +77,7 @@ public class FileNodeManagerBenchmark {
}
public static void main(String[] args)
throws InterruptedException, IOException, MetadataArgsErrorException,
throws InterruptedException, IOException, MetadataErrorException,
PathErrorException, FileNodeManagerException {
tearDown();
prepare();
......
......@@ -35,7 +35,7 @@ import org.apache.iotdb.db.engine.filenode.FileNodeManager;
import org.apache.iotdb.db.engine.modification.io.LocalTextModificationAccessor;
import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.StartupException;
import org.apache.iotdb.db.metadata.MManager;
......@@ -69,7 +69,7 @@ public class DeletionFileNodeTest {
}
@Before
public void setup() throws MetadataArgsErrorException,
public void setup() throws MetadataErrorException,
PathErrorException, IOException, FileNodeManagerException, StartupException {
EnvironmentUtils.envSetUp();
......
......@@ -29,7 +29,7 @@ import java.util.List;
import org.apache.iotdb.db.engine.filenode.FileNodeManager;
import org.apache.iotdb.db.engine.memcontrol.BasicMemController.UsageLevel;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.StartupException;
import org.apache.iotdb.db.metadata.MManager;
......@@ -64,7 +64,7 @@ public class DeletionQueryTest {
}
@Before
public void setup() throws MetadataArgsErrorException,
public void setup() throws MetadataErrorException,
PathErrorException, IOException, FileNodeManagerException, StartupException {
EnvironmentUtils.envSetUp();
......
......@@ -23,10 +23,9 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.utils.EnvironmentUtils;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
......@@ -89,14 +88,14 @@ public class MManagerAdvancedTest {
assertEquals(6, paths.size());
paths = mmanager.getPaths("root.vehicle.d2");
assertEquals(0, paths.size());
} catch (PathErrorException e) {
} catch (MetadataErrorException | PathErrorException e) {
e.printStackTrace();
fail(e.getMessage());
}
}
@Test
public void testCache() throws PathErrorException, IOException, MetadataArgsErrorException {
public void testCache() throws PathErrorException, IOException, MetadataErrorException {
mmanager.addPathToMTree("root.vehicle.d2.s0", "DOUBLE", "RLE");
mmanager.addPathToMTree("root.vehicle.d2.s1", "BOOLEAN", "PLAIN");
mmanager.addPathToMTree("root.vehicle.d2.s2.g0", "TEXT", "PLAIN");
......@@ -125,7 +124,7 @@ public class MManagerAdvancedTest {
@Test
public void testGetNextLevelPath()
throws PathErrorException, IOException, MetadataArgsErrorException {
throws PathErrorException, IOException, MetadataErrorException {
mmanager.addPathToMTree("root.vehicle.d2.s0", "DOUBLE", "RLE");
mmanager.addPathToMTree("root.vehicle.d2.s1", "BOOLEAN", "PLAIN");
mmanager.addPathToMTree("root.vehicle.d2.s2.g0", "TEXT", "PLAIN");
......
......@@ -23,11 +23,10 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.utils.EnvironmentUtils;
import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
......@@ -64,24 +63,25 @@ public class MManagerBasicTest {
try {
manager.setStorageLevelToMTree("root.laptop.d1");
} catch (PathErrorException | IOException e) {
} catch (MetadataErrorException e) {
e.printStackTrace();
fail(e.getMessage());
}
try {
manager.setStorageLevelToMTree("root.laptop");
} catch (PathErrorException | IOException e) {
} catch (MetadataErrorException e) {
Assert.assertEquals(
"The seriesPath of root.laptop already exist, it can't be set to the storage group",
"org.apache.iotdb.db.exception.PathErrorException: The seriesPath of"
+ " root.laptop already exist, it can't be set to the storage group",
e.getMessage());
}
try {
manager.addPathToMTree("root.laptop.d1.s0", TSDataType.valueOf("INT32"),
manager.addPathToMTree(new Path("root.laptop.d1.s0"), TSDataType.valueOf("INT32"),
TSEncoding.valueOf("RLE"), compressionType, Collections
.emptyMap());
} catch (PathErrorException | IOException e) {
} catch (MetadataErrorException e) {
e.printStackTrace();
fail(e.getMessage());
}
......@@ -90,16 +90,16 @@ public class MManagerBasicTest {
assertEquals(manager.pathExist("root.laptop.d1.s0"), true);
assertEquals(manager.pathExist("root.laptop.d1.s1"), false);
try {
manager.addPathToMTree("root.laptop.d1.s1", TSDataType.valueOf("INT32"),
manager.addPathToMTree(new Path("root.laptop.d1.s1"), TSDataType.valueOf("INT32"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
} catch (PathErrorException | IOException e1) {
} catch (MetadataErrorException e1) {
e1.printStackTrace();
fail(e1.getMessage());
}
assertEquals(manager.pathExist("root.laptop.d1.s1"), true);
try {
manager.deletePathFromMTree("root.laptop.d1.s1");
} catch (PathErrorException | IOException e) {
} catch (MetadataErrorException e) {
e.printStackTrace();
fail(e.getMessage());
}
......@@ -108,7 +108,7 @@ public class MManagerBasicTest {
assertEquals(manager.pathExist("root.laptop.d1.s1"), false);
try {
manager.deletePathFromMTree("root.laptop.d1.s0");
} catch (PathErrorException | IOException e) {
} catch (MetadataErrorException e) {
e.printStackTrace();
fail(e.getMessage());
}
......@@ -121,21 +121,21 @@ public class MManagerBasicTest {
// try {
// manager.setStorageLevelToMTree("root.laptop");
// } catch (PathErrorException | IOException e) {
// } catch (MetadataErrorException e) {
// fail(e.getMessage());
// }
try {
manager.addPathToMTree("root.laptop.d1.s1", TSDataType.valueOf("INT32"),
manager.addPathToMTree(new Path("root.laptop.d1.s1"), TSDataType.valueOf("INT32"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
} catch (PathErrorException | IOException e1) {
} catch (MetadataErrorException e1) {
e1.printStackTrace();
fail(e1.getMessage());
}
try {
manager.addPathToMTree("root.laptop.d1.s0", TSDataType.valueOf("INT32"),
manager.addPathToMTree(new Path("root.laptop.d1.s0"), TSDataType.valueOf("INT32"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
} catch (PathErrorException | IOException e1) {
} catch (MetadataErrorException e1) {
e1.printStackTrace();
fail(e1.getMessage());
}
......@@ -145,20 +145,20 @@ public class MManagerBasicTest {
try {
manager.deletePathFromMTree("root.laptop.d1.s0");
} catch (PathErrorException | IOException e) {
} catch (MetadataErrorException e) {
e.printStackTrace();
fail(e.getMessage());
}
try {
manager.deletePathFromMTree("root.laptop.d1.s1");
} catch (PathErrorException | IOException e) {
} catch (MetadataErrorException e) {
e.printStackTrace();
fail(e.getMessage());
}
try {
manager.setStorageLevelToMTree("root.laptop.d2");
} catch (PathErrorException | IOException e) {
} catch (MetadataErrorException e) {
Assert.assertEquals(
String.format("The seriesPath of %s already exist, it can't be set to the storage group",
"root.laptop.d2"),
......@@ -178,46 +178,46 @@ public class MManagerBasicTest {
}
try {
manager.addPathToMTree("root.laptop.d2.s1", TSDataType.valueOf("INT32"),
manager.addPathToMTree(new Path("root.laptop.d2.s1"), TSDataType.valueOf("INT32"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
} catch (PathErrorException | IOException e1) {
} catch (MetadataErrorException e1) {
e1.printStackTrace();
fail(e1.getMessage());
}
try {
manager.addPathToMTree("root.laptop.d2.s0", TSDataType.valueOf("INT32"),
manager.addPathToMTree(new Path("root.laptop.d2.s0"), TSDataType.valueOf("INT32"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
} catch (PathErrorException | IOException e1) {
} catch (MetadataErrorException e1) {
e1.printStackTrace();
fail(e1.getMessage());
}
try {
manager.deletePathFromMTree("root.laptop.d2.s0");
} catch (PathErrorException | IOException e) {
} catch (MetadataErrorException e) {
e.printStackTrace();
fail(e.getMessage());
}
try {
manager.deletePathFromMTree("root.laptop.d2.s1");
} catch (PathErrorException | IOException e) {
} catch (MetadataErrorException e) {
e.printStackTrace();
fail(e.getMessage());
}
try {
manager.addPathToMTree("root.laptop.d1.s0", TSDataType.valueOf("INT32"),
manager.addPathToMTree(new Path("root.laptop.d1.s0"), TSDataType.valueOf("INT32"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
} catch (PathErrorException | IOException e1) {
} catch (MetadataErrorException e1) {
e1.printStackTrace();
fail(e1.getMessage());
}
try {
manager.addPathToMTree("root.laptop.d1.s1", TSDataType.valueOf("INT32"),
manager.addPathToMTree(new Path("root.laptop.d1.s1"), TSDataType.valueOf("INT32"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
} catch (PathErrorException | IOException e1) {
} catch (MetadataErrorException e1) {
e1.printStackTrace();
fail(e1.getMessage());
}
......@@ -232,9 +232,9 @@ public class MManagerBasicTest {
}
try {
manager.addPathToMTree("root.laptop.d1.s2", TSDataType.valueOf("INT32"),
manager.addPathToMTree(new Path("root.laptop.d1.s2"), TSDataType.valueOf("INT32"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
} catch (PathErrorException | IOException e1) {
} catch (MetadataErrorException e1) {
e1.printStackTrace();
fail(e1.getMessage());
}
......@@ -248,9 +248,9 @@ public class MManagerBasicTest {
}
try {
manager.addPathToMTree("root.laptop.d1.s3", TSDataType.valueOf("INT32"),
manager.addPathToMTree(new Path("root.laptop.d1.s3"), TSDataType.valueOf("INT32"),
TSEncoding.valueOf("RLE"), compressionType, Collections.emptyMap());
} catch (PathErrorException | IOException e1) {
} catch (MetadataErrorException e1) {
e1.printStackTrace();
fail(e1.getMessage());
}
......@@ -284,7 +284,7 @@ public class MManagerBasicTest {
assertEquals(true, manager.checkStorageLevelOfMTree("root.laptop.d2"));
assertEquals(false, manager.checkStorageLevelOfMTree("root.laptop.d3"));
assertEquals(false, manager.checkStorageLevelOfMTree("root.laptop"));
} catch (PathErrorException | IOException e) {
} catch (MetadataErrorException e) {
e.printStackTrace();
fail(e.getMessage());
}
......@@ -297,9 +297,9 @@ public class MManagerBasicTest {
try {
manager.setStorageLevelToMTree("root.laptop.d1");
manager.setStorageLevelToMTree("root.laptop.d2");
manager.addPathToMTree("root.laptop.d1.s1", TSDataType.INT32, TSEncoding.PLAIN,
manager.addPathToMTree(new Path("root.laptop.d1.s1"), TSDataType.INT32, TSEncoding.PLAIN,
CompressionType.GZIP, null);
manager.addPathToMTree("root.laptop.d1.s1", TSDataType.INT32, TSEncoding.PLAIN,
manager.addPathToMTree(new Path("root.laptop.d2.s1"), TSDataType.INT32, TSEncoding.PLAIN,
CompressionType.GZIP, null);
List<String> list = new ArrayList<>();
......@@ -311,7 +311,7 @@ public class MManagerBasicTest {
list.add("root.laptop.d2");
assertEquals(list, manager.getAllFileNamesByPath("root.laptop"));
assertEquals(list, manager.getAllFileNamesByPath("root"));
} catch (PathErrorException | IOException e) {
} catch (MetadataErrorException e) {
e.printStackTrace();
fail(e.getMessage());
}
......@@ -339,7 +339,7 @@ public class MManagerBasicTest {
assertTrue(manager.getAllFileNamesByPath("root.vehicle1.device2").isEmpty());
assertTrue(manager.getAllFileNamesByPath("root.vehicle1.device3").isEmpty());
assertFalse(manager.getAllFileNamesByPath("root.vehicle1.device").isEmpty());
} catch (PathErrorException | IOException e) {
} catch (MetadataErrorException e) {
e.printStackTrace();
fail(e.getMessage());
}
......
......@@ -20,7 +20,7 @@ package org.apache.iotdb.db.metadata;
import java.io.IOException;
import java.util.Random;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.tsfile.exception.cache.CacheException;
import org.junit.Before;
......@@ -110,7 +110,7 @@ public class MManagerEfficiencyTest {
}
public void testRandom()
throws MetadataArgsErrorException, PathErrorException, IOException, CacheException {
throws MetadataErrorException, PathErrorException, IOException, CacheException {
long count = 0;
paths = shuffle(paths);
String tp;
......@@ -129,7 +129,7 @@ public class MManagerEfficiencyTest {
}
}
private void insert() throws MetadataArgsErrorException, PathErrorException, IOException {
private void insert() throws MetadataErrorException, PathErrorException, IOException {
for (String group : storageGroup) {
for (String device : devices) {
for (String sensor : sensors) {
......
......@@ -21,16 +21,9 @@ package org.apache.iotdb.db.metadata;
import static org.junit.Assert.*;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.utils.EnvironmentUtils;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
......@@ -85,7 +78,7 @@ public class MetadataTest {
Metadata combineMetadata = Metadata.combineMetadatas(new Metadata[]{metadata1, metadata2});
assertTrue(metadata.equals(combineMetadata));
} catch (PathErrorException | IOException e) {
} catch (PathErrorException | IOException | MetadataErrorException e) {
e.printStackTrace();
fail(e.getMessage());
}
......
......@@ -19,6 +19,7 @@
package org.apache.iotdb.db.qp.bench;
import org.apache.iotdb.db.exception.ArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.exception.qp.QueryProcessorException;
import org.apache.iotdb.db.qp.QueryProcessor;
......@@ -34,7 +35,8 @@ public class QueryParseBenchmark {
private static String insertSQL = "insert into root.perform.group_3.d_6(timestamp,s_0,s_1,s_2,s_3,s_4,s_5,s_6,s_7,s_8,s_9,s_10,s_11,s_12,s_13,s_14,s_15,s_16,s_17,s_18,s_19,s_20,s_21,s_22,s_23,s_24,s_25,s_26,s_27,s_28,s_29,s_30,s_31,s_32,s_33,s_34,s_35,s_36,s_37,s_38,s_39,s_40,s_41,s_42,s_43,s_44,s_45,s_46,s_47,s_48,s_49,s_50,s_51,s_52,s_53,s_54,s_55,s_56,s_57,s_58,s_59,s_60,s_61,s_62,s_63,s_64,s_65,s_66,s_67,s_68,s_69,s_70,s_71,s_72,s_73,s_74,s_75,s_76,s_77,s_78,s_79,s_80,s_81,s_82,s_83,s_84,s_85,s_86,s_87,s_88,s_89,s_90,s_91,s_92,s_93,s_94,s_95,s_96,s_97,s_98,s_99) values(1535558845000,6.651756751280603,6.651756751280603,787.74,0.0,0.0,787.74,6.651756751280603,0.0,33960.24564285714,33960.24564285714,787.74,787.74,33960.24564285714,6.651756751280603,372.5645996161876,19.185434416303107,787.74,728.2850866795781,33960.24564285714,6.651756751280603,33960.24564285714,38.51408765504662,1160.4112366716558,0.0,33960.24564285714,33960.24564285714,6.651756751280603,33960.24564285714,787.74,6.651756751280603,0.0,6.651756751280603,33960.24564285714,787.74,787.74,656.2029144236096,593.309858647129,787.74,0.0,787.74,0.0,33960.24564285714,33960.24564285714,1127.350328754438,33960.24564285714,33960.24564285714,33960.24564285714,787.74,1180.971235067114,661.7070251519021,33960.24564285714,0.0,0.0,1205.10868101537,792.940812264317,0.0,573.1607619219858,6.651756751280603,156.47709039629825,33960.24564285714,809.2345221425896,0.0,6.651756751280603,787.74,6.651756751280603,33960.24564285714,33960.24564285714,965.7042945108797,787.74,6.651756751280603,6.651756751280603,787.74,6.651756751280603,0.0,6.651756751280603,0.0,6.651756751280603,317.6444399747479,787.74,0.0,787.74,6.651756751280603,33960.24564285714,1028.5886481610971,107.99305540655205,0.0,33960.24564285714,787.74,252.59133785592041,725.8255243198603,443.50233343042555,787.74,6.651756751280603,6.651756751280603,33960.24564285714,33960.24564285714,33960.24564285714,787.74,33960.24564285714,441.68146442890685)";
public static void main(String[] args)
throws ArgsErrorException, QueryProcessorException, ProcessorException {
throws ArgsErrorException, QueryProcessorException, ProcessorException,
MetadataErrorException {
QueryProcessor queryProcessor = new QueryProcessor(new MemIntQpExecutor());
long startTime = System.currentTimeMillis();
for (int i = 0; i < numOfBenchSQL; i++) {
......
......@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
import java.util.Arrays;
import java.util.Collection;
import org.apache.iotdb.db.exception.ArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.exception.qp.QueryProcessorException;
import org.apache.iotdb.db.qp.QueryProcessor;
......@@ -70,7 +71,8 @@ public class TSPlanContextAuthorTest {
@Test
public void testAnalyzeAuthor()
throws QueryProcessorException, ArgsErrorException, ProcessorException {
throws QueryProcessorException, ArgsErrorException, ProcessorException,
MetadataErrorException {
QueryProcessor processor = new QueryProcessor(new MemIntQpExecutor());
AuthorPlan author = (AuthorPlan) processor.parseSQLToPhysicalPlan(inputSQL);
if (author == null) {
......
......@@ -24,6 +24,7 @@ import static org.junit.Assert.assertEquals;
import java.util.Arrays;
import java.util.Collection;
import org.apache.iotdb.db.exception.ArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.exception.qp.QueryProcessorException;
import org.apache.iotdb.db.qp.QueryProcessor;
......@@ -85,7 +86,8 @@ public class TSPlanContextPropertyTest {
@Test
public void testAnalyzeMetadata()
throws QueryProcessorException, ArgsErrorException, ProcessorException {
throws QueryProcessorException, ArgsErrorException, ProcessorException,
MetadataErrorException {
QueryProcessor processor = new QueryProcessor(new MemIntQpExecutor());
PropertyPlan plan = (PropertyPlan) processor.parseSQLToPhysicalPlan(inputSQL);
assertEquals(propertyType, plan.getPropertyType());
......
......@@ -25,6 +25,7 @@ import java.io.IOException;
import java.util.Arrays;
import org.apache.iotdb.db.auth.AuthException;
import org.apache.iotdb.db.exception.ArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.exception.qp.QueryProcessorException;
import org.apache.iotdb.db.qp.QueryProcessor;
......@@ -51,7 +52,8 @@ public class PhysicalPlanLogTransferTest {
@Test
public void operatorToLog()
throws IOException, ArgsErrorException, ProcessorException, QueryProcessorException {
throws IOException, ArgsErrorException, ProcessorException, QueryProcessorException,
MetadataErrorException {
/** Insert Plan test **/
byte[] insertPlanBytesTest = PhysicalPlanLogTransfer.operatorToLog(insertPlan);
Codec<InsertPlan> insertPlanCodec = CodecInstances.multiInsertPlanCodec;
......@@ -104,7 +106,8 @@ public class PhysicalPlanLogTransferTest {
@Test
public void logToOperator()
throws IOException, ArgsErrorException, ProcessorException, QueryProcessorException, AuthException {
throws IOException, ArgsErrorException, ProcessorException, QueryProcessorException,
AuthException, MetadataErrorException {
/** Insert Plan test **/
byte[] insertPlanBytesTest = PhysicalPlanLogTransfer.operatorToLog(insertPlan);
......@@ -133,7 +136,8 @@ public class PhysicalPlanLogTransferTest {
assertEquals(metadataPlanTest, metadataPlan);
/** Author Plan test **/
String sql = "grant role xm privileges 'SET_STORAGE_GROUP','DELETE_TIMESERIES' on root.vehicle.device.sensor";
String sql = "grant role xm privileges 'SET_STORAGE_GROUP','DELETE_TIMESERIES' "
+ "on root.vehicle.device.sensor";
AuthorPlan authorPlan = (AuthorPlan) processor.parseSQLToPhysicalPlan(sql);
byte[] authorPlanBytesTest = PhysicalPlanLogTransfer.operatorToLog(authorPlan);
AuthorPlan authorPlanTest = (AuthorPlan) PhysicalPlanLogTransfer
......
......@@ -21,6 +21,7 @@ package org.apache.iotdb.db.qp.plan;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.exception.ArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.qp.IllegalASTFormatException;
import org.apache.iotdb.db.exception.qp.LogicalOperatorException;
import org.apache.iotdb.db.exception.qp.LogicalOptimizeException;
......@@ -53,7 +54,8 @@ public class LogicalPlanSmallTest {
}
@Test
public void testSlimit1() throws QueryProcessorException, ArgsErrorException {
public void testSlimit1()
throws QueryProcessorException, ArgsErrorException, MetadataErrorException {
String sqlStr = "select * from root.vehicle.d1 where s1 < 20 and time <= now() slimit 10";
AstNode astTree;
try {
......@@ -70,7 +72,8 @@ public class LogicalPlanSmallTest {
}
@Test(expected = LogicalOperatorException.class)
public void testSlimit2() throws QueryProcessorException, ArgsErrorException {
public void testSlimit2()
throws QueryProcessorException, ArgsErrorException, MetadataErrorException {
String sqlStr = "select * from root.vehicle.d1 where s1 < 20 and time <= now() slimit 1111111111111111111111";
AstNode astTree;
try {
......@@ -86,7 +89,8 @@ public class LogicalPlanSmallTest {
}
@Test(expected = LogicalOperatorException.class)
public void testSlimit3() throws QueryProcessorException, ArgsErrorException {
public void testSlimit3()
throws QueryProcessorException, ArgsErrorException, MetadataErrorException {
String sqlStr = "select * from root.vehicle.d1 where s1 < 20 and time <= now() slimit 0";
AstNode astTree;
try {
......@@ -102,7 +106,8 @@ public class LogicalPlanSmallTest {
}
@Test
public void testSoffset() throws QueryProcessorException, ArgsErrorException {
public void testSoffset()
throws QueryProcessorException, ArgsErrorException, MetadataErrorException {
String sqlStr = "select * from root.vehicle.d1 where s1 < 20 and time <= now() slimit 10 soffset 1";
AstNode astTree;
try {
......@@ -120,7 +125,8 @@ public class LogicalPlanSmallTest {
}
@Test(expected = LogicalOptimizeException.class)
public void testSlimitLogicalOptimize() throws QueryProcessorException, ArgsErrorException {
public void testSlimitLogicalOptimize()
throws QueryProcessorException, ArgsErrorException, MetadataErrorException {
String sqlStr = "select s1 from root.vehicle.d1 where s1 < 20 and time <= now() slimit 10 soffset 1";
AstNode astTree;
try {
......@@ -157,7 +163,8 @@ public class LogicalPlanSmallTest {
}
@Test(expected = LogicalOperatorException.class)
public void testLimit1() throws QueryProcessorException, ArgsErrorException {
public void testLimit1()
throws QueryProcessorException, ArgsErrorException, MetadataErrorException {
String sqlStr = "select s1 from root.vehicle.d1 where s1 < 20 and time <= now() limit 111111111111111111111111";
AstNode astTree;
try {
......@@ -173,7 +180,8 @@ public class LogicalPlanSmallTest {
}
@Test(expected = LogicalOperatorException.class)
public void testLimit2() throws QueryProcessorException, ArgsErrorException {
public void testLimit2()
throws QueryProcessorException, ArgsErrorException, MetadataErrorException {
String sqlStr = "select s1 from root.vehicle.d1 where s1 < 20 and time <= now() limit 0";
AstNode astTree;
try {
......
......@@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.apache.iotdb.db.exception.ArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.exception.qp.QueryProcessorException;
import org.apache.iotdb.db.qp.QueryProcessor;
......@@ -77,7 +78,7 @@ public class PhysicalPlanTest {
@Test
public void testMetadata()
throws QueryProcessorException, ArgsErrorException, ProcessorException {
throws QueryProcessorException, ArgsErrorException, ProcessorException, MetadataErrorException {
String metadata = "create timeseries root.vehicle.d1.s1 with datatype=INT32,encoding=RLE";
QueryProcessor processor = new QueryProcessor(new MemIntQpExecutor());
MetadataPlan plan = (MetadataPlan) processor.parseSQLToPhysicalPlan(metadata);
......@@ -86,7 +87,8 @@ public class PhysicalPlanTest {
}
@Test
public void testAuthor() throws QueryProcessorException, ArgsErrorException, ProcessorException {
public void testAuthor()
throws QueryProcessorException, ArgsErrorException, ProcessorException, MetadataErrorException {
String sql = "grant role xm privileges 'SET_STORAGE_GROUP','DELETE_TIMESERIES' on root.vehicle.d1.s1";
QueryProcessor processor = new QueryProcessor(new MemIntQpExecutor());
AuthorPlan plan = (AuthorPlan) processor.parseSQLToPhysicalPlan(sql);
......@@ -99,7 +101,7 @@ public class PhysicalPlanTest {
@Test
public void testProperty()
throws QueryProcessorException, ArgsErrorException, ProcessorException {
throws QueryProcessorException, ArgsErrorException, ProcessorException, MetadataErrorException {
String sql = "add label label1021 to property propropro";
QueryProcessor processor = new QueryProcessor(new MemIntQpExecutor());
PropertyPlan plan = (PropertyPlan) processor.parseSQLToPhysicalPlan(sql);
......@@ -113,7 +115,7 @@ public class PhysicalPlanTest {
@Test
public void testAggregation()
throws QueryProcessorException, ArgsErrorException, ProcessorException {
throws QueryProcessorException, ArgsErrorException, ProcessorException, MetadataErrorException {
String sqlStr = "select sum(d1.s1) " + "from root.vehicle "
+ "where time <= 51 or !(time != 100 and time < 460)";
PhysicalPlan plan = processor.parseSQLToPhysicalPlan(sqlStr);
......@@ -126,7 +128,7 @@ public class PhysicalPlanTest {
@Test
public void testGroupBy1()
throws QueryProcessorException, ArgsErrorException, ProcessorException {
throws QueryProcessorException, ArgsErrorException, ProcessorException, MetadataErrorException {
String sqlStr =
"select count(s1) " + "from root.vehicle.d1 " + "where s1 < 20 and time <= now() "
+ "group by(10m, 44, [1,3], [4,5])";
......@@ -140,7 +142,7 @@ public class PhysicalPlanTest {
@Test
public void testGroupBy2()
throws QueryProcessorException, ArgsErrorException, ProcessorException {
throws QueryProcessorException, ArgsErrorException, ProcessorException, MetadataErrorException {
String sqlStr =
"select count(s1) " + "from root.vehicle.d1 " + "where s1 < 20 and time <= now() "
+ "group by(111ms, [123,2017-6-2T12:00:12+07:00], [55555, now()])";
......@@ -153,7 +155,8 @@ public class PhysicalPlanTest {
}
@Test
public void testFill1() throws QueryProcessorException, ArgsErrorException, ProcessorException {
public void testFill1()
throws QueryProcessorException, ArgsErrorException, ProcessorException, MetadataErrorException {
String sqlStr = "SELECT s1 FROM root.vehicle.d1 WHERE time = 5000 Fill(int32[linear, 5m, 5m], boolean[previous, 5m])";
PhysicalPlan plan = processor.parseSQLToPhysicalPlan(sqlStr);
if (!plan.isQuery()) {
......@@ -170,7 +173,8 @@ public class PhysicalPlanTest {
}
@Test
public void testFill2() throws QueryProcessorException, ArgsErrorException, ProcessorException {
public void testFill2()
throws QueryProcessorException, ArgsErrorException, ProcessorException, MetadataErrorException {
String sqlStr = "SELECT s1 FROM root.vehicle.d1 WHERE time = 5000 Fill(int32[linear], boolean[previous])";
PhysicalPlan plan = processor.parseSQLToPhysicalPlan(sqlStr);
if (!plan.isQuery()) {
......@@ -205,7 +209,8 @@ public class PhysicalPlanTest {
}
@Test
public void testQuery1() throws QueryProcessorException, ArgsErrorException, ProcessorException {
public void testQuery1()
throws QueryProcessorException, ArgsErrorException, ProcessorException, MetadataErrorException {
String sqlStr = "SELECT s1 FROM root.vehicle.d1 WHERE time > 5000";
PhysicalPlan plan = processor.parseSQLToPhysicalPlan(sqlStr);
IExpression queryFilter = ((QueryPlan) plan).getExpression();
......@@ -214,7 +219,8 @@ public class PhysicalPlanTest {
}
@Test
public void testQuery2() throws QueryProcessorException, ArgsErrorException, ProcessorException {
public void testQuery2()
throws QueryProcessorException, ArgsErrorException, ProcessorException, MetadataErrorException {
String sqlStr = "SELECT s1 FROM root.vehicle.d1 WHERE time > 50 and time <= 100";
PhysicalPlan plan = processor.parseSQLToPhysicalPlan(sqlStr);
IExpression queryFilter = ((QueryPlan) plan).getExpression();
......@@ -225,7 +231,8 @@ public class PhysicalPlanTest {
}
@Test
public void testQuery3() throws QueryProcessorException, ArgsErrorException, ProcessorException {
public void testQuery3()
throws QueryProcessorException, ArgsErrorException, ProcessorException, MetadataErrorException {
String sqlStr = "SELECT s1 FROM root.vehicle.d1 WHERE time > 50 and time <= 100 or s1 < 10";
PhysicalPlan plan = processor.parseSQLToPhysicalPlan(sqlStr);
IExpression queryFilter = ((QueryPlan) plan).getExpression();
......@@ -237,7 +244,8 @@ public class PhysicalPlanTest {
}
@Test
public void testQuery4() throws QueryProcessorException, ArgsErrorException, ProcessorException {
public void testQuery4()
throws QueryProcessorException, ArgsErrorException, ProcessorException, MetadataErrorException {
String sqlStr = "SELECT s1 FROM root.vehicle.d1 WHERE time > 50 and time <= 100 and s1 < 10";
PhysicalPlan plan = processor.parseSQLToPhysicalPlan(sqlStr);
IExpression queryFilter = ((QueryPlan) plan).getExpression();
......@@ -253,7 +261,8 @@ public class PhysicalPlanTest {
}
@Test
public void testQuery5() throws QueryProcessorException, ArgsErrorException, ProcessorException {
public void testQuery5()
throws QueryProcessorException, ArgsErrorException, ProcessorException, MetadataErrorException {
String sqlStr = "SELECT s1 FROM root.vehicle.d1 WHERE s1 > 20 or s1 < 10";
PhysicalPlan plan = processor.parseSQLToPhysicalPlan(sqlStr);
IExpression queryFilter = ((QueryPlan) plan).getExpression();
......@@ -264,7 +273,8 @@ public class PhysicalPlanTest {
}
@Test
public void testQuery6() throws QueryProcessorException, ArgsErrorException, ProcessorException {
public void testQuery6()
throws QueryProcessorException, ArgsErrorException, ProcessorException, MetadataErrorException {
String sqlStr = "SELECT s1 FROM root.vehicle.d1 WHERE time > 20 or time < 10";
PhysicalPlan plan = processor.parseSQLToPhysicalPlan(sqlStr);
IExpression queryFilter = ((QueryPlan) plan).getExpression();
......
......@@ -29,6 +29,7 @@ import java.util.List;
import java.util.Map;
import org.apache.iotdb.db.exception.ArgsErrorException;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.exception.qp.QueryProcessorException;
import org.apache.iotdb.db.qp.QueryProcessor;
......@@ -97,13 +98,13 @@ public class QPUpdateTest {
String sql = "UPDATE root.laptop SET d1.s1 = -33000, d2.s1 = 'string' WHERE time < 100";
try {
plan = processor.parseSQLToPhysicalPlan(sql);
} catch (QueryProcessorException e) {
} catch (QueryProcessorException | MetadataErrorException e) {
assertEquals("UPDATE clause doesn't support multi-update yet.", e.getMessage());
}
sql = "UPDATE root.laptop SET d1.s1 = -33000 WHERE time < 100";
try {
plan = processor.parseSQLToPhysicalPlan(sql);
} catch (QueryProcessorException e) {
} catch (QueryProcessorException | MetadataErrorException e) {
assertTrue(false);
}
assertEquals("UpdatePlan: paths: root.laptop.d1.s1\n" + " value:-33000\n" + " filter: \n"
......@@ -114,8 +115,8 @@ public class QPUpdateTest {
}
private void testUpdate()
throws QueryProcessorException, ArgsErrorException, ProcessorException, IOException,
FileNodeManagerException, QueryFilterOptimizationException {
throws QueryProcessorException, ArgsErrorException, ProcessorException, IOException,
FileNodeManagerException, QueryFilterOptimizationException, MetadataErrorException {
String sqlStr = "update root.qp_update_test.device_1.sensor_1 set value = 33000 where time >= 10 and time <= 10";
PhysicalPlan plan1 = processor.parseSQLToPhysicalPlan(sqlStr);
boolean upRet = processor.getExecutor().processNonQuery(plan1);
......@@ -135,8 +136,8 @@ public class QPUpdateTest {
}
private void testDeletePaths()
throws QueryProcessorException, ProcessorException, ArgsErrorException, IOException,
FileNodeManagerException, QueryFilterOptimizationException {
throws QueryProcessorException, ProcessorException, ArgsErrorException, IOException,
FileNodeManagerException, QueryFilterOptimizationException, MetadataErrorException {
String sqlStr = "delete from root.qp_update_test.device_1 where time < 15";
PhysicalPlan plan1 = processor.parseSQLToPhysicalPlan(sqlStr);
boolean upRet = processor.getExecutor().processNonQuery(plan1);
......@@ -159,8 +160,8 @@ public class QPUpdateTest {
}
private void testDelete()
throws QueryProcessorException, ProcessorException, ArgsErrorException, IOException,
FileNodeManagerException, QueryFilterOptimizationException {
throws QueryProcessorException, ProcessorException, ArgsErrorException, IOException,
FileNodeManagerException, QueryFilterOptimizationException, MetadataErrorException {
String sqlStr = "delete from root.qp_update_test.device_1.sensor_1 where time < 15";
PhysicalPlan plan1 = processor.parseSQLToPhysicalPlan(sqlStr);
boolean upRet = processor.getExecutor().processNonQuery(plan1);
......@@ -183,8 +184,8 @@ public class QPUpdateTest {
}
private void testInsert()
throws QueryProcessorException, ProcessorException, ArgsErrorException, IOException,
FileNodeManagerException, QueryFilterOptimizationException {
throws QueryProcessorException, ProcessorException, ArgsErrorException, IOException,
FileNodeManagerException, QueryFilterOptimizationException, MetadataErrorException {
String sqlStr = "insert into root.qp_update_test.device_1 (timestamp, sensor_1, sensor_2) values (13, 50, 40)";
PhysicalPlan plan1 = processor.parseSQLToPhysicalPlan(sqlStr);
......
......@@ -26,6 +26,7 @@ import java.util.List;
import java.util.Map;
import org.antlr.runtime.RecognitionException;
import org.apache.iotdb.db.exception.ArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.exception.qp.QueryProcessorException;
import org.apache.iotdb.db.qp.QueryProcessor;
......@@ -108,7 +109,8 @@ public class TestConcatOptimizer {
@Test
public void testConcat1()
throws QueryProcessorException, RecognitionException, ArgsErrorException, ProcessorException {
throws QueryProcessorException, RecognitionException, ArgsErrorException, ProcessorException,
MetadataErrorException {
String inputSQL = "select s1 from root.laptop.d1";
PhysicalPlan plan = processor.parseSQLToPhysicalPlan(inputSQL);
assertEquals("root.laptop.d1.s1", plan.getPaths().get(0).toString());
......@@ -116,7 +118,8 @@ public class TestConcatOptimizer {
@Test
public void testConcat2()
throws QueryProcessorException, RecognitionException, ArgsErrorException, ProcessorException {
throws QueryProcessorException, RecognitionException, ArgsErrorException,
ProcessorException, MetadataErrorException {
String inputSQL = "select s1 from root.laptop.*";
PhysicalPlan plan = processor.parseSQLToPhysicalPlan(inputSQL);
assertEquals("root.laptop.d1.s1", plan.getPaths().get(0).toString());
......@@ -126,7 +129,8 @@ public class TestConcatOptimizer {
@Test
public void testConcat3()
throws QueryProcessorException, RecognitionException, ArgsErrorException, ProcessorException {
throws QueryProcessorException, RecognitionException, ArgsErrorException,
ProcessorException, MetadataErrorException {
String inputSQL = "select s1 from root.laptop.d1 where s1 < 10";
PhysicalPlan plan = processor.parseSQLToPhysicalPlan(inputSQL);
SingleSeriesExpression seriesExpression = new SingleSeriesExpression(
......
......@@ -20,11 +20,10 @@ package org.apache.iotdb.db.writelog;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.exception.FileNodeManagerException;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.RecoverException;
import org.apache.iotdb.db.metadata.MManager;
......@@ -123,7 +122,7 @@ public class PerformanceTest {
@Test
public void recoverTest()
throws IOException, RecoverException, FileNodeManagerException, PathErrorException,
MetadataArgsErrorException {
MetadataErrorException {
// this test write 1000000 * 3 logs , recover from them and report elapsed time
if (skip) {
return;
......@@ -136,7 +135,7 @@ public class PerformanceTest {
try {
MManager.getInstance().setStorageLevelToMTree("root.logTestDevice");
} catch (PathErrorException ignored) {
} catch (MetadataErrorException ignored) {
}
MManager.getInstance().addPathToMTree("root.logTestDevice.s1",
TSDataType.DOUBLE.name(),
......
......@@ -28,10 +28,12 @@ import java.util.Arrays;
import java.util.List;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.ProcessorException;
import org.apache.iotdb.db.exception.RecoverException;
import org.apache.iotdb.db.metadata.MManager;
import org.apache.iotdb.db.metadata.Metadata;
import org.apache.iotdb.db.qp.physical.PhysicalPlan;
import org.apache.iotdb.db.qp.physical.crud.DeletePlan;
import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
......@@ -76,7 +78,7 @@ public class RecoverTest {
try {
MManager.getInstance().setStorageLevelToMTree("root.testLogNode");
} catch (PathErrorException ignored) {
} catch (MetadataErrorException ignored) {
}
ExclusiveWriteLogNode logNode = new ExclusiveWriteLogNode("root.testLogNode",
tempRestore.getPath(),
......@@ -148,7 +150,7 @@ public class RecoverTest {
try {
MManager.getInstance().setStorageLevelToMTree("root.testLogNode");
} catch (PathErrorException ignored) {
} catch (MetadataErrorException ignored) {
}
ExclusiveWriteLogNode logNode = new ExclusiveWriteLogNode("root.testLogNode",
tempRestore.getPath(),
......@@ -226,7 +228,7 @@ public class RecoverTest {
try {
MManager.getInstance().setStorageLevelToMTree("root.testLogNode");
} catch (PathErrorException ignored) {
} catch (MetadataErrorException ignored) {
}
ExclusiveWriteLogNode logNode = new ExclusiveWriteLogNode("root.testLogNode",
tempRestore.getPath(),
......
......@@ -22,10 +22,9 @@ import static junit.framework.TestCase.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.exception.MetadataArgsErrorException;
import org.apache.iotdb.db.exception.MetadataErrorException;
import org.apache.iotdb.db.exception.PathErrorException;
import org.apache.iotdb.db.exception.RecoverException;
import org.apache.iotdb.db.metadata.MManager;
......@@ -98,7 +97,7 @@ public class WriteLogNodeManagerTest {
}
@Test
public void testRecoverAll() throws IOException, RecoverException, MetadataArgsErrorException {
public void testRecoverAll() throws IOException, RecoverException, MetadataErrorException {
// this test create 5 log nodes and recover them
File tempRestore = File.createTempFile("managerTest", "restore");
File tempProcessorStore = File.createTempFile("managerTest", "processorStore");
......