未验证 提交 379f2c71 编写于 作者: M Marcos_Zyk 提交者: GitHub

Fix sonar check in schema module (#10297)

上级 34dfb499
......@@ -24,10 +24,6 @@ import org.apache.iotdb.rpc.TSStatusCode;
public class AlignedTimeseriesException extends MetadataException {
/**
* @param message exception message
* @param path related timeseries path
*/
public AlignedTimeseriesException(String message, String path) {
super(String.format("%s (Path: %s)", message, path));
errorCode = TSStatusCode.ALIGNED_TIMESERIES_ERROR.getStatusCode();
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.exception.metadata;
import org.apache.iotdb.commons.exception.MetadataException;
......@@ -31,16 +32,17 @@ public class DataTypeMismatchException extends MetadataException {
Object value) {
super(
String.format(
"data type of %s.%s is not consistent, registered type %s, inserting type %s, timestamp %s, value %s",
"data type of %s.%s is not consistent, "
+ "registered type %s, inserting type %s, timestamp %s, value %s",
deviceName,
measurementName,
typeInSchema,
insertType,
time,
value == null
? "null"
: value.toString().length() < 100
? value.toString()
: value.toString().substring(0, 100)));
value == null ? "null" : processValue(value.toString())));
}
private static String processValue(String value) {
return value.length() < 100 ? value : value.substring(0, 100);
}
}
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.exception.metadata;
import org.apache.iotdb.commons.exception.MetadataException;
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.exception.metadata;
import org.apache.iotdb.commons.exception.MetadataException;
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.exception.metadata;
import org.apache.iotdb.commons.exception.MetadataException;
......
......@@ -26,7 +26,7 @@ import org.apache.iotdb.rpc.TSStatusCode;
public class MeasurementAlreadyExistException extends MetadataException {
private MeasurementPath measurementPath;
private final MeasurementPath measurementPath;
public MeasurementAlreadyExistException(String path, MeasurementPath measurementPath) {
super(
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.exception.metadata;
import org.apache.iotdb.commons.exception.MetadataException;
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.exception.metadata;
import org.apache.iotdb.commons.exception.MetadataException;
......@@ -26,7 +27,10 @@ public class SchemaQuotaExceededException extends MetadataException {
public SchemaQuotaExceededException(ClusterSchemaQuotaLevel level, long limit) {
super(
String.format(
"The current metadata capacity has exceeded the cluster quota. The cluster quota is set at the %s level, with a limit number of %d. Please review your configuration or delete some existing time series to comply with the quota.",
"The current metadata capacity has exceeded the cluster quota. "
+ "The cluster quota is set at the %s level, with a limit number of %d. "
+ "Please review your configuration "
+ "or delete some existing time series to comply with the quota.",
level.toString(), limit),
TSStatusCode.SCHEMA_QUOTA_EXCEEDED.getStatusCode());
}
......
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.exception.metadata;
import org.apache.iotdb.commons.exception.MetadataException;
import org.apache.iotdb.rpc.TSStatusCode;
/**
 * Thrown when the number of created series exceeds the maximum allowed series number; carries the
 * {@code SERIES_OVERFLOW} status code.
 */
public class SeriesNumberOverflowException extends MetadataException {
public SeriesNumberOverflowException() {
super("exceed max allowed series number.", TSStatusCode.SERIES_OVERFLOW.getStatusCode());
}
}
......@@ -28,8 +28,10 @@ public class SeriesOverflowException extends MetadataException {
public SeriesOverflowException(long memoryUsage, long seriesNum) {
super(
String.format(
"There are too many timeseries in memory. Current memory usage is %s and series num is %s. "
+ "Please increase MAX_HEAP_SIZE in datanode-env.sh/bat, restart and create timeseries again.",
"There are too many timeseries in memory. "
+ "Current memory usage is %s and series num is %s. "
+ "Please increase MAX_HEAP_SIZE in datanode-env.sh/bat, "
+ "restart and create timeseries again.",
memoryUsage, seriesNum),
TSStatusCode.SERIES_OVERFLOW.getStatusCode());
}
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.exception.metadata.cache;
import org.apache.iotdb.commons.exception.MetadataException;
......
......@@ -17,6 +17,7 @@
* under the License.
*
*/
package org.apache.iotdb.db.exception.metadata.schemafile;
import org.apache.iotdb.commons.exception.MetadataException;
......
......@@ -39,7 +39,8 @@ public class TemplateImcompatibeException extends MetadataException {
public TemplateImcompatibeException(String templateName, PartialPath templateSetPath) {
super(
String.format(
"Cannot set schema template [%s] to path [%s] since there's timeseries under path [%s].",
"Cannot set schema template [%s] to path [%s] "
+ "since there's timeseries under path [%s].",
templateName, templateSetPath, templateSetPath),
TSStatusCode.TEMPLATE_INCOMPATIBLE.getStatusCode());
this.isUserException = true;
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.exception.metadata.view;
import org.apache.iotdb.commons.exception.MetadataException;
......@@ -29,13 +30,13 @@ public class BrokenViewException extends MetadataException {
super(
String.format(
"View is broken! The source path [%s] maps to unmatched %s path(s): %s.",
sourcePath, matchedPaths.size(), matchedPaths.toString()));
sourcePath, matchedPaths.size(), matchedPaths));
}
public BrokenViewException(String viewPath, String sourcePath, List<PartialPath> matchedPaths) {
super(
String.format(
"View [%s] is broken! The source path [%s] maps to unmatched %s path(s): %s.",
viewPath, sourcePath, matchedPaths.size(), matchedPaths.toString()));
viewPath, sourcePath, matchedPaths.size(), matchedPaths));
}
}
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.exception.metadata.view;
import org.apache.iotdb.commons.exception.MetadataException;
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.exception.metadata.view;
import org.apache.iotdb.commons.exception.MetadataException;
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.exception.metadata.view;
public class ViewContainsAggregationException extends UnsupportedViewException {
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.exception.metadata.view;
import org.apache.iotdb.commons.exception.MetadataException;
......
......@@ -43,7 +43,7 @@ import java.util.Collection;
import java.util.Collections;
import java.util.List;
/** store id table schema in append only file */
/** Store id table schema in append only file. */
public class AppendOnlyDiskSchemaManager implements IDiskSchemaManager {
private static final String FILE_NAME = "SeriesKeyMapping.meta";
......@@ -200,7 +200,7 @@ public class AppendOnlyDiskSchemaManager implements IDiskSchemaManager {
}
/**
* get DiskSchemaEntries from disk file
* Get DiskSchemaEntries from disk file.
*
* @param offsets the offset of each record on the disk file
* @return DiskSchemaEntries
......@@ -220,10 +220,10 @@ public class AppendOnlyDiskSchemaManager implements IDiskSchemaManager {
}
/**
* delete DiskSchemaEntry on disk
* Delete DiskSchemaEntry on disk.
*
* @param offset the offset of a record on the disk file
* @throws MetadataException
* @throws MetadataException metadata exception
*/
@Override
public void deleteDiskSchemaEntryByOffset(long offset) throws MetadataException {
......
......@@ -28,8 +28,6 @@ import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.metadata.idtable.entry.DeviceEntry;
import org.apache.iotdb.db.metadata.idtable.entry.DeviceIDFactory;
import org.apache.iotdb.db.metadata.idtable.entry.DiskSchemaEntry;
import org.apache.iotdb.db.metadata.idtable.entry.IDeviceID;
import org.apache.iotdb.db.metadata.idtable.entry.SchemaEntry;
import org.apache.iotdb.db.metadata.idtable.entry.TimeseriesID;
import org.apache.iotdb.db.metadata.plan.schemaregion.write.ICreateAlignedTimeSeriesPlan;
......@@ -42,18 +40,17 @@ import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
public interface IDTable {
/** logger */
// logger
Logger logger = LoggerFactory.getLogger(IDTable.class);
/** iotdb config */
// iotdb config
IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
/**
* create aligned timeseries
* Create aligned timeseries.
*
* @param plan create aligned timeseries plan
* @throws MetadataException if the device is not aligned, throw it
......@@ -61,7 +58,7 @@ public interface IDTable {
void createAlignedTimeseries(ICreateAlignedTimeSeriesPlan plan) throws MetadataException;
/**
* create timeseries
* Create timeseries.
*
* @param plan create timeseries plan
* @throws MetadataException if the device is aligned, throw it
......@@ -69,28 +66,23 @@ public interface IDTable {
void createTimeseries(ICreateTimeSeriesPlan plan) throws MetadataException;
/**
* Delete all timeseries matching the given paths
* Delete all timeseries matching the given paths.
*
* @param fullPaths paths to be deleted
* @return deletion failed Timeseries
* @throws MetadataException
* @throws MetadataException metadata exception
*/
Pair<Integer, Set<String>> deleteTimeseries(List<PartialPath> fullPaths) throws MetadataException;
/**
* check inserting timeseries existence and fill their measurement mnode
* Clear id table and close file.
*
* @param plan insert plan
* @return reusable device id
* @throws MetadataException if insert plan's aligned value is inconsistent with device
* @throws IOException IOException
*/
// IDeviceID getSeriesSchemas(InsertPlan plan) throws MetadataException;
/** clear id table and close file */
void clear() throws IOException;
/**
* get device entry from device path
* Get device entry from device path.
*
* @param deviceName device name of the time series
* @return device entry of the timeseries
......@@ -98,7 +90,7 @@ public interface IDTable {
DeviceEntry getDeviceEntry(String deviceName);
/**
* get schema from device and measurements
* Get schema from device and measurements.
*
* @param deviceName device name of the time series
* @param measurementName measurement name of the time series
......@@ -107,26 +99,27 @@ public interface IDTable {
IMeasurementSchema getSeriesSchema(String deviceName, String measurementName);
/**
* get all device entries
* Get all device entries.
*
* @return all device entries
*/
List<DeviceEntry> getAllDeviceEntry();
/**
* put schema entry to id table, currently used in recover
* Put schema entry to id table, currently used in recover.
*
* @param devicePath device path (can be device id formed path)
* @param measurement measurement name
* @param schemaEntry schema entry to put
* @param isAligned is the device aligned
* @throws MetadataException metadata exception
*/
void putSchemaEntry(
String devicePath, String measurement, SchemaEntry schemaEntry, boolean isAligned)
throws MetadataException;
/**
* translate query path's device path to device id
* Translate query path's device path to device id.
*
* @param fullPath full query path
* @return translated query path
......@@ -160,18 +153,6 @@ public interface IDTable {
}
}
/**
* get DiskSchemaEntries from disk file
*
* @param schemaEntries get the disk pointers from schemaEntries
* @return DiskSchemaEntries
*/
@TestOnly
List<DiskSchemaEntry> getDiskSchemaEntries(List<SchemaEntry> schemaEntries);
@TestOnly
Map<IDeviceID, DeviceEntry>[] getIdTables();
@TestOnly
IDiskSchemaManager getIDiskSchemaManager();
}
......@@ -26,10 +26,8 @@ import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.metadata.idtable.entry.DeviceEntry;
import org.apache.iotdb.db.metadata.idtable.entry.DeviceIDFactory;
import org.apache.iotdb.db.metadata.idtable.entry.DiskSchemaEntry;
import org.apache.iotdb.db.metadata.idtable.entry.IDeviceID;
import org.apache.iotdb.db.metadata.idtable.entry.SchemaEntry;
import org.apache.iotdb.db.metadata.idtable.entry.TimeseriesID;
import org.apache.iotdb.db.metadata.plan.schemaregion.write.ICreateAlignedTimeSeriesPlan;
import org.apache.iotdb.db.metadata.plan.schemaregion.write.ICreateTimeSeriesPlan;
import org.apache.iotdb.tsfile.utils.Pair;
......@@ -54,18 +52,16 @@ public class IDTableHashmapImpl implements IDTable {
// number of table slot
private static final int NUM_OF_SLOTS = 256;
/** logger */
// logger
private static final Logger logger = LoggerFactory.getLogger(IDTableHashmapImpl.class);
/**
* 256 hashmap for avoiding rehash performance issue and lock competition device ID ->
* (measurement name -> schema entry)
*/
// 256 hashmap for avoiding rehash performance issue and lock competition
// device ID -> (measurement name -> schema entry)
private Map<IDeviceID, DeviceEntry>[] idTables;
/** disk schema manager to manage disk schema entry */
// disk schema manager to manage disk schema entry
private IDiskSchemaManager IDiskSchemaManager;
/** iotdb config */
// iotdb config
protected static IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
public IDTableHashmapImpl(File storageGroupDir) {
......@@ -80,7 +76,7 @@ public class IDTableHashmapImpl implements IDTable {
}
/**
* create aligned timeseries
* Create aligned timeseries.
*
* @param plan create aligned timeseries plan
* @throws MetadataException if the device is not aligned, throw it
......@@ -107,7 +103,7 @@ public class IDTableHashmapImpl implements IDTable {
}
/**
* create timeseries
* Create timeseries.
*
* @param plan create timeseries plan
* @throws MetadataException if the device is aligned, throw it
......@@ -128,11 +124,11 @@ public class IDTableHashmapImpl implements IDTable {
}
/**
* Delete all timeseries matching the given paths
* Delete all timeseries matching the given paths.
*
* @param fullPaths paths to be deleted
* @return deletion failed Timeseries
* @throws MetadataException
* @throws MetadataException metadata exception
*/
@Override
public synchronized Pair<Integer, Set<String>> deleteTimeseries(List<PartialPath> fullPaths)
......@@ -158,7 +154,6 @@ public class IDTableHashmapImpl implements IDTable {
for (Pair<PartialPath, Long> pair : deletedPairs) {
try {
getIDiskSchemaManager().deleteDiskSchemaEntryByOffset(pair.right);
DeviceEntry deviceEntry = getDeviceEntry(pair.left.getDevice());
Map<String, SchemaEntry> map = getDeviceEntry(pair.left.getDevice()).getMeasurementMap();
map.keySet().remove(pair.left.getMeasurement());
deletedNum++;
......@@ -177,7 +172,7 @@ public class IDTableHashmapImpl implements IDTable {
}
/**
* get device entry from device path
* Get device entry from device path.
*
* @param deviceName device name of the time series
* @return device entry of the timeseries
......@@ -192,7 +187,7 @@ public class IDTableHashmapImpl implements IDTable {
}
/**
* get schema from device and measurements
* Get schema from device and measurements.
*
* @param deviceName device name of the time series
* @param measurementName measurement name of the time series
......@@ -237,27 +232,12 @@ public class IDTableHashmapImpl implements IDTable {
}
/**
* get DiskSchemaEntries from disk file
*
* @param schemaEntries get the disk pointers from schemaEntries
* @return DiskSchemaEntries
*/
@Override
@TestOnly
public synchronized List<DiskSchemaEntry> getDiskSchemaEntries(List<SchemaEntry> schemaEntries) {
List<Long> offsets = new ArrayList<>(schemaEntries.size());
for (SchemaEntry schemaEntry : schemaEntries) {
offsets.add(schemaEntry.getDiskPointer());
}
return getIDiskSchemaManager().getDiskSchemaEntriesByOffset(offsets);
}
/**
* get device id from device path and check is aligned,
* Get device id from device path and check whether it is aligned.
*
* @param deviceName device name of the time series
* @param isAligned whether the insert plan is aligned
* @return device entry of the timeseries
* @throws MetadataException metadata exception
*/
private DeviceEntry getDeviceEntryWithAlignedCheck(String deviceName, boolean isAligned)
throws MetadataException {
......@@ -287,7 +267,7 @@ public class IDTableHashmapImpl implements IDTable {
}
/**
* calculate slot that this deviceID should in
* Calculate slot that this deviceID should be in.
*
* @param deviceID device id
* @return slot number
......@@ -297,38 +277,6 @@ public class IDTableHashmapImpl implements IDTable {
return Math.abs(hashVal == Integer.MIN_VALUE ? 0 : hashVal) % NUM_OF_SLOTS;
}
/**
 * Get the schema entry of the given timeseries from the in-memory id table.
 *
 * @param timeseriesID the timeseries ID
 * @return schema entry of the timeseries
 * @throws MetadataException if the timeseries does not exist in the id table
 */
private SchemaEntry getSchemaEntry(TimeseriesID timeseriesID) throws MetadataException {
IDeviceID deviceID = timeseriesID.getDeviceID();
// locate the hashmap slot holding this device's entry
int slot = calculateSlot(deviceID);
DeviceEntry deviceEntry = idTables[slot].get(deviceID);
if (deviceEntry == null) {
throw new MetadataException(
"get non exist timeseries's schema entry, timeseries id is: " + timeseriesID);
}
SchemaEntry schemaEntry = deviceEntry.getSchemaEntry(timeseriesID.getMeasurement());
if (schemaEntry == null) {
throw new MetadataException(
"get non exist timeseries's schema entry, timeseries id is: " + timeseriesID);
}
return schemaEntry;
}
@Override
@TestOnly
public Map<IDeviceID, DeviceEntry>[] getIdTables() {
return idTables;
}
@Override
@TestOnly
public IDiskSchemaManager getIDiskSchemaManager() {
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.metadata.idtable;
import org.apache.iotdb.commons.exception.MetadataException;
......@@ -33,16 +34,16 @@ import java.io.File;
import java.io.IOException;
import java.util.HashMap;
/** This class manages one id table for each logical database */
/** This class manages one id table for each logical database. */
public class IDTableManager {
/** logger */
// logger
Logger logger = LoggerFactory.getLogger(IDTableManager.class);
/** database path -> id table */
// database path -> id table
HashMap<String, IDTable> idTableMap;
/** system dir */
// system dir
private final String systemDir =
FilePathUtils.regularizePath(IoTDBDescriptor.getInstance().getConfig().getSystemDir())
+ "databases";
......@@ -58,7 +59,7 @@ public class IDTableManager {
}
/**
* get instance
* Get instance.
*
* @return instance of the factory
*/
......@@ -72,10 +73,11 @@ public class IDTableManager {
// endregion
/**
* get id table by device path
* Get id table by device path.
*
* @param devicePath device path
* @return id table belongs to path's database
* @throws UnsupportedOperationException not supported yet
*/
public synchronized IDTable getIDTable(PartialPath devicePath) {
// try {
......@@ -95,7 +97,7 @@ public class IDTableManager {
}
/**
* get id table by database path
* Get id table by database path.
*
* @param sgPath database path
* @return id table belongs to path's database
......@@ -109,11 +111,12 @@ public class IDTableManager {
}
/**
* get schema from device and measurements
* Get schema from device and measurements.
*
* @param deviceName device name of the time series
* @param measurementName measurement name of the time series
* @return schema entry of the time series
* @throws MetadataException path may not exist
*/
public synchronized IMeasurementSchema getSeriesSchema(String deviceName, String measurementName)
throws MetadataException {
......@@ -127,7 +130,11 @@ public class IDTableManager {
throw new PathNotExistException(new PartialPath(deviceName, measurementName).toString());
}
/** clear id table map */
/**
* Clear id table map.
*
* @throws IOException IOException
*/
public void clear() throws IOException {
for (IDTable idTable : idTableMap.values()) {
idTable.clear();
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.metadata.idtable;
import org.apache.iotdb.commons.exception.MetadataException;
......@@ -26,11 +27,11 @@ import java.io.IOException;
import java.util.Collection;
import java.util.List;
/** This class manages IO of id table's schema entry */
/** This class manages IO of id table's schema entry. */
public interface IDiskSchemaManager {
/**
* serialize a disk schema entry
* Serialize a disk schema entry.
*
* @param schemaEntry disk schema entry
* @return disk position of that entry
......@@ -38,22 +39,23 @@ public interface IDiskSchemaManager {
long serialize(DiskSchemaEntry schemaEntry);
/**
* recover id table from log file
* Recover id table from log file.
*
* @param idTable id table need to be recovered
*/
void recover(IDTable idTable);
/**
* get all disk schema entries from file
* Get all disk schema entries from file.
*
* @return collection of all disk schema entries
* @throws IOException IOException
*/
@TestOnly
Collection<DiskSchemaEntry> getAllSchemaEntry() throws IOException;
/**
* get DiskSchemaEntries from disk file
* Get DiskSchemaEntries from disk file.
*
* @param offsets the offset of each record on the disk file
* @return DiskSchemaEntries
......@@ -61,13 +63,17 @@ public interface IDiskSchemaManager {
List<DiskSchemaEntry> getDiskSchemaEntriesByOffset(List<Long> offsets);
/**
* delete DiskSchemaEntries on disk
* Delete DiskSchemaEntries on disk.
*
* @param offset the offset of a record on the disk file
* @throws MetadataException
* @throws MetadataException metadata exception
*/
void deleteDiskSchemaEntryByOffset(long offset) throws MetadataException;
/** close file and free resource */
/**
* Close file and free resource.
*
* @throws IOException IOException
*/
void close() throws IOException;
}
......@@ -26,7 +26,7 @@ import java.io.InputStream;
import java.io.OutputStream;
/**
* the disk schema entry of schema entry of id table. This is a po class, so every field is public
* The disk schema entry of a schema entry in the id table. This is a PO class, so every field is public.
*/
public class DiskSchemaEntry {
......
......@@ -21,15 +21,15 @@ package org.apache.iotdb.db.metadata.idtable.entry;
import java.nio.ByteBuffer;
/** device id interface */
/** Device id interface. */
public interface IDeviceID {
/**
* to string format
* To string format.
*
* @return string format device id
*/
public String toStringID();
String toStringID();
public void serialize(ByteBuffer byteBuffer);
void serialize(ByteBuffer byteBuffer);
}
......@@ -24,7 +24,7 @@ import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
import java.nio.ByteBuffer;
import java.util.Objects;
/** Using device id path as id */
/** Using device id path as id. */
public class PlainDeviceID implements IDeviceID {
String deviceID;
......
......@@ -28,7 +28,7 @@ import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
/** Using sha 256 hash value of device path as device ID */
/** Using sha 256 hash value of device path as device ID. */
public class SHA256DeviceID implements IDeviceID {
// four long value form a 32 bytes sha 256 value
long l1;
......@@ -38,13 +38,13 @@ public class SHA256DeviceID implements IDeviceID {
private static final String SEPARATOR = "_";
/** using lots of message digest for improving parallelism */
// use multiple message digests to improve parallelism
private static MessageDigest[] md;
/** number of message digest, for improve parallelism */
// number of message digests, to improve parallelism
private static final int MD_NUM = 256;
/** logger */
// logger
private static Logger logger = LoggerFactory.getLogger(IDTable.class);
static {
......@@ -70,7 +70,7 @@ public class SHA256DeviceID implements IDeviceID {
}
/**
* build device id from a sha 256 string, like "1#1#1#1"
* Build device id from a sha 256 string, like "1#1#1#1".
*
* @param deviceID a sha 256 string
*/
......@@ -86,7 +86,7 @@ public class SHA256DeviceID implements IDeviceID {
}
/**
* build device id from a device path
* Build device id from a device path.
*
* @param deviceID device path
*/
......@@ -105,7 +105,7 @@ public class SHA256DeviceID implements IDeviceID {
l4 = toLong(hashVal, 24);
}
/** The probability that each bit of sha 256 is 0 or 1 is equal */
/** The probability that each bit of sha 256 is 0 or 1 is equal. */
public int hashCode() {
return (int) l1;
}
......@@ -133,7 +133,7 @@ public class SHA256DeviceID implements IDeviceID {
}
/**
* calculate slot that this deviceID should in
* Calculate slot that this deviceID should be in.
*
* @param deviceID device id
* @return slot number
......
......@@ -35,7 +35,7 @@ import static org.apache.iotdb.db.utils.EncodingInferenceUtils.getDefaultEncodin
/**
* Schema entry of id table <br>
* Notice that this class is also a last cache container for last cache
* Notice that this class is also a last cache container for last cache.
*/
public class SchemaEntry {
......@@ -45,7 +45,7 @@ public class SchemaEntry {
/* 1 byte of type */
private long schema;
/** This static field will not occupy memory */
// This static field will not occupy memory
private static IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
@TestOnly
......@@ -75,7 +75,7 @@ public class SchemaEntry {
IDeviceID deviceID,
PartialPath fullPath,
boolean isAligned,
IDiskSchemaManager IDiskSchemaManager) {
IDiskSchemaManager diskSchemaManager) {
schema |= dataType.serialize();
schema |= (((long) encoding.serialize()) << 8);
schema |= (((long) compressionType.serialize()) << 16);
......@@ -91,12 +91,12 @@ public class SchemaEntry {
encoding.serialize(),
compressionType.serialize(),
isAligned);
schema |= (IDiskSchemaManager.serialize(diskSchemaEntry) << 24);
schema |= (diskSchemaManager.serialize(diskSchemaEntry) << 24);
}
}
/**
* get disk pointer of ts from long value of schema
* Get disk pointer of ts from long value of schema.
*
* @return disk pointer
*/
......@@ -105,7 +105,7 @@ public class SchemaEntry {
}
/**
* get ts data type from long value of schema
* Get ts data type from long value of schema.
*
* @return ts data type
*/
......@@ -114,7 +114,7 @@ public class SchemaEntry {
}
/**
* get ts encoding from long value of schema
* Get ts encoding from long value of schema.
*
* @return ts encoding
*/
......@@ -123,7 +123,7 @@ public class SchemaEntry {
}
/**
* get compression type from long value of schema
* Get compression type from long value of schema.
*
* @return compression type
*/
......
......@@ -16,17 +16,18 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.metadata.idtable.entry;
import org.apache.iotdb.commons.path.PartialPath;
/** A po class contains device id and measurement, represents a timeseries */
/** A po class contains device id and measurement, represents a timeseries. */
public class TimeseriesID {
private IDeviceID deviceID;
private String measurement;
/** build timeseries id from full path */
/** Build timeseries id from full path. */
public TimeseriesID(PartialPath fullPath) {
deviceID = DeviceIDFactory.getInstance().getDeviceID(fullPath.getDevicePath());
measurement = fullPath.getMeasurement();
......
......@@ -28,7 +28,7 @@ import java.io.OutputStream;
/**
* This class provides the ability to buffer the data serialized by nested serializer.
*
* @param <T>
* @param <T> the generic type
*/
@NotThreadSafe
public class BufferedSerializer<T> implements ISerializer<T> {
......
......@@ -27,7 +27,7 @@ import java.nio.ByteBuffer;
* This interface defines the behaviour of a Deserializer of T. An instance of this interface
* provides the ability to deserialize an instance of T from InputStream or Bytebuffer.
*
* @param <T>
* @param <T> the generic type
*/
public interface IDeserializer<T> {
......
......@@ -28,7 +28,7 @@ import java.nio.ByteBuffer;
* This interface defines the behaviour of a Serializer of T. An instance of this interface provides
* the ability to serialize an instance of T to InputStream or Bytebuffer.
*
* @param <T>
* @param <T> the generic type
*/
public interface ISerializer<T> {
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.metadata.logfile;
import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.metadata.logfile;
import java.io.File;
......@@ -32,14 +33,14 @@ public class MLogDescriptionReader {
private MLogDescription deserializeMLogDescription() throws IOException {
try (FileInputStream fileInputStream = new FileInputStream(file)) {
MLogDescription mLogDescription = new MLogDescription();
mLogDescription.deserialize(fileInputStream);
return mLogDescription;
MLogDescription mlogdescription = new MLogDescription();
mlogdescription.deserialize(fileInputStream);
return mlogdescription;
}
}
public long readCheckPoint() throws IOException {
MLogDescription mLogDescription = deserializeMLogDescription();
return mLogDescription.getCheckPoint();
MLogDescription mlogdescription = deserializeMLogDescription();
return mlogdescription.getCheckPoint();
}
}
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.metadata.logfile;
import java.io.File;
......@@ -26,25 +27,24 @@ import java.nio.file.StandardOpenOption;
public class MLogDescriptionWriter {
private final FileChannel fileChannel;
private final MLogDescription mLogDescription;
private final MLogDescription mlogdescription;
public MLogDescriptionWriter(String schemaDir, String logFileName) throws IOException {
File file = new File(schemaDir, logFileName);
if (!file.exists()) {
if (!file.createNewFile()) {
throw new IOException(
String.format(
"Failed to create file %s because the named file already exists", file.getName()));
}
if (!file.exists() && !file.createNewFile()) {
throw new IOException(
String.format(
"Failed to create file %s because the named file already exists", file.getName()));
}
fileChannel =
FileChannel.open(new File(schemaDir, logFileName).toPath(), StandardOpenOption.WRITE);
mLogDescription = new MLogDescription();
mlogdescription = new MLogDescription();
}
public synchronized void updateCheckPoint(long checkPoint) throws IOException {
mLogDescription.setCheckPoint(checkPoint);
mLogDescription.serialize(fileChannel);
mlogdescription.setCheckPoint(checkPoint);
mlogdescription.serialize(fileChannel);
}
public synchronized void close() throws IOException {
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.metadata.logfile;
public class MetadataOperationType {
......
......@@ -41,12 +41,15 @@ import java.util.NoSuchElementException;
* truncate during read process. If some middle part of the log file is corrupted, the read process
* will end and the file will be marked corrupted.
*
* @param <T>
* @param <T> the generic type
*/
public class SchemaLogReader<T> implements AutoCloseable {
private static final Logger LOGGER = LoggerFactory.getLogger(SchemaLogReader.class);
private static final String FILE_CORRUPTED_MSG_TEMPLATE =
"File {} is corrupted. The uncorrupted size is {}.";
private final File logFile;
private final RecordableInputStream inputStream;
......@@ -105,8 +108,7 @@ public class SchemaLogReader<T> implements AutoCloseable {
// failed to read file
nextSchemaPlan = null;
isFileCorrupted = true;
LOGGER.error(
"File {} is corrupted. The uncorrupted size is {}.", logFile.getPath(), currentIndex, e);
LOGGER.error(FILE_CORRUPTED_MSG_TEMPLATE, logFile.getPath(), currentIndex, e);
} catch (Exception e) {
// error occurred when deserializing the entry
nextSchemaPlan = null;
......@@ -114,11 +116,7 @@ public class SchemaLogReader<T> implements AutoCloseable {
if (inputStream.available() > 0) {
// error occurred when deserializing some middle part of the file
isFileCorrupted = true;
LOGGER.error(
"File {} is corrupted. The uncorrupted size is {}.",
logFile.getPath(),
currentIndex,
e);
LOGGER.error(FILE_CORRUPTED_MSG_TEMPLATE, logFile.getPath(), currentIndex, e);
} else {
// the file has already been all read out, but error occurred during deserializing the
// last entry in file ending.
......@@ -128,11 +126,7 @@ public class SchemaLogReader<T> implements AutoCloseable {
} catch (IOException ex) {
// failed to read file
isFileCorrupted = true;
LOGGER.error(
"File {} is corrupted. The uncorrupted size is {}.",
logFile.getPath(),
currentIndex,
e);
LOGGER.error(FILE_CORRUPTED_MSG_TEMPLATE, logFile.getPath(), currentIndex, e);
}
}
}
......@@ -152,7 +146,9 @@ public class SchemaLogReader<T> implements AutoCloseable {
FileChannel channel = outputStream.getChannel()) {
if (currentIndex != channel.size()) {
LOGGER.warn(
"The end of log file {} is corrupted. Start truncate it. The unbroken size is {}. The file size is {}.",
"The end of log file {} is corrupted. "
+ "Start truncate it. "
+ "The unbroken size is {}. The file size is {}.",
logFile.getName(),
currentIndex,
channel.size());
......@@ -170,7 +166,9 @@ public class SchemaLogReader<T> implements AutoCloseable {
ReadWriteIOUtils.skip(inputStream, n);
}
/** This class provides the ability to record the num of read bytes from the nested InputStream */
/**
* This class provides the ability to record the num of read bytes from the nested InputStream.
*/
private static class RecordableInputStream extends InputStream {
private final InputStream inputStream;
......
......@@ -32,7 +32,7 @@ import java.nio.file.Files;
/**
* This class provides the common ability to write a log storing T.
*
* @param <T>
* @param <T> the generic type
*/
public class SchemaLogWriter<T> implements AutoCloseable {
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.metadata.path;
import org.apache.iotdb.commons.path.PartialPath;
......@@ -32,6 +33,11 @@ import java.nio.ByteBuffer;
import java.util.HashSet;
public class PatternTreeMapFactory {
private PatternTreeMapFactory() {
// not allowed construction
}
public static PatternTreeMap<String, StringSerializer> getTriggerPatternTreeMap() {
return new PatternTreeMap<>(
HashSet::new,
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.metadata.tag;
import org.apache.iotdb.commons.conf.CommonDescriptor;
......@@ -88,7 +89,12 @@ public class TagLogFile implements AutoCloseable {
FileUtils.copyFile(tagFile, targetFile);
}
/** @return tags map, attributes map */
/**
* Read tags and attributes from tag file.
*
* @return tags map, attributes map
* @throws IOException error occurred when reading disk
*/
public Pair<Map<String, String>, Map<String, String>> read(int size, long position)
throws IOException {
if (position < 0) {
......@@ -113,7 +119,12 @@ public class TagLogFile implements AutoCloseable {
return write(byteBuffer, -1);
}
/** This method does not modify this file's current position. */
/**
* This method does not modify this file's current position.
*
* @throws IOException IOException
* @throws MetadataException metadata exception
*/
public void write(Map<String, String> tagMap, Map<String, String> attributeMap, long position)
throws IOException, MetadataException {
ByteBuffer byteBuffer = convertMapToByteBuffer(tagMap, attributeMap);
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.metadata.tag;
import org.apache.iotdb.commons.conf.CommonConfig;
......@@ -26,8 +27,6 @@ import org.apache.iotdb.commons.path.PartialPath;
import org.apache.iotdb.commons.schema.filter.impl.TagFilter;
import org.apache.iotdb.commons.schema.node.IMNode;
import org.apache.iotdb.commons.schema.node.role.IMeasurementMNode;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.metadata.MetadataConstant;
import org.apache.iotdb.db.metadata.plan.schemaregion.read.IShowTimeSeriesPlan;
import org.apache.iotdb.db.metadata.plan.schemaregion.result.ShowTimeSeriesResult;
......@@ -65,16 +64,14 @@ public class TagManager {
"before deleting it, tag key is %s, tag value is %s, tlog offset is %d, contains key %b";
private static final Logger logger = LoggerFactory.getLogger(TagManager.class);
private static IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
private static final CommonConfig COMMON_CONFIG = CommonDescriptor.getInstance().getConfig();
private String sgSchemaDirPath;
private TagLogFile tagLogFile;
// tag key -> tag value -> LeafMNode
private Map<String, Map<String, Set<IMeasurementMNode<?>>>> tagIndex = new ConcurrentHashMap<>();
private final Map<String, Map<String, Set<IMeasurementMNode<?>>>> tagIndex =
new ConcurrentHashMap<>();
public TagManager(String sgSchemaDirPath) throws IOException {
this.sgSchemaDirPath = sgSchemaDirPath;
tagLogFile = new TagLogFile(sgSchemaDirPath, MetadataConstant.TAG_LOG);
}
......@@ -86,27 +83,35 @@ public class TagManager {
try {
tagLogFile.copyTo(tagLogSnapshotTmp);
if (tagLogSnapshot.exists() && !tagLogSnapshot.delete()) {
logger.error(
logger.warn(
"Failed to delete old snapshot {} while creating tagManager snapshot.",
tagLogSnapshot.getName());
return false;
}
if (!tagLogSnapshotTmp.renameTo(tagLogSnapshot)) {
logger.error(
logger.warn(
"Failed to rename {} to {} while creating tagManager snapshot.",
tagLogSnapshotTmp.getName(),
tagLogSnapshot.getName());
tagLogSnapshot.delete();
if (!tagLogSnapshot.delete()) {
logger.warn("Failed to delete {} after renaming failure.", tagLogSnapshot.getName());
}
return false;
}
return true;
} catch (IOException e) {
logger.error("Failed to create tagManager snapshot due to {}", e.getMessage(), e);
tagLogSnapshot.delete();
if (!tagLogSnapshot.delete()) {
logger.warn(
"Failed to delete {} after creating tagManager snapshot failure.",
tagLogSnapshot.getName());
}
return false;
} finally {
tagLogSnapshotTmp.delete();
if (!tagLogSnapshotTmp.delete()) {
logger.warn("Failed to delete {}.", tagLogSnapshotTmp.getName());
}
}
}
......@@ -115,15 +120,18 @@ public class TagManager {
File tagSnapshot =
SystemFileFactory.INSTANCE.getFile(snapshotDir, MetadataConstant.TAG_LOG_SNAPSHOT);
File tagFile = SystemFileFactory.INSTANCE.getFile(sgSchemaDirPath, MetadataConstant.TAG_LOG);
if (tagFile.exists()) {
tagFile.delete();
if (tagFile.exists() && !tagFile.delete()) {
logger.warn("Failed to delete existing {} when loading snapshot.", tagFile.getName());
}
try {
FileUtils.copyFile(tagSnapshot, tagFile);
return new TagManager(sgSchemaDirPath);
} catch (IOException e) {
tagFile.delete();
if (!tagFile.delete()) {
logger.warn(
"Failed to delete existing {} when copying snapshot failure.", tagFile.getName());
}
throw e;
}
}
......@@ -237,7 +245,9 @@ public class TagManager {
}
@Override
public void close() {}
public void close() {
// do nothing
}
@Override
public boolean hasNext() {
......@@ -289,7 +299,11 @@ public class TagManager {
};
}
/** remove the node from the tag inverted index */
/**
* Remove the node from the tag inverted index.
*
* @throws IOException error occurred when reading disk
*/
public void removeFromTagInvertedIndex(IMeasurementMNode<?> node) throws IOException {
if (node.getOffset() < 0) {
return;
......@@ -331,8 +345,11 @@ public class TagManager {
}
/**
* upsert tags and attributes key-value for the timeseries if the key has existed, just use the
* Upsert tags and attributes key-value for the timeseries if the key has existed, just use the
* new value to update it.
*
* @throws MetadataException metadata exception
* @throws IOException error occurred when reading disk
*/
public void updateTagsAndAttributes(
Map<String, String> tagsMap,
......@@ -395,10 +412,11 @@ public class TagManager {
}
/**
* add new attributes key-value for the timeseries
* Add new attributes key-value for the timeseries.
*
* @param attributesMap newly added attributes map
* @throws MetadataException tagLogFile write error or attributes already exist
* @throws IOException error occurred when reading disk
*/
public void addAttributes(
Map<String, String> attributesMap, PartialPath fullPath, IMeasurementMNode<?> leafMNode)
......@@ -422,11 +440,12 @@ public class TagManager {
}
/**
* add new tags key-value for the timeseries
* Add new tags key-value for the timeseries.
*
* @param tagsMap newly added tags map
* @param fullPath timeseries
* @throws MetadataException tagLogFile write error or tag already exists
* @throws IOException error occurred when reading disk
*/
public void addTags(
Map<String, String> tagsMap, PartialPath fullPath, IMeasurementMNode<?> leafMNode)
......@@ -457,6 +476,8 @@ public class TagManager {
* exist.
*
* @param keySet tags key or attributes key
* @throws MetadataException metadata exception
* @throws IOException error occurred when reading disk
*/
public void dropTagsOrAttributes(
Set<String> keySet, PartialPath fullPath, IMeasurementMNode<?> leafMNode)
......@@ -483,15 +504,15 @@ public class TagManager {
tagLogFile.write(pair.left, pair.right, leafMNode.getOffset());
Map<String, Set<IMeasurementMNode<?>>> tagVal2LeafMNodeSet;
Set<IMeasurementMNode<?>> MMNodes;
Set<IMeasurementMNode<?>> nodeSet;
for (Map.Entry<String, String> entry : deleteTag.entrySet()) {
String key = entry.getKey();
String value = entry.getValue();
// change the tag inverted index map
tagVal2LeafMNodeSet = tagIndex.get(key);
if (tagVal2LeafMNodeSet != null) {
MMNodes = tagVal2LeafMNodeSet.get(value);
if (MMNodes != null) {
nodeSet = tagVal2LeafMNodeSet.get(value);
if (nodeSet != null) {
if (logger.isDebugEnabled()) {
logger.debug(
String.format(
......@@ -501,8 +522,8 @@ public class TagManager {
leafMNode.getOffset()));
}
MMNodes.remove(leafMNode);
if (MMNodes.isEmpty()) {
nodeSet.remove(leafMNode);
if (nodeSet.isEmpty()) {
tagVal2LeafMNodeSet.remove(value);
if (tagVal2LeafMNodeSet.isEmpty()) {
tagIndex.remove(key);
......@@ -524,10 +545,11 @@ public class TagManager {
}
/**
* set/change the values of tags or attributes
* Set/change the values of tags or attributes.
*
* @param alterMap the new tags or attributes key-value
* @throws MetadataException tagLogFile write error or tags/attributes do not exist
* @throws IOException error occurred when reading disk
*/
public void setTagsOrAttributesValue(
Map<String, String> alterMap, PartialPath fullPath, IMeasurementMNode<?> leafMNode)
......@@ -592,12 +614,13 @@ public class TagManager {
}
/**
* Rename the tag or attribute's key of the timeseries
* Rename the tag or attribute's key of the timeseries.
*
* @param oldKey old key of tag or attribute
* @param newKey new key of tag or attribute
* @throws MetadataException tagLogFile write error or does not have tag/attribute or already has
* a tag/attribute named newKey
* @throws IOException error occurred when reading disk
*/
public void renameTagOrAttributeKey(
String oldKey, String newKey, PartialPath fullPath, IMeasurementMNode<?> leafMNode)
......
......@@ -135,20 +135,16 @@ public class ClusterTemplateManager implements ITemplateManager {
private TCreateSchemaTemplateReq constructTCreateSchemaTemplateReq(
CreateSchemaTemplateStatement statement) {
TCreateSchemaTemplateReq req = new TCreateSchemaTemplateReq();
try {
Template template =
new Template(
statement.getName(),
statement.getMeasurements(),
statement.getDataTypes(),
statement.getEncodings(),
statement.getCompressors(),
statement.isAligned());
req.setName(template.getName());
req.setSerializedTemplate(template.serialize());
} catch (IllegalPathException e) {
throw new RuntimeException(e);
}
Template template =
new Template(
statement.getName(),
statement.getMeasurements(),
statement.getDataTypes(),
statement.getEncodings(),
statement.getCompressors(),
statement.isAligned());
req.setName(template.getName());
req.setSerializedTemplate(template.serialize());
return req;
}
......@@ -157,11 +153,10 @@ public class ClusterTemplateManager implements ITemplateManager {
List<Template> templatesList = new ArrayList<>();
try (ConfigNodeClient configNodeClient =
CONFIG_NODE_CLIENT_MANAGER.borrowClient(ConfigNodeInfo.CONFIG_REGION_ID)) {
TGetAllTemplatesResp tGetAllTemplatesResp = configNodeClient.getAllTemplates();
TGetAllTemplatesResp resp = configNodeClient.getAllTemplates();
// Get response or throw exception
if (tGetAllTemplatesResp.getStatus().getCode()
== TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
List<ByteBuffer> list = tGetAllTemplatesResp.getTemplateList();
if (resp.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
List<ByteBuffer> list = resp.getTemplateList();
list.forEach(
templateData -> {
Template template = new Template();
......@@ -170,9 +165,7 @@ public class ClusterTemplateManager implements ITemplateManager {
});
} else {
throw new RuntimeException(
new IoTDBException(
tGetAllTemplatesResp.getStatus().getMessage(),
tGetAllTemplatesResp.getStatus().getCode()));
new IoTDBException(resp.getStatus().getMessage(), resp.getStatus().getCode()));
}
} catch (ClientManagerException | TException e) {
throw new RuntimeException(
......@@ -431,7 +424,7 @@ public class ClusterTemplateManager implements ITemplateManager {
}
} catch (IllegalPathException ignored) {
// won't reach here
}
}
}
......
......@@ -31,22 +31,20 @@ import java.util.Map;
public interface ITemplateManager {
/**
* Create schema template by sending request to ConfigNode.
*
* @param statement CreateSchemaTemplateStatement
* @return TSStatus
*/
TSStatus createSchemaTemplate(CreateSchemaTemplateStatement statement);
/**
* show schema templates
*
* @return List<Template>
*/
/** Show schema templates. */
List<Template> getAllTemplates();
/**
* show nodes in schema template xx
*
* @param name
* @param name template name
* @return Template
*/
Template getTemplate(String name) throws IoTDBException;
......@@ -54,18 +52,17 @@ public interface ITemplateManager {
Template getTemplate(int id);
/**
* mount template
* Set template to given path.
*
* @param name templateName
* @param path mount path
* @param path set path
*/
void setSchemaTemplate(String queryId, String name, PartialPath path);
/**
* get info of mounted template
* Get info of mounted template.
*
* @param name
* @return
* @param name template name
*/
List<PartialPath> getPathsSetTemplate(String name);
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.metadata.template;
import org.apache.iotdb.commons.exception.IllegalPathException;
......@@ -45,8 +46,6 @@ public class Template implements Serializable {
private boolean isDirectAligned;
private Map<String, IMeasurementSchema> schemaMap;
private transient int rehashCode;
public Template() {
schemaMap = new ConcurrentHashMap<>();
}
......@@ -56,8 +55,7 @@ public class Template implements Serializable {
List<String> measurements,
List<TSDataType> dataTypes,
List<TSEncoding> encodings,
List<CompressionType> compressors)
throws IllegalPathException {
List<CompressionType> compressors) {
this(name, measurements, dataTypes, encodings, compressors, false);
}
......@@ -67,8 +65,7 @@ public class Template implements Serializable {
List<TSDataType> dataTypes,
List<TSEncoding> encodings,
List<CompressionType> compressors,
boolean isAligned)
throws IllegalPathException {
boolean isAligned) {
this.isDirectAligned = isAligned;
this.schemaMap = new ConcurrentHashMap<>();
this.name = name;
......@@ -184,7 +181,7 @@ public class Template implements Serializable {
try {
serialize(outputStream);
} catch (IOException ignored) {
// won't reach here
}
return ByteBuffer.wrap(outputStream.toByteArray());
}
......@@ -222,8 +219,6 @@ public class Template implements Serializable {
@Override
public int hashCode() {
return rehashCode != 0
? rehashCode
: new HashCodeBuilder(17, 37).append(name).append(schemaMap).toHashCode();
return new HashCodeBuilder(17, 37).append(name).append(schemaMap).toHashCode();
}
}
......@@ -42,6 +42,7 @@ public class TemplateInternalRPCUtil {
ReadWriteIOUtils.write(1, outputStream);
ReadWriteIOUtils.write(templateSetPath, outputStream);
} catch (IOException ignored) {
// won't reach here
}
return outputStream.toByteArray();
}
......@@ -79,6 +80,7 @@ public class TemplateInternalRPCUtil {
}
}
} catch (IOException ignored) {
// won't reach here
}
return outputStream.toByteArray();
}
......@@ -110,7 +112,7 @@ public class TemplateInternalRPCUtil {
ReadWriteIOUtils.write(templateId, outputStream);
ReadWriteIOUtils.write(path, outputStream);
} catch (IOException ignored) {
// won't reach here
}
return outputStream.toByteArray();
}
......
......@@ -27,6 +27,10 @@ import java.nio.ByteBuffer;
public class TemplateAlterOperationUtil {
private TemplateAlterOperationUtil() {
// not allowed construction
}
public static byte[] generateExtendTemplateReqInfo(
TemplateAlterOperationType operationType, TemplateAlterInfo templateAlterInfo) {
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
......@@ -34,7 +38,7 @@ public class TemplateAlterOperationUtil {
operationType.serialize(outputStream);
templateAlterInfo.serialize(outputStream);
} catch (IOException ignored) {
// won't reach here
}
return outputStream.toByteArray();
}
......
......@@ -58,6 +58,7 @@ public class TemplateExtendInfo extends TemplateAlterInfo {
this.compressors = compressors;
}
@Override
public String getTemplateName() {
return templateName;
}
......@@ -148,30 +149,30 @@ public class TemplateExtendInfo extends TemplateAlterInfo {
public List<String> updateAsDifferenceAndGetIntersection(Set<String> targetMeasurementSet) {
List<String> removedMeasurements = new ArrayList<>();
List<String> measurements = new ArrayList<>();
List<TSDataType> dataTypes = new ArrayList<>();
List<TSEncoding> encodings = this.encodings == null ? null : new ArrayList<>();
List<CompressionType> compressors = this.compressors == null ? null : new ArrayList<>();
List<String> updatedMeasurements = new ArrayList<>();
List<TSDataType> updatedDataTypes = new ArrayList<>();
List<TSEncoding> updatedEncodings = this.encodings == null ? null : new ArrayList<>();
List<CompressionType> updatedCompressors = this.compressors == null ? null : new ArrayList<>();
for (int i = 0; i < this.measurements.size(); i++) {
if (targetMeasurementSet.contains(this.measurements.get(i))) {
removedMeasurements.add(this.measurements.get(i));
continue;
}
measurements.add(this.measurements.get(i));
dataTypes.add(this.dataTypes.get(i));
updatedMeasurements.add(this.measurements.get(i));
updatedDataTypes.add(this.dataTypes.get(i));
if (this.encodings != null) {
encodings.add(this.encodings.get(i));
updatedEncodings.add(this.encodings.get(i));
}
if (this.compressors != null) {
compressors.add(this.compressors.get(i));
updatedCompressors.add(this.compressors.get(i));
}
}
this.measurements = measurements;
this.dataTypes = dataTypes;
this.encodings = encodings;
this.compressors = compressors;
this.measurements = updatedMeasurements;
this.dataTypes = updatedDataTypes;
this.encodings = updatedEncodings;
this.compressors = updatedCompressors;
return removedMeasurements;
}
......@@ -180,6 +181,7 @@ public class TemplateExtendInfo extends TemplateAlterInfo {
return measurements == null || measurements.isEmpty();
}
@Override
public void serialize(OutputStream outputStream) throws IOException {
super.serialize(outputStream);
ReadWriteIOUtils.write(measurements.size(), outputStream);
......@@ -209,6 +211,7 @@ public class TemplateExtendInfo extends TemplateAlterInfo {
}
}
@Override
public void deserialize(ByteBuffer buffer) {
super.deserialize(buffer);
int size = ReadWriteIOUtils.readInt(buffer);
......
......@@ -200,9 +200,11 @@ public class ClusterSchemaTree implements ISchemaTree {
*
* @param schemaComputation the statement
* @param indexOfTargetLogicalView the index list of logicalViewSchemaList that you want to check
* @throws SemanticException path not exist or different source path of view
*/
public void computeSourceOfLogicalView(
ISchemaComputation schemaComputation, List<Integer> indexOfTargetLogicalView) {
ISchemaComputation schemaComputation, List<Integer> indexOfTargetLogicalView)
throws SemanticException {
if (!schemaComputation.hasLogicalViewNeedProcess()) {
return;
}
......@@ -297,11 +299,6 @@ public class ClusterSchemaTree implements ISchemaTree {
}
}
@Override
public boolean hasLogicalViewMeasurement() {
return this.hasLogicalMeasurementPath;
}
public void mergeSchemaTree(ClusterSchemaTree schemaTree) {
this.hasLogicalMeasurementPath =
this.hasLogicalMeasurementPath || schemaTree.hasLogicalViewMeasurement();
......@@ -338,6 +335,11 @@ public class ClusterSchemaTree implements ISchemaTree {
}
}
@Override
public boolean hasLogicalViewMeasurement() {
return this.hasLogicalMeasurementPath;
}
public void serialize(OutputStream outputStream) throws IOException {
root.serialize(outputStream);
}
......@@ -395,6 +397,7 @@ public class ClusterSchemaTree implements ISchemaTree {
*
* @param pathName only full path, cannot be path pattern
* @return database in the given path
* @throws SemanticException no matched database
*/
@Override
public String getBelongedDatabase(String pathName) {
......
......@@ -85,17 +85,17 @@ public class DeviceSchemaInfo {
.collect(Collectors.toList());
}
List<MeasurementPath> measurementPaths = new ArrayList<>();
for (IMeasurementSchemaInfo iMeasurementSchemaInfo : measurementSchemaInfoList) {
for (IMeasurementSchemaInfo measurementSchemaInfo : measurementSchemaInfoList) {
MeasurementPath measurementPath =
new MeasurementPath(
devicePath.concatNode(iMeasurementSchemaInfo.getName()),
iMeasurementSchemaInfo.getSchema());
devicePath.concatNode(measurementSchemaInfo.getName()),
measurementSchemaInfo.getSchema());
measurementPath.setUnderAlignedEntity(isAligned);
if (measurements.contains(iMeasurementSchemaInfo.getName())) {
if (measurements.contains(measurementSchemaInfo.getName())) {
measurementPaths.add(measurementPath);
} else if (iMeasurementSchemaInfo.getAlias() != null
&& measurements.contains(iMeasurementSchemaInfo.getAlias())) {
measurementPath.setMeasurementAlias(iMeasurementSchemaInfo.getAlias());
} else if (measurementSchemaInfo.getAlias() != null
&& measurements.contains(measurementSchemaInfo.getAlias())) {
measurementPath.setMeasurementAlias(measurementSchemaInfo.getAlias());
measurementPaths.add(measurementPath);
}
}
......
......@@ -29,7 +29,11 @@ public interface IMeasurementSchemaInfo {
IMeasurementSchema getSchema();
/** @return if the IMeasurementSchema is MeasurementSchema, return itself; else return null. */
/**
* If the IMeasurementSchema is MeasurementSchema, return itself; else return null.
*
* @return measurementSchema or null if IMeasurementSchema is not MeasurementSchema
*/
MeasurementSchema getSchemaAsMeasurementSchema();
LogicalViewSchema getSchemaAsLogicalViewSchema();
......
......@@ -71,6 +71,10 @@ public interface ISchemaTree {
void mergeSchemaTree(ISchemaTree schemaTree);
/** @return if there is measurementPath in this schema tree, return true; else return false */
/**
* If there is view in this schema tree, return true, else return false.
*
* @return whether there's view in this schema tree
*/
boolean hasLogicalViewMeasurement();
}
......@@ -80,6 +80,11 @@ public class SchemaMeasurementNode extends SchemaNode implements IMeasurementSch
return tagMap;
}
@Override
public SchemaNode getChild(String name) {
return null;
}
@Override
public void replaceChild(String name, SchemaNode newChild) {
throw new UnsupportedOperationException(
......@@ -138,7 +143,7 @@ public class SchemaMeasurementNode extends SchemaNode implements IMeasurementSch
String alias = ReadWriteIOUtils.readString(inputStream);
IMeasurementSchema schema = null;
Byte measurementSchemaType = ReadWriteIOUtils.readByte(inputStream);
byte measurementSchemaType = ReadWriteIOUtils.readByte(inputStream);
if (measurementSchemaType
== MeasurementSchemaType.MEASUREMENT_SCHEMA.getMeasurementSchemaTypeInByteEnum()) {
schema = MeasurementSchema.deserializeFrom(inputStream);
......
......@@ -43,9 +43,7 @@ public abstract class SchemaNode implements ITreeNode {
return name;
}
public SchemaNode getChild(String name) {
return null;
}
public abstract SchemaNode getChild(String name);
public void addChild(String name, SchemaNode child) {}
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.mpp.common.schematree.visitor;
import org.apache.iotdb.commons.path.MeasurementPath;
......
......@@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.mpp.common.schematree.visitor;
import org.apache.iotdb.db.mpp.common.schematree.node.SchemaNode;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册