未验证 提交 a1c29277 编写于 作者: B BUAAserein 提交者: GitHub

[IOTDB-5284] Fix some iotdb-server code smells (#8738)

* fix some code smells
上级 621e0369
......@@ -258,9 +258,9 @@ public class ClusterAuthorityFetcher implements IAuthorityFetcher {
List<PathPrivilege> pathPrivilegeList = new ArrayList<>();
user.setName(tPermissionInfoResp.getUserInfo().getUsername());
user.setPassword(tPermissionInfoResp.getUserInfo().getPassword());
for (int i = 0; i < privilegeList.size(); i++) {
for (int i = 0; i < privilegeList.size(); i += 2) {
String path = privilegeList.get(i);
String privilege = privilegeList.get(++i);
String privilege = privilegeList.get(i + 1);
pathPrivilegeList.add(toPathPrivilege(path, privilege));
}
user.setOpenIdUser(tPermissionInfoResp.getUserInfo().isIsOpenIdUser());
......@@ -278,9 +278,9 @@ public class ClusterAuthorityFetcher implements IAuthorityFetcher {
List<String> privilegeList = tPermissionInfoResp.getRoleInfo().get(roleName).getPrivilegeList();
List<PathPrivilege> pathPrivilegeList = new ArrayList<>();
role.setName(tPermissionInfoResp.getRoleInfo().get(roleName).getRoleName());
for (int i = 0; i < privilegeList.size(); i++) {
for (int i = 0; i < privilegeList.size(); i += 2) {
String path = privilegeList.get(i);
String privilege = privilegeList.get(++i);
String privilege = privilegeList.get(i + 1);
pathPrivilegeList.add(toPathPrivilege(path, privilege));
}
role.setPrivilegeList(pathPrivilegeList);
......
......@@ -148,7 +148,7 @@ public class ConfigNodeClient
public static final String MSG_RECONNECTION_FAIL =
"Fail to connect to any config node. Please check status of ConfigNodes";
private static final int retryIntervalMs = 1000;
private static final int RETRY_INTERVAL_MS = 1000;
private long connectionTimeout = ClientPoolProperty.DefaultProperty.WAIT_CLIENT_TIMEOUT_MS;
......@@ -226,7 +226,7 @@ public class ConfigNodeClient
private void waitAndReconnect() throws TException {
try {
// wait to start the next try
Thread.sleep(retryIntervalMs);
Thread.sleep(RETRY_INTERVAL_MS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new TException(
......
......@@ -44,7 +44,7 @@ public class ConfigNodeInfo {
private static final Logger logger = LoggerFactory.getLogger(ConfigNodeInfo.class);
private final String CONFIG_NODE_LIST = "config_node_list";
private static final String CONFIG_NODE_LIST = "config_node_list";
private static final String PROPERTIES_FILE_NAME = "system.properties";
......
......@@ -193,10 +193,10 @@ public class IoTDBConfig {
private int walBufferQueueCapacity = 50;
/** Size threshold of each wal file. Unit: byte */
private volatile long walFileSizeThresholdInByte = 10 * 1024 * 1024;
private volatile long walFileSizeThresholdInByte = 10 * 1024 * 1024L;
/** Size threshold of each checkpoint file. Unit: byte */
private volatile long checkpointFileSizeThresholdInByte = 3 * 1024 * 1024;
private volatile long checkpointFileSizeThresholdInByte = 3 * 1024 * 1024L;
/** Minimum ratio of effective information in wal files */
private volatile double walMinEffectiveInfoRatio = 0.1;
......@@ -206,13 +206,13 @@ public class IoTDBConfig {
* this, wal can flush this memtable to disk, otherwise wal will snapshot this memtable in wal.
* Unit: byte
*/
private volatile long walMemTableSnapshotThreshold = 8 * 1024 * 1024;
private volatile long walMemTableSnapshotThreshold = 8 * 1024 * 1024L;
/** MemTable's max snapshot number in wal file */
private volatile int maxWalMemTableSnapshotNum = 1;
/** The period when outdated wal files are periodically deleted. Unit: millisecond */
private volatile long deleteWalFilesPeriodInMs = 20 * 1000;
private volatile long deleteWalFilesPeriodInMs = 20 * 1000L;
// endregion
/**
......@@ -1001,7 +1001,7 @@ public class IoTDBConfig {
private long throttleThreshold = 50 * 1024 * 1024 * 1024L;
/** Maximum wait time of write cache in IoTConsensus. Unit: ms */
private long cacheWindowTimeInMs = 10 * 1000;
private long cacheWindowTimeInMs = 10 * 1000L;
private long dataRatisConsensusLogAppenderBufferSizeMax = 4 * 1024 * 1024L;
private long schemaRatisConsensusLogAppenderBufferSizeMax = 4 * 1024 * 1024L;
......@@ -1297,8 +1297,8 @@ public class IoTDBConfig {
|| "us".equals(timestampPrecision)
|| "ns".equals(timestampPrecision))) {
logger.error(
"Wrong timestamp precision, please set as: ms, us or ns ! Current is: "
+ timestampPrecision);
"Wrong timestamp precision, please set as: ms, us or ns ! Current is: {}",
timestampPrecision);
System.exit(-1);
}
this.timestampPrecision = timestampPrecision;
......
......@@ -162,7 +162,7 @@ public class IoTDBStartCheck {
}
private IoTDBStartCheck() {
logger.info("Starting IoTDB " + IoTDBConstant.VERSION_WITH_BUILD);
logger.info("Starting IoTDB {}", IoTDBConstant.VERSION_WITH_BUILD);
// check whether SCHEMA_DIR exists, create if not exists
File dir = SystemFileFactory.INSTANCE.getFile(SCHEMA_DIR);
......
......@@ -53,7 +53,7 @@ public class BloomFilterCache {
private BloomFilterCache() {
if (CACHE_ENABLE) {
logger.info("BloomFilterCache size = " + MEMORY_THRESHOLD_IN_BLOOM_FILTER_CACHE);
logger.info("BloomFilterCache size = {}", MEMORY_THRESHOLD_IN_BLOOM_FILTER_CACHE);
}
lruCache =
Caffeine.newBuilder()
......@@ -99,7 +99,7 @@ public class BloomFilterCache {
BloomFilter bloomFilter = lruCache.get(key);
if (debug) {
DEBUG_LOGGER.info("get bloomFilter from cache where filePath is: " + key.filePath);
DEBUG_LOGGER.info("get bloomFilter from cache where filePath is: {}", key.filePath);
}
return bloomFilter;
......
......@@ -64,7 +64,7 @@ public class ChunkCache {
private ChunkCache() {
if (CACHE_ENABLE) {
logger.info("ChunkCache size = " + MEMORY_THRESHOLD_IN_CHUNK_CACHE);
logger.info("ChunkCache size = {}", MEMORY_THRESHOLD_IN_CHUNK_CACHE);
}
lruCache =
Caffeine.newBuilder()
......@@ -127,7 +127,7 @@ public class ChunkCache {
Chunk chunk = lruCache.get(chunkMetaData);
if (debug) {
DEBUG_LOGGER.info("get chunk from cache whose meta data is: " + chunkMetaData);
DEBUG_LOGGER.info("get chunk from cache whose meta data is: {}", chunkMetaData);
}
return new Chunk(
......
......@@ -84,7 +84,7 @@ public class TimeSeriesMetadataCache {
private TimeSeriesMetadataCache() {
if (CACHE_ENABLE) {
logger.info(
"TimeseriesMetadataCache size = " + MEMORY_THRESHOLD_IN_TIME_SERIES_METADATA_CACHE);
"TimeseriesMetadataCache size = {}", MEMORY_THRESHOLD_IN_TIME_SERIES_METADATA_CACHE);
}
lruCache =
Caffeine.newBuilder()
......
......@@ -121,7 +121,7 @@ public class CompactionExceptionHandler {
lostFiles.add(tsFileResource);
}
}
return lostFiles.size() == 0;
return lostFiles.isEmpty();
}
/**
......
......@@ -23,5 +23,6 @@ import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import java.util.List;
/**
 * Performer for cross-space compaction tasks, which merge a set of sequence files with a set of
 * unsequence files. Narrows the source-file contract of {@link ICompactionPerformer} to the
 * two-list (seq + unseq) form.
 */
public interface ICrossCompactionPerformer extends ICompactionPerformer {
// NOTE(review): @Override here assumes ICompactionPerformer declares this exact two-list
// overload — confirm against the parent interface, which is outside this view.
@Override
void setSourceFiles(List<TsFileResource> seqFiles, List<TsFileResource> unseqFiles);
}
......@@ -23,5 +23,6 @@ import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import java.util.List;
/**
 * Performer for inner-space compaction over sequence files only. Narrows the source-file contract
 * of {@link ICompactionPerformer} to a single list of sequence files.
 */
public interface ISeqCompactionPerformer extends ICompactionPerformer {
// NOTE(review): @Override assumes the parent interface declares this single-list overload —
// confirm against ICompactionPerformer, which is outside this view.
@Override
void setSourceFiles(List<TsFileResource> seqFiles);
}
......@@ -23,5 +23,6 @@ import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import java.util.List;
/**
 * Performer for inner-space compaction over unsequence files only. Narrows the source-file
 * contract of {@link ICompactionPerformer} to a single list of unsequence files.
 */
public interface IUnseqCompactionPerformer extends ICompactionPerformer {
// NOTE(review): @Override assumes the parent interface declares this single-list overload —
// confirm against ICompactionPerformer, which is outside this view.
@Override
void setSourceFiles(List<TsFileResource> unseqFiles);
}
......@@ -110,7 +110,7 @@ public class FastCompactionPerformer
public void perform()
throws IOException, MetadataException, StorageEngineException, InterruptedException {
TsFileMetricManager.getInstance()
.addCompactionTempFileNum(!isCrossCompaction, seqFiles.size() > 0, targetFiles.size());
.addCompactionTempFileNum(!isCrossCompaction, !seqFiles.isEmpty(), targetFiles.size());
try (MultiTsFileDeviceIterator deviceIterator =
new MultiTsFileDeviceIterator(seqFiles, unseqFiles, readerCacheMap);
AbstractCompactionWriter compactionWriter =
......@@ -146,7 +146,7 @@ public class FastCompactionPerformer
long currentTempFileSize = compactionWriter.getWriterSize();
TsFileMetricManager.getInstance()
.addCompactionTempFileSize(
!isCrossCompaction, seqFiles.size() > 0, currentTempFileSize - tempFileSize);
!isCrossCompaction, !seqFiles.isEmpty(), currentTempFileSize - tempFileSize);
tempFileSize = currentTempFileSize;
sortedSourceFiles.clear();
}
......@@ -161,9 +161,9 @@ public class FastCompactionPerformer
readerCacheMap = null;
modificationCache = null;
TsFileMetricManager.getInstance()
.addCompactionTempFileNum(!isCrossCompaction, seqFiles.size() > 0, -targetFiles.size());
.addCompactionTempFileNum(!isCrossCompaction, !seqFiles.isEmpty(), -targetFiles.size());
TsFileMetricManager.getInstance()
.addCompactionTempFileSize(!isCrossCompaction, seqFiles.size() > 0, -tempFileSize);
.addCompactionTempFileSize(!isCrossCompaction, !seqFiles.isEmpty(), -tempFileSize);
}
}
......
......@@ -69,7 +69,7 @@ public class ReadChunkCompactionPerformer implements ISeqCompactionPerformer {
// size for file writer is 5% of per compaction task memory budget
long sizeForFileWriter =
(long)
(SystemInfo.getInstance().getMemorySizeForCompaction()
((double) SystemInfo.getInstance().getMemorySizeForCompaction()
/ IoTDBDescriptor.getInstance().getConfig().getCompactionThreadCount()
* IoTDBDescriptor.getInstance().getConfig().getChunkMetadataSizeProportion());
TsFileMetricManager.getInstance().addCompactionTempFileNum(true, true, 1);
......
......@@ -104,7 +104,7 @@ public class ReadPointCompactionPerformer
.getQueryFileManager()
.addUsedFilesForQuery(queryId, queryDataSource);
TsFileMetricManager.getInstance()
.addCompactionTempFileNum(seqFiles.size() == 0, false, targetFiles.size());
.addCompactionTempFileNum(seqFiles.isEmpty(), false, targetFiles.size());
try (AbstractCompactionWriter compactionWriter =
getCompactionWriter(seqFiles, unseqFiles, targetFiles)) {
// Do not close device iterator, because tsfile reader is managed by FileReaderManager.
......@@ -132,9 +132,9 @@ public class ReadPointCompactionPerformer
} finally {
QueryResourceManager.getInstance().endQuery(queryId);
TsFileMetricManager.getInstance()
.addCompactionTempFileNum(seqFiles.size() == 0, false, -targetFiles.size());
.addCompactionTempFileNum(seqFiles.isEmpty(), false, -targetFiles.size());
TsFileMetricManager.getInstance()
.addCompactionTempFileSize(seqFiles.size() == 0, false, tempFileSize);
.addCompactionTempFileSize(seqFiles.isEmpty(), false, tempFileSize);
}
}
......@@ -187,7 +187,7 @@ public class ReadPointCompactionPerformer
// add temp file metrics
long currentWriterSize = compactionWriter.getWriterSize();
TsFileMetricManager.getInstance()
.addCompactionTempFileSize(seqFiles.size() == 0, false, currentWriterSize - tempFileSize);
.addCompactionTempFileSize(seqFiles.isEmpty(), false, currentWriterSize - tempFileSize);
tempFileSize = currentWriterSize;
}
......@@ -240,7 +240,7 @@ public class ReadPointCompactionPerformer
// add temp file metrics
long currentWriterSize = compactionWriter.getWriterSize();
TsFileMetricManager.getInstance()
.addCompactionTempFileSize(seqFiles.size() == 0, false, currentWriterSize - tempFileSize);
.addCompactionTempFileSize(seqFiles.isEmpty(), false, currentWriterSize - tempFileSize);
tempFileSize = currentWriterSize;
}
......
......@@ -365,10 +365,7 @@ public class CompactionRecoverTask {
tsFileManager.getStorageGroupDir()
+ File.separator
+ IoTDBConstant.COMPACTION_MODIFICATION_FILE_NAME_FROM_OLD);
if (!checkAndDeleteFile(compactionModsFileFromOld)) {
return false;
}
return true;
return checkAndDeleteFile(compactionModsFileFromOld);
}
/**
......
......@@ -256,7 +256,7 @@ public class InnerSpaceCompactionTask extends AbstractCompactionTask {
dataRegionId,
targetTsFileResource.getTsFile().getName(),
costTime,
((double) selectedFileSize) / 1024.0d / 1024.0d / costTime);
selectedFileSize / 1024.0d / 1024.0d / costTime);
if (logFile.exists()) {
FileUtils.delete(logFile);
......
......@@ -305,7 +305,7 @@ public class MultiTsFileDeviceIterator implements AutoCloseable {
TsFileSequenceReader reader = readerMap.get(tsFileResource);
List<AlignedChunkMetadata> alignedChunkMetadataList =
reader.getAlignedChunkMetadata(currentDevice.left);
if (alignedChunkMetadataList.size() > 0) {
if (!alignedChunkMetadataList.isEmpty()) {
alignedChunkMetadataList.forEach(x -> x.setFilePath(tsFileResource.getTsFilePath()));
}
applyModificationForAlignedChunkMetadataList(tsFileResource, alignedChunkMetadataList);
......@@ -429,7 +429,7 @@ public class MultiTsFileDeviceIterator implements AutoCloseable {
}
tempCollectedSeries.addAll(chunkMetadataListMap.keySet());
}
if (tempCollectedSeries.size() > 0) {
if (!tempCollectedSeries.isEmpty()) {
if (!hasRemainingSeries()) {
lastSeries = Collections.max(tempCollectedSeries);
}
......@@ -455,7 +455,7 @@ public class MultiTsFileDeviceIterator implements AutoCloseable {
}
public boolean hasNextSeries() {
if (seriesInThisIteration.size() == 0 && !collectSeries()) {
if (seriesInThisIteration.isEmpty() && !collectSeries()) {
return false;
} else {
return true;
......
......@@ -185,7 +185,7 @@ public class AlignedSeriesCompactionExecutor extends SeriesCompactionExecutor {
ModificationUtils.modifyAlignedChunkMetaData(alignedChunkMetadataList, valueModifications);
}
if (alignedChunkMetadataList.size() == 0) {
if (alignedChunkMetadataList.isEmpty()) {
// all chunks has been deleted in this file or current file does not contain this aligned
// device, just remove it
removeFile(fileElement);
......
......@@ -87,7 +87,7 @@ public abstract class SeriesCompactionExecutor {
// added into this list.
private final List<PageElement> candidateOverlappedPages = new ArrayList<>();
public SeriesCompactionExecutor(
protected SeriesCompactionExecutor(
AbstractCompactionWriter compactionWriter,
Map<TsFileResource, TsFileSequenceReader> readerCacheMap,
Map<TsFileResource, List<Modification>> modificationCacheMap,
......@@ -379,11 +379,9 @@ public abstract class SeriesCompactionExecutor {
List<PageElement> elements = new ArrayList<>();
long endTime = page.pageHeader.getEndTime();
for (PageElement element : pageQueue) {
if (element.startTime <= endTime) {
if (!element.isSelected) {
elements.add(element);
element.isSelected = true;
}
if (element.startTime <= endTime && !element.isSelected) {
elements.add(element);
element.isSelected = true;
}
}
elements.sort(Comparator.comparingLong(o -> o.startTime));
......@@ -400,11 +398,9 @@ public abstract class SeriesCompactionExecutor {
List<ChunkMetadataElement> elements = new ArrayList<>();
long endTime = chunkMetadataElement.chunkMetadata.getEndTime();
for (ChunkMetadataElement element : chunkMetadataQueue) {
if (element.chunkMetadata.getStartTime() <= endTime) {
if (!element.isSelected) {
elements.add(element);
element.isSelected = true;
}
if (element.chunkMetadata.getStartTime() <= endTime && !element.isSelected) {
elements.add(element);
element.isSelected = true;
}
}
elements.sort(Comparator.comparingLong(o -> o.startTime));
......
......@@ -124,7 +124,8 @@ public class SingleSeriesCompactionExecutor {
constructChunkWriterFromReadChunk(currentChunk);
}
CompactionMetricsRecorder.recordReadInfo(
currentChunk.getHeader().getSerializedSize() + currentChunk.getHeader().getDataSize());
(long) currentChunk.getHeader().getSerializedSize()
+ currentChunk.getHeader().getDataSize());
// if this chunk is modified, deserialize it into points
if (chunkMetadata.getDeleteIntervalList() != null) {
......@@ -170,7 +171,7 @@ public class SingleSeriesCompactionExecutor {
}
private long getChunkSize(Chunk chunk) {
return chunk.getHeader().getSerializedSize() + chunk.getHeader().getDataSize();
return (long) chunk.getHeader().getSerializedSize() + chunk.getHeader().getDataSize();
}
private void processModifiedChunk(Chunk chunk) throws IOException {
......
......@@ -62,7 +62,7 @@ public abstract class AbstractCrossCompactionWriter extends AbstractCompactionWr
protected List<TsFileResource> targetResources;
public AbstractCrossCompactionWriter(
protected AbstractCrossCompactionWriter(
List<TsFileResource> targetResources, List<TsFileResource> seqFileResources)
throws IOException {
currentDeviceEndTime = new long[seqFileResources.size()];
......
......@@ -42,7 +42,7 @@ public abstract class AbstractInnerCompactionWriter extends AbstractCompactionWr
protected long targetPagePointNum =
TSFileDescriptor.getInstance().getConfig().getMaxNumberOfPointsInPage();
public AbstractInnerCompactionWriter(TsFileResource targetFileResource) throws IOException {
protected AbstractInnerCompactionWriter(TsFileResource targetFileResource) throws IOException {
long sizeForFileWriter =
(long)
(SystemInfo.getInstance().getMemorySizeForCompaction()
......
......@@ -24,6 +24,7 @@ import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import java.util.List;
/**
 * Selector that picks cross-space compaction task candidates from the given sequence and
 * unsequence file lists. Each returned {@code CrossCompactionTaskResource} describes one
 * candidate task's source files.
 */
public interface ICrossSpaceSelector extends ICompactionSelector {
// NOTE(review): @Override assumes ICompactionSelector declares this method (possibly with a
// wider return type) — confirm against the parent interface, which is outside this view.
@Override
List<CrossCompactionTaskResource> selectCrossSpaceTask(
List<TsFileResource> seqFiles, List<TsFileResource> unseqFiles);
}
......@@ -23,5 +23,6 @@ import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import java.util.List;
/**
 * Selector that picks inner-space compaction task candidates among sequence files. Each inner
 * list in the return value is the set of source files for one candidate task.
 */
public interface IInnerSeqSpaceSelector extends ICompactionSelector {
// NOTE(review): @Override assumes ICompactionSelector declares this method — confirm against
// the parent interface, which is outside this view.
@Override
List<List<TsFileResource>> selectInnerSpaceTask(List<TsFileResource> resources);
}
......@@ -23,5 +23,6 @@ import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
import java.util.List;
/**
 * Selector that picks inner-space compaction task candidates among unsequence files. Each inner
 * list in the return value is the set of source files for one candidate task.
 */
public interface IInnerUnseqSpaceSelector extends ICompactionSelector {
// NOTE(review): @Override assumes ICompactionSelector declares this method — confirm against
// the parent interface, which is outside this view.
@Override
List<List<TsFileResource>> selectInnerSpaceTask(List<TsFileResource> resources);
}
......@@ -72,9 +72,8 @@ public class RewriteCrossSpaceCompactionSelector implements ICrossSpaceSelector
this.tsFileManager = tsFileManager;
this.memoryBudget =
(long)
((double)
(SystemInfo.getInstance().getMemorySizeForCompaction()
/ IoTDBDescriptor.getInstance().getConfig().getCompactionThreadCount())
((double) SystemInfo.getInstance().getMemorySizeForCompaction()
/ IoTDBDescriptor.getInstance().getConfig().getCompactionThreadCount()
* config.getUsableCompactionMemoryProportion());
this.maxCrossCompactionFileNum =
IoTDBDescriptor.getInstance().getConfig().getMaxCrossCompactionCandidateFileNum();
......@@ -190,7 +189,7 @@ public class RewriteCrossSpaceCompactionSelector implements ICrossSpaceSelector
long memoryCost) {
// currently, we must allow at least one unseqFile be selected to handle the situation that
// an unseqFile has huge time range but few data points.
if (taskResource.getUnseqFiles().size() == 0) {
if (taskResource.getUnseqFiles().isEmpty()) {
return true;
}
long totalFileSize = unseqFile.getTsFileSize();
......
......@@ -145,7 +145,7 @@ public class LoadTsFileManager {
uuid2Future.remove(uuid);
if (loadDir.delete()) { // this method will check if there sub-dir in this dir.
logger.info(String.format("Delete load dir %s.", loadDir.getPath()));
logger.info("Delete load dir {}.", loadDir.getPath());
}
}
......@@ -155,7 +155,7 @@ public class LoadTsFileManager {
uuid2Future.remove(uuid);
if (loadDir.delete()) { // this method will check if there sub-dir in this dir.
logger.info(String.format("Delete load dir %s.", loadDir.getPath()));
logger.info("Delete load dir {}.", loadDir.getPath());
}
}
......@@ -179,7 +179,7 @@ public class LoadTsFileManager {
FileUtils.deleteDirectory(dir);
}
if (dir.mkdirs()) {
logger.info(String.format("Load TsFile dir %s is created.", dir.getPath()));
logger.info("Load TsFile dir {} is created.", dir.getPath());
}
}
......@@ -192,7 +192,7 @@ public class LoadTsFileManager {
SystemFileFactory.INSTANCE.getFile(
taskDir, partitionInfo.toString() + TsFileConstant.TSFILE_SUFFIX);
if (!newTsFile.createNewFile()) {
logger.error(String.format("Can not create TsFile %s for writing.", newTsFile.getPath()));
logger.error("Can not create TsFile {} for writing.", newTsFile.getPath());
return;
}
......@@ -250,7 +250,7 @@ public class LoadTsFileManager {
writer.close();
}
if (writer.getFile().exists() && !writer.getFile().delete()) {
logger.warn(String.format("Delete File %s error.", writer.getFile()));
logger.warn("Delete File {} error.", writer.getFile());
}
} catch (IOException e) {
logger.warn(
......@@ -261,7 +261,7 @@ public class LoadTsFileManager {
}
}
if (!taskDir.delete()) {
logger.warn(String.format("Can not delete load dir %s.", taskDir.getPath()));
logger.warn("Can not delete load dir {}.", taskDir.getPath());
}
dataPartition2Writer = null;
dataPartition2LastDevice = null;
......
......@@ -321,7 +321,7 @@ public class TracedBufferedReader extends Reader {
nextChar++;
}
}
long d = nChars - nextChar;
long d = (long) nChars - nextChar;
if (r <= d) {
nextChar += r;
r = 0;
......
......@@ -3169,7 +3169,7 @@ public class DataRegion implements IDataRegionForQuery {
List<TsFileResource> seqResourcesToBeSettled,
List<TsFileResource> unseqResourcesToBeSettled,
List<String> tsFilePaths) {
if (tsFilePaths.size() == 0) {
if (tsFilePaths.isEmpty()) {
for (TsFileResource resource : tsFileManager.getTsFileList(true)) {
if (!resource.isClosed()) {
continue;
......
......@@ -64,6 +64,7 @@ public class ForwardEvent implements Event {
return value;
}
@Override
public PartialPath getFullPath() {
return fullPath;
}
......
......@@ -112,6 +112,7 @@ public class BatchHandlerQueue<T extends Event> {
this.queue = queue;
}
@Override
public void run() {
final long maxWaitMillis = 500;
final ArrayList<T> list = new ArrayList<>();
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册