Unverified commit 3f739bf8, authored by Jackie Tien, committed by GitHub

[IOTDB-1276]Add explain sql support and remove debug_state parameter (#2848)

Parent d726fb31
......@@ -20,7 +20,7 @@
grammar SqlBase;
singleStatement
: statement (';')? EOF
: EXPLAIN? statement (';')? EOF
;
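With this one-token grammar change, any IoTDB statement can be prefixed with EXPLAIN to request per-query debug tracing, for example (series name is illustrative):

EXPLAIN SELECT s1 FROM root.sg.d1 WHERE time > 10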
/*
......@@ -1285,6 +1285,11 @@ LIKE
TOLERANCE
: T O L E R A N C E
;
EXPLAIN
: E X P L A I N
;
//============================
// End of the keywords list
//============================
......
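E through N in the rule above are the grammar's case-insensitive letter fragments, the same convention TOLERANCE uses just before it, so explain, Explain, and EXPLAIN all yield the EXPLAIN token.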
......@@ -41,6 +41,10 @@ public class RemoteQueryContext extends QueryContext {
super(jobId);
}
public RemoteQueryContext(long jobId, boolean debug) {
super(jobId, debug);
}
public void registerRemoteNode(Node node, Node header) {
queriedNodesMap.computeIfAbsent(header, n -> new HashSet<>()).add(node);
}
......
......@@ -281,8 +281,8 @@ public class ClientServer extends TSServiceImpl {
* @return a RemoteQueryContext using queryId
*/
@Override
protected QueryContext genQueryContext(long queryId) {
RemoteQueryContext context = new RemoteQueryContext(queryId);
protected QueryContext genQueryContext(long queryId, boolean debug) {
RemoteQueryContext context = new RemoteQueryContext(queryId, debug);
queryContextMap.put(queryId, context);
return context;
}
......
......@@ -18,6 +18,13 @@
under the License.
-->
# 0.11.0 -> 0.12.0
## add:
## removed:
debug_state=false
# 0.10.0 -> 0.11.0
......
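Upgrade note: after moving to 0.12.0 the debug_state entry can simply be deleted from iotdb-engine.properties; per-query tracing is requested with the EXPLAIN prefix instead of a server-wide switch.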
......@@ -575,9 +575,6 @@ frequency_interval_in_minute=1
# time cost(ms) threshold for slow query
slow_query_threshold=5000
# if the debug_state is true, we will print more details about the process of query
debug_state=false
####################
### MQTT Broker Configuration
####################
......
......@@ -627,9 +627,6 @@ public class IoTDBConfig {
/** time cost(ms) threshold for slow query */
private long slowQueryThreshold = 5000;
/** if the debug_state is true, we will print more details about the process of query */
private boolean debugState = false;
/**
* whether to enable the rpc service. This parameter has no corresponding field in the
* iotdb-engine.properties
......@@ -1992,14 +1989,6 @@ public class IoTDBConfig {
this.slowQueryThreshold = slowQueryThreshold;
}
public boolean isDebugOn() {
return debugState;
}
public void setDebugState(boolean debugState) {
this.debugState = debugState;
}
public boolean isEnableIndex() {
return enableIndex;
}
......
......@@ -641,9 +641,6 @@ public class IoTDBDescriptor {
properties.getProperty(
"slow_query_threshold", String.valueOf(conf.getSlowQueryThreshold()))));
conf.setDebugState(
Boolean.parseBoolean(
properties.getProperty("debug_state", String.valueOf(conf.isDebugOn()))));
conf.setVirtualStorageGroupNum(
Integer.parseInt(
properties.getProperty(
......@@ -1011,9 +1008,6 @@ public class IoTDBDescriptor {
// update slow_query_threshold
conf.setSlowQueryThreshold(Long.parseLong(properties.getProperty("slow_query_threshold")));
// update debug_state
conf.setDebugState(Boolean.parseBoolean(properties.getProperty("debug_state")));
} catch (Exception e) {
throw new QueryProcessException(String.format("Failed to reload configuration because %s", e));
}
......
......@@ -89,6 +89,10 @@ public class ChunkCache {
}
public Chunk get(ChunkMetadata chunkMetaData) throws IOException {
return get(chunkMetaData, false);
}
public Chunk get(ChunkMetadata chunkMetaData, boolean debug) throws IOException {
if (!CACHE_ENABLE) {
TsFileSequenceReader reader =
FileReaderManager.getInstance()
......@@ -134,7 +138,7 @@ public class ChunkCache {
}
}
if (config.isDebugOn()) {
if (debug) {
DEBUG_LOGGER.info("get chunk from cache whose meta data is: " + chunkMetaData);
}
return new Chunk(
......
......@@ -122,9 +122,14 @@ public class TimeSeriesMetadataCache {
return TimeSeriesMetadataCache.TimeSeriesMetadataCacheHolder.INSTANCE;
}
@SuppressWarnings("squid:S1860") // Suppress synchronize warning
public TimeseriesMetadata get(TimeSeriesMetadataCacheKey key, Set<String> allSensors)
throws IOException {
return get(key, allSensors, false);
}
@SuppressWarnings("squid:S1860") // Suppress synchronize warning
public TimeseriesMetadata get(
TimeSeriesMetadataCacheKey key, Set<String> allSensors, boolean debug) throws IOException {
if (!CACHE_ENABLE) {
// bloom filter part
TsFileSequenceReader reader = FileReaderManager.getInstance().get(key.filePath, true);
......@@ -150,7 +155,7 @@ public class TimeSeriesMetadataCache {
cacheHitNum.incrementAndGet();
printCacheLog(true);
} else {
if (config.isDebugOn()) {
if (debug) {
DEBUG_LOGGER.info(
"Cache miss: {}.{} in file: {}", key.device, key.measurement, key.filePath);
DEBUG_LOGGER.info("Device: {}, all sensors: {}", key.device, allSensors);
......@@ -174,7 +179,7 @@ public class TimeSeriesMetadataCache {
TsFileSequenceReader reader = FileReaderManager.getInstance().get(key.filePath, true);
BloomFilter bloomFilter = reader.readBloomFilter();
if (bloomFilter != null && !bloomFilter.contains(path.getFullPath())) {
if (config.isDebugOn()) {
if (debug) {
DEBUG_LOGGER.info("TimeSeries meta data {} is filter by bloomFilter!", key);
}
return null;
......@@ -202,12 +207,12 @@ public class TimeSeriesMetadataCache {
}
}
if (timeseriesMetadata == null) {
if (config.isDebugOn()) {
if (debug) {
DEBUG_LOGGER.info("The file doesn't have this time series {}.", key);
}
return null;
} else {
if (config.isDebugOn()) {
if (debug) {
DEBUG_LOGGER.info(
"Get timeseries: {}.{} metadata in file: {} from cache: {}.",
key.device,
......
......@@ -88,4 +88,20 @@ public class Deletion extends Modification {
public int hashCode() {
return Objects.hash(super.hashCode(), startTime, endTime);
}
@Override
public String toString() {
return "Deletion{"
+ "startTime="
+ startTime
+ ", endTime="
+ endTime
+ ", type="
+ type
+ ", path="
+ path
+ ", fileOffset="
+ fileOffset
+ '}';
}
}
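This new toString is what the modification tracing added in DiskChunkMetadataLoader (further below) prints: each Deletion affecting a series is logged via DEBUG_LOGGER when the query runs under EXPLAIN.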
......@@ -1599,7 +1599,7 @@ public class StorageGroupProcessor {
boolean isSeq)
throws MetadataException {
if (config.isDebugOn()) {
if (context.isDebug()) {
DEBUG_LOGGER.info(
"Path: {}.{}, get tsfile list: {} isSeq: {} timefilter: {}",
deviceId.getFullPath(),
......@@ -1618,7 +1618,8 @@ public class StorageGroupProcessor {
// for upgrade files and old files must be closed
for (TsFileResource tsFileResource : upgradeTsFileResources) {
if (!tsFileResource.isSatisfied(deviceId.getFullPath(), timeFilter, isSeq, dataTTL)) {
if (!tsFileResource.isSatisfied(
deviceId.getFullPath(), timeFilter, isSeq, dataTTL, context.isDebug())) {
continue;
}
closeQueryLock.readLock().lock();
......@@ -1630,7 +1631,8 @@ public class StorageGroupProcessor {
}
for (TsFileResource tsFileResource : tsFileResources) {
if (!tsFileResource.isSatisfied(deviceId.getFullPath(), timeFilter, isSeq, dataTTL)) {
if (!tsFileResource.isSatisfied(
deviceId.getFullPath(), timeFilter, isSeq, dataTTL, context.isDebug())) {
continue;
}
closeQueryLock.readLock().lock();
......@@ -1780,6 +1782,11 @@ public class StorageGroupProcessor {
tsFileResource.getModFile().write(deletion);
// remember to close mod file
tsFileResource.getModFile().close();
logger.info(
"[Deletion] Deletion with path:{}, time:{}-{} written into mods file.",
deletion.getPath(),
deletion.getStartTime(),
deletion.getEndTime());
tsFileResource.updatePlanIndexes(planIndex);
......@@ -1811,6 +1818,9 @@ public class StorageGroupProcessor {
&& startTime <= lastPair.getTimestamp()
&& lastPair.getTimestamp() <= endTime) {
((MeasurementMNode) measurementNode).resetCache();
logger.info(
"[tryToDeleteLastCache] Last cache for path: {} is set to null",
measurementNode.getFullPath());
}
}
}
......@@ -1865,7 +1875,7 @@ public class StorageGroupProcessor {
}
/**
* update latest flush time for partition id </>
* update latest flush time for partition id
*
* @param partitionId partition id
* @param latestFlushTime latest flush time
......@@ -2141,6 +2151,9 @@ public class StorageGroupProcessor {
for (MNode measurementNode : node.getChildren().values()) {
if (measurementNode != null) {
((MeasurementMNode) measurementNode).resetCache();
logger.info(
"[tryToDeleteLastCacheByDevice] Last cache for path: {} is set to null",
measurementNode.getFullPath());
}
}
} catch (MetadataException e) {
......
......@@ -417,6 +417,11 @@ public class TsFileProcessor {
for (PartialPath device : devicePaths) {
workMemTable.delete(
deletion.getPath(), device, deletion.getStartTime(), deletion.getEndTime());
logger.info(
"[Deletion] Delete in-memory data with deletion path: {}, time:{}-{}.",
deletion.getPath(),
deletion.getStartTime(),
deletion.getEndTime());
}
}
// flushing memTables are immutable, only record this deletion in these memTables for query
......@@ -825,6 +830,11 @@ public class TsFileProcessor {
this.tsFileResource.getModFile().write(entry.left);
tsFileResource.getModFile().close();
iterator.remove();
logger.info(
"[Deletion] Deletion with path: {}, time:{}-{} written when flush memtable",
entry.left.getPath(),
((Deletion) (entry.left)).getStartTime(),
((Deletion) (entry.left)).getEndTime());
}
}
} catch (IOException e) {
......
......@@ -549,9 +549,10 @@ public class TsFileResource {
}
/** @return true if the device is contained in the TsFile and its data is still alive under the TTL */
public boolean isSatisfied(String deviceId, Filter timeFilter, boolean isSeq, long ttl) {
public boolean isSatisfied(
String deviceId, Filter timeFilter, boolean isSeq, long ttl, boolean debug) {
if (!getDevices().contains(deviceId)) {
if (config.isDebugOn()) {
if (debug) {
DEBUG_LOGGER.info(
"Path: {} file {} is not satisfied because of no device!", deviceId, file);
}
......@@ -562,7 +563,7 @@ public class TsFileResource {
long endTime = closed || !isSeq ? getEndTime(deviceId) : Long.MAX_VALUE;
if (!isAlive(endTime, ttl)) {
if (config.isDebugOn()) {
if (debug) {
DEBUG_LOGGER.info("Path: {} file {} is not satisfied because of ttl!", deviceId, file);
}
return false;
......@@ -570,7 +571,7 @@ public class TsFileResource {
if (timeFilter != null) {
boolean res = timeFilter.satisfyStartEndTime(startTime, endTime);
if (config.isDebugOn() && !res) {
if (debug && !res) {
DEBUG_LOGGER.info(
"Path: {} file {} is not satisfied because of time filter!", deviceId, fsFactory);
}
......
......@@ -27,12 +27,15 @@ public abstract class Operator {
protected int tokenIntType;
// operator type in String format
protected String tokenName;
// flag of "explain"
protected boolean isDebug;
protected OperatorType operatorType = OperatorType.NULL;
protected Operator(int tokenIntType) {
this.tokenIntType = tokenIntType;
this.tokenName = SQLConstant.tokenNames.get(tokenIntType);
this.isDebug = false;
}
public OperatorType getType() {
......@@ -55,6 +58,14 @@ public abstract class Operator {
this.operatorType = operatorType;
}
public boolean isDebug() {
return isDebug;
}
public void setDebug(boolean debug) {
isDebug = debug;
}
@Override
public String toString() {
return tokenName;
......
......@@ -75,6 +75,8 @@ public abstract class PhysicalPlan {
// a bridge from a cluster raft log to a physical plan
protected long index;
private boolean debug;
/** whether the plan can be split into more than one plan. Only used in cluster mode. */
public boolean canBeSplit() {
return canBeSplit;
......@@ -117,6 +119,14 @@ public abstract class PhysicalPlan {
isQuery = query;
}
public boolean isDebug() {
return debug;
}
public void setDebug(boolean debug) {
this.debug = debug;
}
/**
* Serialize the plan into the given buffer. All necessary fields will be serialized.
*
......
......@@ -260,7 +260,11 @@ public class IoTDBSqlVisitor extends SqlBaseBaseVisitor<Operator> {
@Override
public Operator visitSingleStatement(SingleStatementContext ctx) {
return visit(ctx.statement());
Operator operator = visit(ctx.statement());
if (ctx.EXPLAIN() != null) {
operator.setDebug(true);
}
return operator;
}
@Override
......
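Client-side, nothing new is needed beyond the prefix. A minimal sketch, assuming the IoTDB JDBC driver is on the classpath; the URL, credentials, and series name are illustrative defaults:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class ExplainDemo {
  public static void main(String[] args) throws Exception {
    Class.forName("org.apache.iotdb.jdbc.IoTDBDriver");
    try (Connection conn =
            DriverManager.getConnection("jdbc:iotdb://127.0.0.1:6667/", "root", "root");
        Statement stmt = conn.createStatement()) {
      // The EXPLAIN prefix makes visitSingleStatement call operator.setDebug(true),
      // so the server traces this one query to its QUERY_DEBUG logger.
      // No global debug_state switch is involved anymore.
      stmt.execute("EXPLAIN SELECT s1 FROM root.sg.d1 WHERE time > 10");
    }
  }
}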
......@@ -141,9 +141,16 @@ import java.util.Set;
/** Used to convert logical operator to physical plan */
public class PhysicalGenerator {
@SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
public PhysicalPlan transformToPhysicalPlan(Operator operator, int fetchSize)
throws QueryProcessException {
PhysicalPlan physicalPlan = doTransformation(operator, fetchSize);
physicalPlan.setDebug(operator.isDebug());
return physicalPlan;
}
@SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
private PhysicalPlan doTransformation(Operator operator, int fetchSize)
throws QueryProcessException {
List<PartialPath> paths;
switch (operator.getType()) {
case AUTHOR:
......
......@@ -49,12 +49,19 @@ public class QueryContext {
private long queryTimeLowerBound = Long.MIN_VALUE;
private boolean debug;
public QueryContext() {}
public QueryContext(long queryId) {
this.queryId = queryId;
}
public QueryContext(long queryId, boolean debug) {
this.queryId = queryId;
this.debug = debug;
}
/**
* Find the modifications of timeseries 'path' in 'modFile'. If they are not in the cache, read
* them from 'modFile' and put them into the cache.
......@@ -87,6 +94,10 @@ public class QueryContext {
return queryId;
}
public boolean isDebug() {
return debug;
}
public long getQueryTimeLowerBound() {
return queryTimeLowerBound;
}
......
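Read together, the visitor, PhysicalGenerator, and QueryContext hunks above form one hand-off chain for the flag (the final hop appears in the TSServiceImpl hunk further below). A condensed sketch, with every call taken from this diff:

// Parser: IoTDBSqlVisitor#visitSingleStatement
Operator operator = visit(ctx.statement());
if (ctx.EXPLAIN() != null) {
  operator.setDebug(true);
}

// Planner: PhysicalGenerator#transformToPhysicalPlan
PhysicalPlan plan = doTransformation(operator, fetchSize);
plan.setDebug(operator.isDebug());

// Execution: TSServiceImpl#genQueryContext
QueryContext context = new QueryContext(queryId, plan.isDebug());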
......@@ -46,6 +46,9 @@ import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet;
import org.apache.iotdb.tsfile.utils.Binary;
import org.apache.iotdb.tsfile.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
......@@ -63,6 +66,7 @@ public class LastQueryExecutor {
protected IExpression expression;
private static final boolean CACHE_ENABLED =
IoTDBDescriptor.getInstance().getConfig().isLastCacheEnabled();
private static final Logger DEBUG_LOGGER = LoggerFactory.getLogger("QUERY_DEBUG");
public LastQueryExecutor(LastQueryPlan lastQueryPlan) {
this.selectedSeries = lastQueryPlan.getDeduplicatedPaths();
......@@ -185,6 +189,10 @@ public class LastQueryExecutor {
resultContainer.get(i).left = true;
if (CACHE_ENABLED) {
cacheAccessors.get(i).write(resultContainer.get(i).right);
DEBUG_LOGGER.info(
"[LastQueryExecutor] Update last cache for path: {} with timestamp: {}",
seriesPaths.get(i),
resultContainer.get(i).right.getTimestamp());
}
}
}
......@@ -219,8 +227,16 @@ public class LastQueryExecutor {
restDataType.add(dataTypes.get(i));
} else if (!satisfyFilter(filter, tvPair)) {
resultContainer.add(new Pair<>(true, null));
DEBUG_LOGGER.info(
"[LastQueryExecutor] Last cache hit for path: {} with timestamp: {}",
seriesPaths.get(i),
tvPair.getTimestamp());
} else {
resultContainer.add(new Pair<>(true, tvPair));
DEBUG_LOGGER.info(
"[LastQueryExecutor] Last cache hit for path: {} with timestamp: {}",
seriesPaths.get(i),
tvPair.getTimestamp());
}
}
return resultContainer;
......
......@@ -20,6 +20,7 @@
package org.apache.iotdb.db.query.reader.chunk;
import org.apache.iotdb.db.engine.cache.ChunkCache;
import org.apache.iotdb.db.query.context.QueryContext;
import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
import org.apache.iotdb.tsfile.read.common.Chunk;
import org.apache.iotdb.tsfile.read.controller.IChunkLoader;
......@@ -29,13 +30,15 @@ import java.io.IOException;
/** Reads one chunk from disk; only used in the iotdb server module */
public class DiskChunkLoader implements IChunkLoader {
public DiskChunkLoader() {
// do nothing
private final QueryContext context;
public DiskChunkLoader(QueryContext context) {
this.context = context;
}
@Override
public Chunk loadChunk(ChunkMetadata chunkMetaData) throws IOException {
return ChunkCache.getInstance().get(chunkMetaData);
return ChunkCache.getInstance().get(chunkMetaData, context.isDebug());
}
@Override
......
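Passing the QueryContext into the loader replaces the former global config.isDebugOn() lookup, so two concurrent queries can now be traced independently: only the one started with EXPLAIN emits cache-access logs.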
......@@ -29,6 +29,9 @@ import org.apache.iotdb.tsfile.file.metadata.TimeseriesMetadata;
import org.apache.iotdb.tsfile.read.controller.IChunkMetadataLoader;
import org.apache.iotdb.tsfile.read.filter.basic.Filter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
public class DiskChunkMetadataLoader implements IChunkMetadataLoader {
......@@ -39,6 +42,8 @@ public class DiskChunkMetadataLoader implements IChunkMetadataLoader {
// time filter or value filter, only used to check time range
private final Filter filter;
private static final Logger DEBUG_LOGGER = LoggerFactory.getLogger("QUERY_DEBUG");
public DiskChunkMetadataLoader(
TsFileResource resource, PartialPath seriesPath, QueryContext context, Filter filter) {
this.resource = resource;
......@@ -69,6 +74,12 @@ public class DiskChunkMetadataLoader implements IChunkMetadataLoader {
metadata.setVersion(resource.getVersion());
}
}
if (context.isDebug()) {
DEBUG_LOGGER.info("After removed by filter Chunk meta data list is: ");
chunkMetadataList.forEach(c -> DEBUG_LOGGER.info(c.toString()));
}
return chunkMetadataList;
}
......@@ -80,9 +91,23 @@ public class DiskChunkMetadataLoader implements IChunkMetadataLoader {
List<Modification> pathModifications =
context.getPathModifications(resource.getModFile(), seriesPath);
if (context.isDebug()) {
DEBUG_LOGGER.info(
"Modifications size is {} for file Path: {} ",
pathModifications.size(),
resource.getTsFilePath());
pathModifications.forEach(c -> DEBUG_LOGGER.info(c.toString()));
}
if (!pathModifications.isEmpty()) {
QueryUtils.modifyChunkMetaData(chunkMetadataList, pathModifications);
}
if (context.isDebug()) {
DEBUG_LOGGER.info("After modification Chunk meta data list is: ");
chunkMetadataList.forEach(c -> DEBUG_LOGGER.info(c.toString()));
}
// it is ok, even if it is not thread safe, because the cost of creating a DiskChunkLoader is
// very cheap.
chunkMetadataList.forEach(
......@@ -90,7 +115,7 @@ public class DiskChunkMetadataLoader implements IChunkMetadataLoader {
if (chunkMetadata.getChunkLoader() == null) {
chunkMetadata.setFilePath(resource.getTsFilePath());
chunkMetadata.setClosed(resource.isClosed());
chunkMetadata.setChunkLoader(new DiskChunkLoader());
chunkMetadata.setChunkLoader(new DiskChunkLoader(context));
}
});
}
......
......@@ -862,7 +862,7 @@ public class TSServiceImpl implements TSIService.Iface, ServerContext {
if (costTime >= config.getSlowQueryThreshold()) {
SLOW_SQL_LOGGER.info("Cost: {} ms, sql is {}", costTime, statement);
}
if (config.isDebugOn()) {
if (plan.isDebug()) {
SLOW_SQL_LOGGER.info(
"ChunkCache used memory proportion: {}\n"
+ "TimeSeriesMetadataCache used memory proportion: {}",
......@@ -1198,15 +1198,15 @@ public class TSServiceImpl implements TSIService.Iface, ServerContext {
throws QueryProcessException, QueryFilterOptimizationException, StorageEngineException,
IOException, MetadataException, SQLException, TException, InterruptedException {
QueryContext context = genQueryContext(queryId);
QueryContext context = genQueryContext(queryId, physicalPlan.isDebug());
QueryDataSet queryDataSet = executor.processQuery(physicalPlan, context);
queryDataSet.setFetchSize(fetchSize);
queryId2DataSet.put(queryId, queryDataSet);
return queryDataSet;
}
protected QueryContext genQueryContext(long queryId) {
return new QueryContext(queryId);
protected QueryContext genQueryContext(long queryId, boolean debug) {
return new QueryContext(queryId, debug);
}
@Override
......
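The protected genQueryContext(long, boolean) hook mirrors the override in the cluster module's ClientServer shown earlier, so standalone and cluster deployments thread the EXPLAIN flag identically.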
......@@ -104,7 +104,8 @@ public class FileLoaderUtils {
resource.getTsFilePath(),
seriesPath.getDevice(),
seriesPath.getMeasurement()),
allSensors);
allSensors,
context.isDebug());
if (timeSeriesMetadata != null) {
timeSeriesMetadata.setChunkMetadataLoader(
new DiskChunkMetadataLoader(resource, seriesPath, context, filter));
......
......@@ -93,8 +93,8 @@ public class ChunkMetadata implements Accountable {
@Override
public String toString() {
return String.format(
"measurementId: %s, datatype: %s, version: %d, " + "Statistics: %s, deleteIntervalList: %s",
measurementUid, tsDataType, version, statistics, deleteIntervalList);
"measurementId: %s, datatype: %s, version: %d, Statistics: %s, deleteIntervalList: %s, filePath: %s",
measurementUid, tsDataType, version, statistics, deleteIntervalList, filePath);
}
public long getNumOfPoints() {
......
......@@ -231,4 +231,29 @@ public class TimeseriesMetadata implements Accountable {
public void setChunkMetadataList(ArrayList<ChunkMetadata> chunkMetadataList) {
this.chunkMetadataList = chunkMetadataList;
}
@Override
public String toString() {
return "TimeseriesMetadata{"
+ "startOffsetOfChunkMetaDataList="
+ startOffsetOfChunkMetaDataList
+ ", timeSeriesMetadataType="
+ timeSeriesMetadataType
+ ", chunkMetaDataListDataSize="
+ chunkMetaDataListDataSize
+ ", measurementId='"
+ measurementId
+ '\''
+ ", dataType="
+ dataType
+ ", statistics="
+ statistics
+ ", modified="
+ modified
+ ", isSeq="
+ isSeq
+ ", chunkMetadataList="
+ chunkMetadataList
+ '}';
}
}