Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
2dot5
ClickHouse
提交
1fa79598
C
ClickHouse
项目概览
2dot5
/
ClickHouse
通知
3
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
C
ClickHouse
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
1fa79598
编写于
5月 28, 2020
作者:
N
Nikolai Kochetov
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Remove some code.
上级
a6975607
变更
10
展开全部
隐藏空白更改
内联
并排
Showing
10 changed files
with
100 additions
and
1028 deletions
+100
-1028
src/Interpreters/IInterpreter.h
src/Interpreters/IInterpreter.h
+0
-4
src/Interpreters/InterpreterSelectQuery.cpp
src/Interpreters/InterpreterSelectQuery.cpp
+86
-792
src/Interpreters/InterpreterSelectQuery.h
src/Interpreters/InterpreterSelectQuery.h
+5
-90
src/Interpreters/InterpreterSelectWithUnionQuery.cpp
src/Interpreters/InterpreterSelectWithUnionQuery.cpp
+4
-69
src/Interpreters/InterpreterSelectWithUnionQuery.h
src/Interpreters/InterpreterSelectWithUnionQuery.h
+0
-6
src/Interpreters/executeQuery.cpp
src/Interpreters/executeQuery.cpp
+3
-6
src/Storages/IStorage.cpp
src/Storages/IStorage.cpp
+0
-19
src/Storages/IStorage.h
src/Storages/IStorage.h
+0
-10
src/Storages/SelectQueryInfo.h
src/Storages/SelectQueryInfo.h
+0
-22
src/Storages/StorageView.cpp
src/Storages/StorageView.cpp
+2
-10
未找到文件。
src/Interpreters/IInterpreter.h
浏览文件 @
1fa79598
...
...
@@ -22,10 +22,6 @@ public:
*/
virtual
BlockIO
execute
()
=
0
;
virtual
QueryPipeline
executeWithProcessors
()
{
throw
Exception
(
"executeWithProcessors not implemented"
,
ErrorCodes
::
NOT_IMPLEMENTED
);
}
virtual
bool
canExecuteWithProcessors
()
const
{
return
false
;
}
virtual
bool
ignoreQuota
()
const
{
return
false
;
}
virtual
bool
ignoreLimits
()
const
{
return
false
;
}
...
...
src/Interpreters/InterpreterSelectQuery.cpp
浏览文件 @
1fa79598
此差异已折叠。
点击以展开。
src/Interpreters/InterpreterSelectQuery.h
浏览文件 @
1fa79598
...
...
@@ -77,12 +77,6 @@ public:
/// Execute a query. Get the stream of blocks to read.
BlockIO
execute
()
override
;
/// Execute the query and return multiple streams for parallel processing.
BlockInputStreams
executeWithMultipleStreams
(
QueryPipeline
&
parent_pipeline
);
QueryPipeline
executeWithProcessors
()
override
;
bool
canExecuteWithProcessors
()
const
override
{
return
true
;
}
bool
ignoreLimits
()
const
override
{
return
options
.
ignore_limits
;
}
bool
ignoreQuota
()
const
override
{
return
options
.
ignore_quota
;
}
...
...
@@ -108,89 +102,15 @@ private:
Block
getSampleBlockImpl
();
struct
Pipeline
{
/** Streams of data.
* The source data streams are produced in the executeFetchColumns function.
* Then they are converted (wrapped in other streams) using the `execute*` functions,
* to get the whole pipeline running the query.
*/
BlockInputStreams
streams
;
/** When executing FULL or RIGHT JOIN, there will be a data stream from which you can read "not joined" rows.
* It has a special meaning, since reading from it should be done after reading from the main streams.
* It is appended to the main streams in UnionBlockInputStream or ParallelAggregatingBlockInputStream.
*/
BlockInputStreamPtr
stream_with_non_joined_data
;
bool
union_stream
=
false
;
/// Cache value of InterpreterSelectQuery::max_streams
size_t
max_threads
=
1
;
BlockInputStreamPtr
&
firstStream
()
{
return
streams
.
at
(
0
);
}
template
<
typename
Transform
>
void
transform
(
Transform
&&
transformation
)
{
for
(
auto
&
stream
:
streams
)
transformation
(
stream
);
if
(
stream_with_non_joined_data
)
transformation
(
stream_with_non_joined_data
);
}
bool
hasMoreThanOneStream
()
const
{
return
streams
.
size
()
+
(
stream_with_non_joined_data
?
1
:
0
)
>
1
;
}
/// Resulting stream is a mix of other streams' data. Distinct and/or order guarantees are broken.
bool
hasMixedStreams
()
const
{
return
hasMoreThanOneStream
()
||
union_stream
;
}
bool
hasDelayedStream
()
const
{
return
stream_with_non_joined_data
!=
nullptr
;
}
bool
initialized
()
const
{
return
!
streams
.
empty
();
}
/// Compatibility with QueryPipeline (Processors)
void
setMaxThreads
(
size_t
max_threads_
)
{
max_threads
=
max_threads_
;
}
size_t
getNumThreads
()
const
{
return
max_threads
;
}
};
template
<
typename
TPipeline
>
void
executeImpl
(
TPipeline
&
pipeline
,
const
BlockInputStreamPtr
&
prepared_input
,
std
::
optional
<
Pipe
>
prepared_pipe
,
QueryPipeline
&
save_context_and_storage
);
void
executeImpl
(
QueryPipeline
&
pipeline
,
const
BlockInputStreamPtr
&
prepared_input
,
std
::
optional
<
Pipe
>
prepared_pipe
);
/// Different stages of query execution.
/// dry_run - don't read from table, use empty header block instead.
void
executeWithMultipleStreamsImpl
(
Pipeline
&
pipeline
,
const
BlockInputStreamPtr
&
input
,
bool
dry_run
);
template
<
typename
TPipeline
>
void
executeFetchColumns
(
QueryProcessingStage
::
Enum
processing_stage
,
TPipeline
&
pipeline
,
void
executeFetchColumns
(
QueryProcessingStage
::
Enum
processing_stage
,
QueryPipeline
&
pipeline
,
const
PrewhereInfoPtr
&
prewhere_info
,
const
Names
&
columns_to_remove_after_prewhere
,
QueryPipeline
&
save_context_and_storage
);
void
executeWhere
(
Pipeline
&
pipeline
,
const
ExpressionActionsPtr
&
expression
,
bool
remove_filter
);
void
executeAggregation
(
Pipeline
&
pipeline
,
const
ExpressionActionsPtr
&
expression
,
bool
overflow_row
,
bool
final
);
void
executeMergeAggregated
(
Pipeline
&
pipeline
,
bool
overflow_row
,
bool
final
);
void
executeTotalsAndHaving
(
Pipeline
&
pipeline
,
bool
has_having
,
const
ExpressionActionsPtr
&
expression
,
bool
overflow_row
,
bool
final
);
void
executeHaving
(
Pipeline
&
pipeline
,
const
ExpressionActionsPtr
&
expression
);
static
void
executeExpression
(
Pipeline
&
pipeline
,
const
ExpressionActionsPtr
&
expression
);
void
executeOrder
(
Pipeline
&
pipeline
,
InputSortingInfoPtr
sorting_info
);
void
executeWithFill
(
Pipeline
&
pipeline
);
void
executeMergeSorted
(
Pipeline
&
pipeline
);
void
executePreLimit
(
Pipeline
&
pipeline
);
void
executeUnion
(
Pipeline
&
pipeline
,
Block
header
);
void
executeLimitBy
(
Pipeline
&
pipeline
);
void
executeLimit
(
Pipeline
&
pipeline
);
void
executeOffset
(
Pipeline
&
pipeline
);
static
void
executeProjection
(
Pipeline
&
pipeline
,
const
ExpressionActionsPtr
&
expression
);
void
executeDistinct
(
Pipeline
&
pipeline
,
bool
before_order
,
Names
columns
);
void
executeExtremes
(
Pipeline
&
pipeline
);
void
executeSubqueriesInSetsAndJoins
(
Pipeline
&
pipeline
,
const
std
::
unordered_map
<
String
,
SubqueryForSet
>
&
subqueries_for_sets
);
void
executeMergeSorted
(
Pipeline
&
pipeline
,
const
SortDescription
&
sort_description
,
UInt64
limit
);
const
Names
&
columns_to_remove_after_prewhere
);
void
executeWhere
(
QueryPipeline
&
pipeline
,
const
ExpressionActionsPtr
&
expression
,
bool
remove_filter
);
void
executeAggregation
(
QueryPipeline
&
pipeline
,
const
ExpressionActionsPtr
&
expression
,
bool
overflow_row
,
bool
final
);
...
...
@@ -213,17 +133,12 @@ private:
String
generateFilterActions
(
ExpressionActionsPtr
&
actions
,
const
ASTPtr
&
row_policy_filter
,
const
Names
&
prerequisite_columns
=
{})
const
;
/// Add ConvertingBlockInputStream to specified header.
static
void
unifyStreams
(
Pipeline
&
pipeline
,
Block
header
);
enum
class
Modificator
{
ROLLUP
=
0
,
CUBE
=
1
};
void
executeRollupOrCube
(
Pipeline
&
pipeline
,
Modificator
modificator
);
void
executeRollupOrCube
(
QueryPipeline
&
pipeline
,
Modificator
modificator
);
/** If there is a SETTINGS section in the SELECT query, then apply settings from it.
...
...
src/Interpreters/InterpreterSelectWithUnionQuery.cpp
浏览文件 @
1fa79598
...
...
@@ -3,15 +3,9 @@
#include <Interpreters/Context.h>
#include <Parsers/ASTSelectWithUnionQuery.h>
#include <Parsers/ASTSelectQuery.h>
#include <DataStreams/UnionBlockInputStream.h>
#include <DataStreams/NullBlockInputStream.h>
#include <DataStreams/ConcatBlockInputStream.h>
#include <DataStreams/ConvertingBlockInputStream.h>
#include <Columns/getLeastSuperColumn.h>
#include <Columns/ColumnConst.h>
#include <Common/typeid_cast.h>
#include <Parsers/queryToString.h>
#include <Parsers/ASTExpressionList.h>
#include <Processors/Sources/NullSource.h>
#include <Processors/QueryPipeline.h>
...
...
@@ -180,69 +174,10 @@ Block InterpreterSelectWithUnionQuery::getSampleBlock(
}
BlockInputStreams
InterpreterSelectWithUnionQuery
::
executeWithMultipleStreams
(
QueryPipeline
&
parent_pipeline
)
{
BlockInputStreams
nested_streams
;
for
(
auto
&
interpreter
:
nested_interpreters
)
{
BlockInputStreams
streams
=
interpreter
->
executeWithMultipleStreams
(
parent_pipeline
);
nested_streams
.
insert
(
nested_streams
.
end
(),
streams
.
begin
(),
streams
.
end
());
}
/// Unify data structure.
if
(
nested_interpreters
.
size
()
>
1
)
{
for
(
auto
&
stream
:
nested_streams
)
stream
=
std
::
make_shared
<
ConvertingBlockInputStream
>
(
stream
,
result_header
,
ConvertingBlockInputStream
::
MatchColumnsMode
::
Position
);
parent_pipeline
.
addInterpreterContext
(
context
);
}
/// Update max_streams due to:
/// - max_distributed_connections for Distributed() engine
/// - max_streams_to_max_threads_ratio
///
/// XXX: res.pipeline.getMaxThreads() cannot be used since it is capped to
/// number of streams, which is empty for non-Processors case.
max_streams
=
(
*
std
::
min_element
(
nested_interpreters
.
begin
(),
nested_interpreters
.
end
(),
[](
const
auto
&
a
,
const
auto
&
b
)
{
return
a
->
getMaxStreams
()
<
b
->
getMaxStreams
();
}))
->
getMaxStreams
();
return
nested_streams
;
}
BlockIO
InterpreterSelectWithUnionQuery
::
execute
()
{
BlockIO
res
;
BlockInputStreams
nested_streams
=
executeWithMultipleStreams
(
res
.
pipeline
);
BlockInputStreamPtr
result_stream
;
if
(
nested_streams
.
empty
())
{
result_stream
=
std
::
make_shared
<
NullBlockInputStream
>
(
getSampleBlock
());
}
else
if
(
nested_streams
.
size
()
==
1
)
{
result_stream
=
nested_streams
.
front
();
nested_streams
.
clear
();
}
else
{
result_stream
=
std
::
make_shared
<
UnionBlockInputStream
>
(
nested_streams
,
nullptr
,
max_streams
);
nested_streams
.
clear
();
}
res
.
in
=
result_stream
;
res
.
pipeline
.
addInterpreterContext
(
context
);
return
res
;
}
QueryPipeline
InterpreterSelectWithUnionQuery
::
executeWithProcessors
()
{
QueryPipeline
main_pipeline
;
QueryPipeline
&
main_pipeline
=
res
.
pipeline
;
std
::
vector
<
QueryPipeline
>
pipelines
;
bool
has_main_pipeline
=
false
;
...
...
@@ -254,12 +189,12 @@ QueryPipeline InterpreterSelectWithUnionQuery::executeWithProcessors()
if
(
!
has_main_pipeline
)
{
has_main_pipeline
=
true
;
main_pipeline
=
interpreter
->
execute
WithProcessors
()
;
main_pipeline
=
interpreter
->
execute
().
pipeline
;
headers
.
emplace_back
(
main_pipeline
.
getHeader
());
}
else
{
pipelines
.
emplace_back
(
interpreter
->
execute
WithProcessors
()
);
pipelines
.
emplace_back
(
interpreter
->
execute
().
pipeline
);
headers
.
emplace_back
(
pipelines
.
back
().
getHeader
());
}
}
...
...
@@ -280,7 +215,7 @@ QueryPipeline InterpreterSelectWithUnionQuery::executeWithProcessors()
main_pipeline
.
addInterpreterContext
(
context
);
return
main_pipeline
;
return
res
;
}
...
...
src/Interpreters/InterpreterSelectWithUnionQuery.h
浏览文件 @
1fa79598
...
...
@@ -29,12 +29,6 @@ public:
BlockIO
execute
()
override
;
/// Execute the query without union of streams.
BlockInputStreams
executeWithMultipleStreams
(
QueryPipeline
&
parent_pipeline
);
QueryPipeline
executeWithProcessors
()
override
;
bool
canExecuteWithProcessors
()
const
override
{
return
true
;
}
bool
ignoreLimits
()
const
override
{
return
options
.
ignore_limits
;
}
bool
ignoreQuota
()
const
override
{
return
options
.
ignore_quota
;
}
...
...
src/Interpreters/executeQuery.cpp
浏览文件 @
1fa79598
...
...
@@ -280,7 +280,6 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
/// Copy query into string. It will be written to log and presented in processlist. If an INSERT query, string will not include data to insertion.
String
query
(
begin
,
query_end
);
BlockIO
res
;
QueryPipeline
&
pipeline
=
res
.
pipeline
;
String
query_for_logging
;
...
...
@@ -338,7 +337,6 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
context
.
resetInputCallbacks
();
auto
interpreter
=
InterpreterFactory
::
get
(
ast
,
context
,
stage
);
bool
use_processors
=
interpreter
->
canExecuteWithProcessors
();
std
::
shared_ptr
<
const
EnabledQuota
>
quota
;
if
(
!
interpreter
->
ignoreQuota
())
...
...
@@ -358,10 +356,9 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
limits
.
size_limits
=
SizeLimits
(
settings
.
max_result_rows
,
settings
.
max_result_bytes
,
settings
.
result_overflow_mode
);
}
if
(
use_processors
)
pipeline
=
interpreter
->
executeWithProcessors
();
else
res
=
interpreter
->
execute
();
res
=
interpreter
->
execute
();
QueryPipeline
&
pipeline
=
res
.
pipeline
;
bool
use_processors
=
pipeline
.
initialized
();
if
(
res
.
pipeline
.
initialized
())
use_processors
=
true
;
...
...
src/Storages/IStorage.cpp
浏览文件 @
1fa79598
...
...
@@ -397,25 +397,6 @@ void IStorage::checkAlterIsPossible(const AlterCommands & commands, const Settin
}
}
BlockInputStreams
IStorage
::
readStreams
(
const
Names
&
column_names
,
const
SelectQueryInfo
&
query_info
,
const
Context
&
context
,
QueryProcessingStage
::
Enum
processed_stage
,
size_t
max_block_size
,
unsigned
num_streams
)
{
ForceTreeShapedPipeline
enable_tree_shape
(
query_info
);
auto
pipes
=
read
(
column_names
,
query_info
,
context
,
processed_stage
,
max_block_size
,
num_streams
);
BlockInputStreams
res
;
res
.
reserve
(
pipes
.
size
());
for
(
auto
&
pipe
:
pipes
)
res
.
emplace_back
(
std
::
make_shared
<
TreeExecutorBlockInputStream
>
(
std
::
move
(
pipe
)));
return
res
;
}
StorageID
IStorage
::
getStorageID
()
const
{
...
...
src/Storages/IStorage.h
浏览文件 @
1fa79598
...
...
@@ -303,16 +303,6 @@ public:
throw
Exception
(
"Method read is not supported by storage "
+
getName
(),
ErrorCodes
::
NOT_IMPLEMENTED
);
}
/** The same as read, but returns BlockInputStreams.
*/
BlockInputStreams
readStreams
(
const
Names
&
/*column_names*/
,
const
SelectQueryInfo
&
/*query_info*/
,
const
Context
&
/*context*/
,
QueryProcessingStage
::
Enum
/*processed_stage*/
,
size_t
/*max_block_size*/
,
unsigned
/*num_streams*/
);
/** Writes the data to a table.
* Receives a description of the query, which can contain information about the data write method.
* Returns an object by which you can write data sequentially.
...
...
src/Storages/SelectQueryInfo.h
浏览文件 @
1fa79598
...
...
@@ -80,28 +80,6 @@ struct SelectQueryInfo
/// Prepared sets are used for indices by storage engine.
/// Example: x IN (1, 2, 3)
PreparedSets
sets
;
/// Temporary flag is needed to support old pipeline with input streams.
/// If enabled, then pipeline returned by storage must be a tree.
/// Processors from the tree can't return ExpandPipeline status.
mutable
bool
force_tree_shaped_pipeline
=
false
;
};
/// RAII class to enable force_tree_shaped_pipeline for SelectQueryInfo.
/// Looks awful, but I hope it's temporary.
struct
ForceTreeShapedPipeline
{
explicit
ForceTreeShapedPipeline
(
const
SelectQueryInfo
&
info_
)
:
info
(
info_
)
{
force_tree_shaped_pipeline
=
info
.
force_tree_shaped_pipeline
;
info
.
force_tree_shaped_pipeline
=
true
;
}
~
ForceTreeShapedPipeline
()
{
info
.
force_tree_shaped_pipeline
=
force_tree_shaped_pipeline
;
}
private:
bool
force_tree_shaped_pipeline
;
const
SelectQueryInfo
&
info
;
};
}
src/Storages/StorageView.cpp
浏览文件 @
1fa79598
...
...
@@ -64,16 +64,8 @@ Pipes StorageView::read(
QueryPipeline
pipeline
;
InterpreterSelectWithUnionQuery
interpreter
(
current_inner_query
,
context
,
{},
column_names
);
/// FIXME res may implicitly use some objects owned by the pipeline, but they will be destroyed after return
if
(
query_info
.
force_tree_shaped_pipeline
)
{
BlockInputStreams
streams
=
interpreter
.
executeWithMultipleStreams
(
pipeline
);
for
(
auto
&
stream
:
streams
)
pipes
.
emplace_back
(
std
::
make_shared
<
SourceFromInputStream
>
(
std
::
move
(
stream
)));
}
else
/// TODO: support multiple streams here. Need more general interface than pipes.
pipes
.
emplace_back
(
interpreter
.
executeWithProcessors
().
getPipe
());
/// TODO: support multiple streams here. Need more general interface than pipes.
pipes
.
emplace_back
(
interpreter
.
execute
().
pipeline
.
getPipe
());
/// It's expected that the columns read from storage are not constant.
/// Because method 'getSampleBlockForColumns' is used to obtain a structure of result in InterpreterSelectQuery.
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录