Commit 2c02bfac authored by Nikolai Kochetov

Compact pipeline.

Parent f4e944cd
@@ -130,6 +130,7 @@ struct QueryPipelineSettings
 {
     QueryPlan::ExplainPipelineOptions query_pipeline_options;
     bool graph = false;
+    bool compact = true;
     constexpr static char name[] = "PIPELINE";
@@ -137,6 +138,7 @@ struct QueryPipelineSettings
     {
         {"header", query_pipeline_options.header},
         {"graph", graph},
+        {"compact", compact},
     };
 };
@@ -262,7 +264,10 @@ BlockInputStreamPtr InterpreterExplainQuery::executeImpl()
         if (settings.graph)
         {
-            printPipeline(pipeline->getProcessors(), buffer);
+            if (settings.compact)
+                printPipelineCompact(pipeline->getProcessors(), buffer, settings.query_pipeline_options.header);
+            else
+                printPipeline(pipeline->getProcessors(), buffer);
         }
         else
         {
......
@@ -312,8 +312,8 @@ private:
     bool has_quota = false;
-    IQueryPlanStep * query_plan_step;
-    size_t query_plan_step_group;
+    IQueryPlanStep * query_plan_step = nullptr;
+    size_t query_plan_step_group = 0;
 };
......
@@ -879,12 +879,11 @@ QueryPipelineProcessorsCollector::~QueryPipelineProcessorsCollector()
 Processors QueryPipelineProcessorsCollector::detachProcessors(size_t group)
 {
-    Processors res;
-    res.swap(processors);
     for (auto & processor : processors)
         processor->setQueryPlanStep(step, group);
+    Processors res;
+    res.swap(processors);
     return res;
 }
......
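The reordering above matters because `res.swap(processors)` empties `processors` before the tagging loop runs, so in the old order no processor ever received its plan step and group. A minimal standalone sketch of the pitfall, using plain standard-library containers instead of ClickHouse's `Processors` (the names here are illustrative only):

```cpp
#include <cassert>
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> processors = {"Source", "Filter", "Sink"};

    // Old order: detach first, then try to tag.
    std::vector<std::string> res;
    res.swap(processors);            // 'processors' is empty from here on
    int tagged = 0;
    for (auto & name : processors)   // iterates zero elements
    {
        name += " (tagged)";         // stands in for setQueryPlanStep(step, group)
        ++tagged;
    }
    assert(tagged == 0);             // nothing was tagged -- the bug

    // Fixed order: tag while the container still owns the elements, then detach.
    processors.swap(res);            // put the elements back for the demo
    for (auto & name : processors)
    {
        name += " (tagged)";
        ++tagged;
    }
    res.swap(processors);
    assert(tagged == 3 && res.size() == 3);
}
```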
#include <Processors/printPipeline.h>
#include <Processors/QueryPlan/IQueryPlanStep.h>
#include <set>
#include <map>
namespace DB
{
void printPipelineCompact(const Processors & processors, WriteBuffer & out, bool with_header)
{
struct Node;
struct Key
{
size_t group;
IQueryPlanStep * step;
std::string name;
auto getTuple() const { return std::forward_as_tuple(group, step, name); }
bool operator<(const Key & other) const
{
return getTuple() < other.getTuple();
}
};
struct EdgeData
{
Block header;
size_t count;
};
using Edge = std::vector<EdgeData>;
struct Node
{
size_t id = 0;
std::map<Node *, Edge> edges = {};
std::vector<const IProcessor *> agents = {};
};
std::map<Key, Node> graph;
auto get_key = [](const IProcessor & processor)
{
return Key{processor.getQueryPlanStepGroup(), processor.getQueryPlanStep(), processor.getName()};
};
for (const auto & processor : processors)
{
auto res = graph.emplace(get_key(*processor), Node());
res.first->second.agents.emplace_back(processor.get());
if (res.second)
res.first->second.id = graph.size();
}
Block empty_header;
for (const auto & processor : processors)
{
auto & from = graph[get_key(*processor)];
for (auto & port : processor->getOutputs())
{
if (!port.isConnected())
continue;
auto & to = graph[get_key(port.getInputPort().getProcessor())];
auto & edge = from.edges[&to];
const auto & header = with_header ? port.getHeader() : empty_header;
bool found = false;
for (auto & item : edge)
{
if (blocksHaveEqualStructure(header, item.header))
{
found = true;
++item.count;
break;
}
}
if (!found)
edge.emplace_back(EdgeData{header, 1});
}
}
std::map<IQueryPlanStep *, std::vector<const Node *>> steps_map;
for (const auto & item : graph)
steps_map[item.first.step].emplace_back(&item.second);
out << "digraph\n{\n";
out << " rankdir=\"LR\";\n";
out << " { node [shape = box]\n";
/// Nodes // TODO quoting and escaping
size_t next_step = 0;
for (const auto & item : steps_map)
{
if (item.first != nullptr)
{
out << " subgraph cluster_" << next_step << " {\n";
out << " label =\"" << item.first->getName() << "\";\n";
out << " style=filled;\n";
out << " color=lightgrey;\n";
out << " node [style=filled,color=white];\n";
out << " { rank = same;\n";
++next_step;
}
for (const auto & node : item.second)
{
const auto & processor = node->agents.front();
out << " n" << node->id << " [label=\"" << processor->getName();
if (node->agents.size() > 1)
out << " x " << node->agents.size();
const auto & description = processor->getDescription();
if (!description.empty())
out << ' ' << description;
out << "\"];\n";
}
if (item.first != nullptr)
{
out << " }\n";
out << " }\n";
}
}
out << " }\n";
/// Edges
for (const auto & item : graph)
{
for (const auto & edge : item.second.edges)
{
for (const auto & data : edge.second)
{
out << " n" << item.second.id << " -> " << "n" << edge.first->id << " [label=\"";
if (data.count > 1)
out << "x " << data.count;
if (with_header)
{
for (const auto & elem : data.header)
{
out << "\n";
elem.dumpStructure(out);
}
}
out << "\"];\n";
}
}
}
out << "}\n";
}
}
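A side note on the grouping key used above: `std::forward_as_tuple` builds a tuple of references over `(group, step, name)`, and `std::tuple`'s `operator<` then provides lexicographic ordering, which is what lets processors with the same step, group, and name collapse into a single node of the ordered map. A self-contained sketch of that idiom with simplified stand-in types (not ClickHouse's):

```cpp
#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <tuple>

struct Key
{
    size_t group;
    const void * step;   // stand-in for IQueryPlanStep *
    std::string name;

    // Tuple of references; std::tuple::operator< compares element-wise, left to right.
    auto getTuple() const { return std::forward_as_tuple(group, step, name); }
    bool operator<(const Key & other) const { return getTuple() < other.getTuple(); }
};

int main()
{
    // Processors sharing (group, step, name) collapse into one node with a count.
    std::map<Key, size_t> nodes;
    ++nodes[Key{0, nullptr, "Source"}];
    ++nodes[Key{0, nullptr, "Source"}];
    ++nodes[Key{1, nullptr, "Aggregating"}];

    for (const auto & [key, count] : nodes)
        std::cout << key.name << " x " << count << "\n";   // Source x 2, Aggregating x 1
}
```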
@@ -15,6 +15,8 @@ template <typename Processors, typename Statuses>
 void printPipeline(const Processors & processors, const Statuses & statuses, WriteBuffer & out)
 {
     out << "digraph\n{\n";
+    out << " rankdir=\"LR\";\n";
+    out << " { node [shape = box]\n";
     auto get_proc_id = [](const IProcessor & proc) -> UInt64
     {
@@ -26,7 +28,7 @@ void printPipeline(const Processors & processors, const Statuses & statuses, Wri
     /// Nodes // TODO quoting and escaping
     for (const auto & processor : processors)
     {
-        out << "n" << get_proc_id(*processor) << "[label=\"" << processor->getName() << processor->getDescription();
+        out << " n" << get_proc_id(*processor) << "[label=\"" << processor->getName() << processor->getDescription();
         if (statuses_iter != statuses.end())
         {
@@ -37,6 +39,8 @@ void printPipeline(const Processors & processors, const Statuses & statuses, Wri
         out << "\"];\n";
     }
+    out << " }\n";
     /// Edges
     for (const auto & processor : processors)
     {
@@ -48,7 +52,7 @@ void printPipeline(const Processors & processors, const Statuses & statuses, Wri
             const IProcessor & curr = *processor;
             const IProcessor & next = port.getInputPort().getProcessor();
-            out << "n" << get_proc_id(curr) << " -> n" << get_proc_id(next) << ";\n";
+            out << " n" << get_proc_id(curr) << " -> n" << get_proc_id(next) << ";\n";
         }
     }
     out << "}\n";
@@ -60,4 +64,6 @@ void printPipeline(const Processors & processors, WriteBuffer & out)
     printPipeline(processors, std::vector<IProcessor::Status>(), out);
 }
+void printPipelineCompact(const Processors & processors, WriteBuffer & out, bool with_header);
 }
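For completeness, a hypothetical call site for the new declaration (a sketch, not part of the commit; it assumes the usual ClickHouse `WriteBufferFromOwnString` helper from `IO/WriteBufferFromString.h`):

```cpp
#include <IO/WriteBufferFromString.h>
#include <Processors/printPipeline.h>
#include <iostream>

/// Dump the compact graph of an already-built pipeline to stdout.
/// 'with_header' additionally prints the block structure on every edge.
void dumpCompactPipeline(const DB::Processors & processors, bool with_header = true)
{
    DB::WriteBufferFromOwnString buffer;
    DB::printPipelineCompact(processors, buffer, with_header);
    std::cout << buffer.str() << std::endl;
}
```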
@@ -105,6 +105,7 @@ SRCS(
     OffsetTransform.cpp
     Pipe.cpp
     Port.cpp
+    printPipeline.cpp
     QueryPipeline.cpp
     ResizeProcessor.cpp
     Sources/DelayedSource.cpp
......