Commit 397d0567 authored by S superjomn

update cpplint

test=develop
Parent 4e0b25e3
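This commit is a mechanical cpplint cleanup of the lite/ tree. The recurring changes in the hunks below are: adding the standard headers (<memory>, <string>, <utility>, <vector>, ...) that cpplint's include-what-you-use check asks for, marking single-argument constructors explicit, changing the MIR pass Apply() methods to take const std::unique_ptr<SSAGraph>& instead of a non-const reference, replacing short and static const std::string with int16_t and static const char[], adding missing #pragma once guards, dropping the commented-out kernel_executor_lite build rule, and silencing a few false positives with // NOLINT.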
...
@@ -13,6 +13,10 @@
// limitations under the License.
#include "paddle/fluid/lite/api/cxx_api.h"
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
#include "paddle/fluid/platform/port.h"
#endif
...
...
@@ -15,6 +15,7 @@
#include "paddle/fluid/lite/api/cxx_api.h"
#include <gflags/gflags.h>
#include <gtest/gtest.h>
+#include <vector>
#include "paddle/fluid/lite/core/mir/passes.h"
#include "paddle/fluid/lite/core/op_registry.h"
...
...
@@ -18,7 +18,9 @@
*/
#pragma once
+#include <memory>
#include <string>
+#include <utility>
#include <vector>
#include "paddle/fluid/lite/core/program.h"
#include "paddle/fluid/lite/core/types.h"
...
cc_library(lite_gtest_main SRCS lite_gtest_main.cc DEPS gtest)
cc_library(memory_lite SRCS memory.cc DEPS target_wrapper_lite target_wrapper_host)
cc_library(target_wrapper_lite SRCS target_wrapper.cc)
cc_library(lite_tensor SRCS lite_tensor.cc DEPS memory_lite target_wrapper_lite)
if (NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
cc_library(hvy_tensor SRCS hvy_tensor.cc DEPS lod_tensor)
...
@@ -24,7 +23,6 @@ cc_library(op_lite SRCS op_lite.cc DEPS scope_lite op_registry_lite compatible_p
cc_library(types_lite SRCS types.cc)
cc_library(type_system SRCS type_system.cc DEPS ${tensor_lite} target_wrapper_lite)
-#cc_library(kernel_executor_lite SRCS kernel_executor.cc DEPS mir_ssa_graph kernel_lite)
cc_library(program_lite SRCS program.cc DEPS op_lite kernel_lite)
cc_library(optimizer_lite SRCS optimizer.cc DEPS mir_pass_manager model_parser_lite program_lite)
...
...
@@ -20,6 +20,7 @@
#include "paddle/fluid/lite/cuda/cuda_utils.h"
#endif
#include <memory>
+#include <set>
#include <vector>
#include "paddle/fluid/lite/core/target_wrapper.h"
...
...
@@ -62,5 +62,6 @@ std::ostream &operator<<(std::ostream &os,
<< other.place.DebugString();
return os;
}
} // namespace lite
} // namespace paddle
\ No newline at end of file
...
@@ -19,6 +19,7 @@
#include <set>
#include <sstream>
#include <string>
+#include <utility>
#include <vector>
#include "paddle/fluid/lite/core/context.h"
#include "paddle/fluid/lite/core/target_wrapper.h"
...
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/lite/core/kernel_executor.h"
namespace paddle {
namespace lite {} // namespace lite
} // namespace paddle
\ No newline at end of file
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/lite/core/mir/ssa_graph.h"
namespace paddle {
namespace lite {
/*
* KernelExecutor executes a list of kernels.
*/
class KernelExecutorBase {
public:
KernelExecutorBase(std::unique_ptr<mir::Program>&& program);
// Prepare runtime context.
void PrepareWorkspace();
void Run();
private:
lite::Scope* scope_{};
lite::Scope* exec_scope_{};
};
/*
* KernelExecutor executes the kernels without concurrency, works in X86 place.
*/
class SerialKernelExecutor : public KernelExecutorBase {};
/*
* KernelExecutor executes the kernels with CUDA like stream parallel support,
* works in CUDA like devices.
*/
class StreamKernelExecutor : public KernelExecutorBase {};
} // namespace lite
} // namespace paddle
...
@@ -29,7 +29,7 @@ class DDimLite : public DDimBase<DDimLite> {
public:
DDimLite() = default;
-DDimLite(const std::vector<value_type> &x) : DDimBase<DDimLite>() {
+explicit DDimLite(const std::vector<value_type> &x) : DDimBase<DDimLite>() {
ConstructFrom(x);
}
...
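cpplint's runtime/explicit rule is behind the DDimLite change above: a one-argument constructor left implicit lets a std::vector convert into a DDimLite without the author asking for it. A minimal sketch of the difference, using hypothetical stand-in types rather than the real DDimLite:

#include <cstdint>
#include <vector>

// Hypothetical stand-ins for DDimLite, before and after the change.
struct DimImplicit {
  DimImplicit(const std::vector<int64_t> &x) : dims(x) {}  // implicit conversion allowed
  std::vector<int64_t> dims;
};
struct DimExplicit {
  explicit DimExplicit(const std::vector<int64_t> &x) : dims(x) {}
  std::vector<int64_t> dims;
};

void TakeImplicit(const DimImplicit &) {}
void TakeExplicit(const DimExplicit &) {}

int main() {
  std::vector<int64_t> shape{1, 3, 224, 224};
  TakeImplicit(shape);               // compiles: the vector is silently converted
  // TakeExplicit(shape);            // error with explicit: conversion must be spelled out
  TakeExplicit(DimExplicit(shape));  // intent is visible at the call site
  return 0;
}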
...
@@ -21,7 +21,7 @@ namespace mir {
class ArgumentTypeDisplayPass : public DebugPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override {
+void Apply(const std::unique_ptr<SSAGraph>& graph) override {
LOG(INFO) << "== Argument types ==";
for (auto& node : graph->mutable_nodes()) {
if (!node.IsArg()) continue;
...
...
@@ -21,7 +21,7 @@ namespace mir {
class DemoPass : public mir::DebugPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override {}
+void Apply(const std::unique_ptr<SSAGraph> &graph) override {}
};
/*
...
...
@@ -13,6 +13,9 @@
// limitations under the License.
#include "paddle/fluid/lite/core/mir/generate_program_pass.h"
+#include <memory>
+#include <utility>
+#include <vector>
#include "paddle/fluid/lite/core/mir/graph_visualize_pass.h"
#include "paddle/fluid/lite/core/mir/pass_registry.h"
...
@@ -20,7 +23,7 @@ namespace paddle {
namespace lite {
namespace mir {
-void GenerateProgramPass::Apply(std::unique_ptr<mir::SSAGraph>& graph) {
+void GenerateProgramPass::Apply(const std::unique_ptr<SSAGraph>& graph) {
LOG(INFO) << "final program \n" << Visualize(graph.get());
for (auto& item : graph->StmtTopologicalOrder()) {
if (item->IsStmt()) {
...
...
@@ -28,7 +28,7 @@ namespace mir {
*/
class GenerateProgramPass : public ProgramPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override;
+void Apply(const std::unique_ptr<SSAGraph> &graph) override;
std::unique_ptr<RuntimeProgram> GenProgram() {
LOG(INFO) << "insts.size " << insts_.size();
...
...
@@ -13,7 +13,9 @@
// limitations under the License.
#include "paddle/fluid/lite/core/mir/graph_visualize_pass.h"
+#include <memory>
#include <set>
+#include <string>
#include "paddle/fluid/lite/core/mir/pass_registry.h"
namespace paddle {
...
@@ -22,7 +24,7 @@ namespace mir {
using inference::analysis::Dot;
-void GraphVisualizePass::Apply(std::unique_ptr<mir::SSAGraph>& graph) {
+void GraphVisualizePass::Apply(const std::unique_ptr<SSAGraph>& graph) {
Visualize(graph.get());
}
...
...
@@ -27,7 +27,7 @@ namespace mir {
*/
class GraphVisualizePass : public DebugPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override;
+void Apply(const std::unique_ptr<SSAGraph>& graph) override;
};
std::string Visualize(mir::SSAGraph* graph);
...
...
@@ -21,7 +21,7 @@ namespace mir {
class IoCopyKernelPickPass : public StmtPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override {
+void Apply(const std::unique_ptr<SSAGraph>& graph) override {
for (auto& node : graph->mutable_nodes()) {
if (!node.IsStmt()) continue;
auto& inst = node.AsStmt();
...
...
@@ -17,6 +17,7 @@
#include <list>
#include <memory>
#include <string>
+#include <utility>
#include <vector>
#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/op_lite.h"
...
...
@@ -13,6 +13,8 @@
// limitations under the License.
#pragma once
+#include <memory>
+#include <string>
#include "paddle/fluid/lite/core/mir/node.h"
#include "paddle/fluid/lite/core/mir/ssa_graph.h"
...
@@ -32,9 +34,9 @@ class Pass {
kDebug,
};
-Pass(Kind kind) : kind_(kind) {}
+explicit Pass(Kind kind) : kind_(kind) {}
-virtual void Apply(std::unique_ptr<mir::SSAGraph>& graph) = 0;
+virtual void Apply(const std::unique_ptr<SSAGraph>& graph) = 0;
void set_name(const std::string& name) { name_ = name; }
const std::string& name() const { return name_; }
...
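The Pass::Apply signature change follows cpplint's runtime/references rule, which rejects non-const reference parameters. Making the std::unique_ptr reference const keeps every call site unchanged and still lets a pass mutate the graph, because the constness applies to the pointer, not to the SSAGraph it owns. A minimal sketch with a simplified stand-in type (not the real classes):

#include <memory>

struct Graph { int nodes = 0; };  // simplified stand-in for mir::SSAGraph

// const applies to the unique_ptr itself, not to the Graph it owns, so a pass
// can still mutate the graph but cannot reset() or reassign the pointer.
void Apply(const std::unique_ptr<Graph>& graph) {
  graph->nodes += 1;  // OK: mutates the pointee
  // graph.reset();   // would not compile: reset() is non-const
}

int main() {
  auto g = std::make_unique<Graph>();
  Apply(g);  // the owning pointer is passed by reference, no copy or move
  return 0;
}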
...
@@ -32,7 +32,7 @@ class PassManager {
PassManager();
-void Run(std::unique_ptr<SSAGraph>& graph) {
+void Run(const std::unique_ptr<SSAGraph>& graph) {
for (auto& pass : passes_) {
LOG(INFO) << "Running MIR pass " << pass->name();
pass->Apply(graph);
...
...
@@ -27,7 +27,7 @@ class RuntimeContextAssignPass : public StmtPass {
#endif
}
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override {
+void Apply(const std::unique_ptr<SSAGraph>& graph) override {
for (auto& node : graph->mutable_nodes()) {
if (!node.IsStmt()) continue;
...
...
@@ -13,6 +13,10 @@
// limitations under the License.
#include "paddle/fluid/lite/core/mir/ssa_graph.h"
+#include <algorithm>
+#include <memory>
+#include <set>
+#include <utility>
namespace paddle {
namespace lite {
...
...
@@ -27,7 +27,7 @@ bool KernelScoreCmp(const std::pair<size_t, std::unique_ptr<KernelBase>>& a,
return a.first > b.first;
}
-void StaticKernelPickPass::Apply(std::unique_ptr<mir::SSAGraph>& graph) {
+void StaticKernelPickPass::Apply(const std::unique_ptr<SSAGraph>& graph) {
CHECK(kernel_pick_factors_.AnyFactorConsidered())
<< "kernel_pick_factors should be specified first";
CHECK(graph) << "graph not valid";
...
...
@@ -35,7 +35,7 @@ namespace mir {
*/
class StaticKernelPickPass : public mir::StmtPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override;
+void Apply(const std::unique_ptr<SSAGraph>& graph) override;
void SetPreferPlace(const Place& place) { place_ = place; }
const Place& place() const { return place_; }
...
...
@@ -14,7 +14,9 @@
#include "paddle/fluid/lite/core/mir/type_target_transform_pass.h"
#include <list>
+#include <memory>
#include <string>
+#include <utility>
#include <vector>
#include "paddle/fluid/lite/core/mir/graph_visualize_pass.h"
#include "paddle/fluid/lite/core/mir/pass_registry.h"
...
@@ -23,7 +25,7 @@ namespace paddle {
namespace lite {
namespace mir {
-void TypeTargetTransformPass::Apply(std::unique_ptr<mir::SSAGraph>& graph) {
+void TypeTargetTransformPass::Apply(const std::unique_ptr<SSAGraph>& graph) {
// Start from inputs of the graph, those should have place set.
std::list<Node*> nodes;
for (auto& node : graph->mutable_nodes()) {
...
...
@@ -40,7 +40,7 @@ static void UpdateInputTo(framework::proto::OpDesc* desc,
*/
class TypeTargetTransformPass : public ProgramPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override;
+void Apply(const std::unique_ptr<SSAGraph>& graph) override;
void ComplementInputs(SSAGraph* graph, Node* inst_node, Node* in);
...
...
@@ -13,13 +13,14 @@
// limitations under the License.
#include "paddle/fluid/lite/core/mir/variable_place_inference_pass.h"
+#include <memory>
#include "paddle/fluid/lite/core/mir/pass_registry.h"
namespace paddle {
namespace lite {
namespace mir {
-void VariablePlaceInferencePass::Apply(std::unique_ptr<mir::SSAGraph>& graph) {
+void VariablePlaceInferencePass::Apply(const std::unique_ptr<SSAGraph> &graph) {
MarkInputPlace(graph.get());
InferenceArgumentPlace(graph.get());
CheckAllArgumentTypeDetermined(graph.get());
...
...
@@ -13,6 +13,7 @@
// limitations under the License.
#pragma once
+#include <memory>
#include "paddle/fluid/lite/core/mir/pass.h"
#include "paddle/fluid/lite/core/target_wrapper.h"
...
@@ -26,7 +27,7 @@ namespace mir {
*/
class VariablePlaceInferencePass : public DebugPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override;
+void Apply(const std::unique_ptr<SSAGraph>& graph) override;
private:
// Mark the place of input arguments.
...
...
@@ -13,7 +13,10 @@
// limitations under the License.
#include "paddle/fluid/lite/core/op_lite.h"
-#include "op_lite.h"
+#include <list>
+#include <set>
+#include <utility>
+#include <vector>
#include "paddle/fluid/lite/core/op_registry.h"
namespace paddle {
...
...
@@ -13,6 +13,8 @@
// limitations under the License.
#include "paddle/fluid/lite/core/op_registry.h"
+#include <list>
+#include <set>
namespace paddle {
namespace lite {
...
@@ -90,4 +92,4 @@ KernelRegistry &KernelRegistry::Global() {
}
} // namespace lite
} // namespace paddle
\ No newline at end of file
...
@@ -14,8 +14,10 @@
#pragma once
#include <memory>
+#include <set>
#include <string>
#include <unordered_map>
+#include <utility>
#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/op_lite.h"
#include "paddle/fluid/lite/core/target_wrapper.h"
...
@@ -43,7 +45,7 @@ class LiteOpRegistry final : public Factory<OpLite, std::shared_ptr<OpLite>> {
template <typename OpClass>
class OpLiteRegistor : public Registor<OpClass> {
public:
-OpLiteRegistor(const std::string &op_type)
+explicit OpLiteRegistor(const std::string &op_type)
: Registor<OpClass>([&] {
LiteOpRegistry::Global().Register(
op_type, [op_type]() -> std::unique_ptr<OpLite> {
...
...
@@ -13,6 +13,7 @@
// limitations under the License.
#pragma once
+#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/lite/core/mir/generate_program_pass.h"
...
...
@@ -14,6 +14,8 @@
#include "paddle/fluid/lite/core/optimizer.h"
#include <gtest/gtest.h>
+#include <memory>
+#include <utility>
#include "paddle/fluid/lite/core/mir/generate_program_pass.h"
#include "paddle/fluid/lite/core/mir/pass_manager.h"
#include "paddle/fluid/lite/core/mir/passes.h"
...
...
@@ -14,10 +14,11 @@
#pragma once
#include <list>
+#include <memory>
#include <string>
+#include <utility>
#include <vector>
#include "paddle/fluid/lite/core/kernel.h"
-#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/mir/node.h"
#include "paddle/fluid/lite/core/op_lite.h"
#include "paddle/fluid/lite/core/op_registry.h"
...
@@ -25,7 +26,7 @@
namespace paddle {
namespace lite {
-static const std::string kKernelTypeAttr = "__@kernel_type_attr@__";
+static const char kKernelTypeAttr[] = "__@kernel_type_attr@__";
// A program is used to represent a code program, in Paddle, a code program
// contains:
...
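The kKernelTypeAttr rewrite above is driven by cpplint's runtime/string rule: a non-trivial object with static storage duration pays a construction and destruction cost at program start and exit, in an order that is unspecified across translation units, while a const char array is a plain compile-time constant. A hedged illustration (the MakeAttrKey helper is made up):

#include <string>

// Flagged by cpplint (runtime/string): dynamically initialized at startup,
// destroyed at exit, ordering across translation units unspecified.
// static const std::string kKernelTypeAttr = "__@kernel_type_attr@__";

// Preferred: a trivially initialized constant with no destructor.
static const char kKernelTypeAttr[] = "__@kernel_type_attr@__";

// It still converts wherever an std::string is expected.
std::string MakeAttrKey() { return std::string(kKernelTypeAttr) + "/x86"; }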
...
@@ -13,7 +13,11 @@
// limitations under the License.
#pragma once
+#include <memory>
+#include <set>
#include <string>
+#include <utility>
+#include <vector>
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/lite/core/mir/ssa_graph.h"
#include "paddle/fluid/lite/core/op_registry.h"
...
...
@@ -13,6 +13,7 @@
// limitations under the License.
#include "paddle/fluid/lite/core/target_wrapper.h"
+#include <string>
#include "paddle/fluid/lite/utils/all.h"
namespace paddle {
...
@@ -43,4 +44,4 @@ std::string Place::DebugString() const {
}
} // namespace lite
} // namespace paddle
\ No newline at end of file
...
@@ -82,11 +82,11 @@ struct Place {
TargetType target{TARGET(kUnk)};
PrecisionType precision{PRECISION(kUnk)};
DataLayoutType layout{DATALAYOUT(kUnk)};
-short device{0}; // device ID
+int16_t device{0}; // device ID
Place() = default;
Place(TargetType target, PrecisionType precision,
-DataLayoutType layout = DATALAYOUT(kNCHW), short device = 0)
+DataLayoutType layout = DATALAYOUT(kNCHW), int16_t device = 0)
: target(target), precision(precision), layout(layout), device(device) {}
bool is_valid() const {
...
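The short → int16_t change in Place follows cpplint's runtime/int rule, which asks for fixed-width integer types (or plain int) instead of short/long so the intended width is explicit. A two-line illustration, not the real header:

#include <cstdint>

// short device{0};  // flagged by cpplint (runtime/int): width is implementation-described
int16_t device{0};   // explicit 16-bit width, same storage on common ABIs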
...
@@ -13,6 +13,8 @@
// limitations under the License.
#pragma once
+#include <set>
+#include <string>
#include "paddle/fluid/lite/core/compatible_tensor.h"
#include "paddle/fluid/lite/utils/all.h"
...
...
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#pragma once
#include <cuda.h>
#include <cuda_runtime.h>
#include "paddle/fluid/lite/core/target_wrapper.h"
...
...
@@ -13,6 +13,7 @@
// limitations under the License.
#pragma once
+#include <algorithm>
#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/op_registry.h"
...
...
@@ -13,7 +13,9 @@
// limitations under the License.
#include "paddle/fluid/lite/model_parser/model_parser.h"
+#include <algorithm>
#include <fstream>
+#include <limits>
#include "paddle/fluid/lite/core/compatible_tensor.h"
#include "paddle/fluid/lite/core/scope.h"
#include "paddle/fluid/lite/core/variable.h"
...
@@ -218,8 +220,8 @@ void TensorToStream(std::ostream &os, const lite::Tensor &tensor) {
tensor.data_size(), IoDirection::DtoH);
os.write(static_cast<const char *>(tmp_buffer.get()),
static_cast<std::streamsize>(size));
-} else
+} else // NOLINT
#endif // LITE_WITH_CUDA
{
os.write(static_cast<const char *>(tensor.data<void>()),
static_cast<std::streamsize>(size));
...
...
@@ -15,6 +15,7 @@
// This file contains model format related operations, such as load a model,
// parse an operator definitions and so on.
+#pragma once
#include <memory>
#include <string>
#include <vector>
...
...
@@ -11,3 +11,5 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+#include "paddle/fluid/lite/model_parser/pb/block_desc.h"
...
@@ -21,6 +21,8 @@
*/
#include <algorithm>
+#include <map>
+#include <set>
#include <string>
#include <unordered_map>
#include <vector>
...
@@ -44,7 +46,7 @@ class OpDesc {
public:
OpDesc() {}
-OpDesc(const framework::proto::OpDesc &desc) : desc_(desc) {}
+explicit OpDesc(const framework::proto::OpDesc &desc) : desc_(desc) {}
void CopyFrom(const OpDesc &op_desc) { desc_ = op_desc.ReadonlyProto(); }
...
@@ -127,13 +129,13 @@
}
size_t hash = typeid(T).hash_code();
-if (hash == typeid(int).hash_code()) {
+if (hash == typeid(int).hash_code()) { // NOLINT
it->set_type(framework::proto::INT);
it->set_i(v);
-} else if (hash == typeid(float).hash_code()) {
+} else if (hash == typeid(float).hash_code()) { // NOLINT
it->set_type(framework::proto::FLOAT);
it->set_f(v);
-} else if (hash == typeid(bool).hash_code()) {
+} else if (hash == typeid(bool).hash_code()) { // NOLINT
it->set_type(framework::proto::BOOLEAN);
it->set_b(v);
} else {
...
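The trailing // NOLINT markers above (and on the } else that straddles the #ifdef block in model_parser.cc) silence cpplint where a check misfires on valid code, such as the typeid-dispatched SetAttr branches or an else whose braces are split by preprocessor conditionals. NOLINT applies only to the line it annotates; cpplint also accepts a category-scoped form, shown in this small, unrelated sketch:

#include <string>

// cpplint would normally flag this static std::string (runtime/string);
// the category-scoped NOLINT suppresses exactly that rule on this one line.
static std::string g_cache;  // NOLINT(runtime/string)

int main() {
  g_cache = "ok";
  return 0;
}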
...
@@ -11,3 +11,5 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+#include "paddle/fluid/lite/model_parser/pb/program_desc.h"
...
@@ -11,3 +11,5 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+#pragma once
...
@@ -18,7 +18,7 @@ namespace paddle {
namespace lite {
namespace pb {
-using namespace framework;
+using namespace framework; // NOLINT
proto::VarType::Type VarDesc::GetType() const { return desc_.type().type(); }
...
...
@@ -119,5 +119,5 @@ class VarDesc {
};
} // namespace pb
-} // namespace framework
+} // namespace lite
} // namespace paddle
...
@@ -13,6 +13,9 @@
// limitations under the License.
#pragma once
+#include <map>
+#include <string>
+#include <vector>
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/lite/utils/all.h"
...
@@ -44,7 +47,7 @@ class LoDTensorDesc {
TensorDesc tensor;
int lod_level{-1};
-LoDTensorDesc(const framework::proto::VarType_LoDTensorDesc& proto) {
+explicit LoDTensorDesc(const framework::proto::VarType_LoDTensorDesc& proto) {
Parse(proto);
}
...
...
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#pragma once
#include <string>
#include <vector>
#include "paddle/fluid/lite/core/op_lite.h"
...
@@ -25,7 +26,7 @@ namespace operators {
class ReluOp : public OpLite {
public:
ReluOp() {}
-ReluOp(const std::string &op_type) : OpLite(op_type) {}
+explicit ReluOp(const std::string &op_type) : OpLite(op_type) {}
bool CheckShape() const override;
...
...
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#pragma once
#ifdef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
#include "paddle/fluid/lite/utils/logging.h"
#else // LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
...
...
@@ -17,7 +17,9 @@
#include <list>
#include <memory>
#include <sstream>
+#include <string>
#include <unordered_map>
+#include <utility>
#include "paddle/fluid/lite/utils/cp_logging.h"
namespace paddle {
...
@@ -83,7 +85,7 @@ class Factory {
template <typename Type>
class Registor {
public:
-Registor(std::function<void()>&& functor) { functor(); }
+explicit Registor(std::function<void()>&& functor) { functor(); }
// Touch will do nothing.
int Touch() { return 0; }
...
...
@@ -34,6 +34,7 @@
#define VLOG(level) LOG_INFO.stream()
// CHECK()
+// NOLINT
#define CHECK(x) \
if (!(x)) \
paddle::lite::LogMessageFatal(__FILE__, __LINE__).stream() \
...
...
@@ -13,10 +13,12 @@
// limitations under the License.
#pragma once
+#include <algorithm>
#include <exception>
#include <memory>
#include <type_traits>
#include <typeinfo>
+#include <utility>
#include "paddle/fluid/lite/utils/cp_logging.h"
// This is an equivalent implementation of boost::any. We implement this to
...
...
@@ -15,6 +15,7 @@
#include "paddle/fluid/lite/core/target_wrapper.h"
#include <algorithm>
#include "paddle/fluid/lite/utils/all.h"
+#include "paddle/fluid/lite/x86/target_wrapper.h"
namespace paddle {
namespace lite {
...