Commit 397d0567 authored by superjomn

update cpplint

test=develop
Parent 4e0b25e3
......@@ -13,6 +13,10 @@
// limitations under the License.
#include "paddle/fluid/lite/api/cxx_api.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
#include "paddle/fluid/platform/port.h"
#endif
......
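Most hunks in this commit answer cpplint's build/include_what_you_use check: a file must directly include the standard headers for every std:: symbol it names, instead of relying on transitive includes. A self-contained illustration (the function and values are invented for the example):

```cpp
#include <string>   // std::string is named below, so include it directly
#include <utility>  // std::pair
#include <vector>   // std::vector

// Without the three includes above this might still compile when another
// header drags them in transitively, but cpplint flags the file anyway.
std::pair<std::string, std::vector<int>> MakeEntry() {
  return {"dims", {1, 3, 224, 224}};
}

int main() { return MakeEntry().second.size() == 4 ? 0 : 1; }
```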
......@@ -15,6 +15,7 @@
#include "paddle/fluid/lite/api/cxx_api.h"
#include <gflags/gflags.h>
#include <gtest/gtest.h>
#include <vector>
#include "paddle/fluid/lite/core/mir/passes.h"
#include "paddle/fluid/lite/core/op_registry.h"
......
......@@ -18,7 +18,9 @@
*/
#pragma once
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/lite/core/program.h"
#include "paddle/fluid/lite/core/types.h"
......
cc_library(lite_gtest_main SRCS lite_gtest_main.cc DEPS gtest)
cc_library(memory_lite SRCS memory.cc DEPS target_wrapper_lite target_wrapper_host)
cc_library(target_wrapper_lite SRCS target_wrapper.cc)
cc_library(lite_tensor SRCS lite_tensor.cc DEPS memory_lite target_wrapper_lite)
if (NOT LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
cc_library(hvy_tensor SRCS hvy_tensor.cc DEPS lod_tensor)
......@@ -24,7 +23,6 @@ cc_library(op_lite SRCS op_lite.cc DEPS scope_lite op_registry_lite compatible_p
cc_library(types_lite SRCS types.cc)
cc_library(type_system SRCS type_system.cc DEPS ${tensor_lite} target_wrapper_lite)
#cc_library(kernel_executor_lite SRCS kernel_executor.cc DEPS mir_ssa_graph kernel_lite)
cc_library(program_lite SRCS program.cc DEPS op_lite kernel_lite)
cc_library(optimizer_lite SRCS optimizer.cc DEPS mir_pass_manager model_parser_lite program_lite)
......
......@@ -20,6 +20,7 @@
#include "paddle/fluid/lite/cuda/cuda_utils.h"
#endif
#include <memory>
#include <set>
#include <vector>
#include "paddle/fluid/lite/core/target_wrapper.h"
......
......@@ -62,5 +62,6 @@ std::ostream &operator<<(std::ostream &os,
<< other.place.DebugString();
return os;
}
} // namespace lite
-} // namespace paddle
\ No newline at end of file
+} // namespace paddle
......@@ -19,6 +19,7 @@
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/lite/core/context.h"
#include "paddle/fluid/lite/core/target_wrapper.h"
......
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/lite/core/kernel_executor.h"
namespace paddle {
namespace lite {} // namespace lite
} // namespace paddle
\ No newline at end of file
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/lite/core/mir/ssa_graph.h"
namespace paddle {
namespace lite {
/*
* KernelExecutor executes a list of kernels.
*/
class KernelExecutorBase {
public:
KernelExecutorBase(std::unique_ptr<mir::Program>&& program);
// Prepare runtime context.
void PrepareWorkspace();
void Run();
private:
lite::Scope* scope_{};
lite::Scope* exec_scope_{};
};
/*
* KernelExecutor executes the kernels without concurrency, works in X86 place.
*/
class SerialKernelExecutor : public KernelExecutorBase {};
/*
* KernelExecutor executes the kernels with CUDA like stream parallel support,
* works in CUDA like devices.
*/
class StreamKernelExecutor : public KernelExecutorBase {};
} // namespace lite
} // namespace paddle
......@@ -29,7 +29,7 @@ class DDimLite : public DDimBase<DDimLite> {
public:
DDimLite() = default;
-DDimLite(const std::vector<value_type> &x) : DDimBase<DDimLite>() {
+explicit DDimLite(const std::vector<value_type> &x) : DDimBase<DDimLite>() {
ConstructFrom(x);
}
......
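Marking the single-argument constructor explicit satisfies cpplint's runtime/explicit check and stops a bare std::vector from silently converting into a DDimLite at a call site. A condensed, compilable sketch with illustrative names (`Dim`, `PrintRank` are not from the codebase):

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

struct Dim {
  explicit Dim(const std::vector<int64_t>& x) : dims(x) {}
  std::vector<int64_t> dims;
};

void PrintRank(const Dim& d) { std::cout << d.dims.size() << "\n"; }

int main() {
  // PrintRank({1, 2, 3});     // implicit conversion: only compiles without
  //                           // `explicit`, which cpplint flags
  PrintRank(Dim({1, 2, 3}));   // the conversion now has to be spelled out
}
```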
......@@ -21,7 +21,7 @@ namespace mir {
class ArgumentTypeDisplayPass : public DebugPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override {
+void Apply(const std::unique_ptr<SSAGraph>& graph) override {
LOG(INFO) << "== Argument types ==";
for (auto& node : graph->mutable_nodes()) {
if (!node.IsArg()) continue;
......
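This Apply signature change, repeated across all the MIR passes below, is driven by cpplint's runtime/references rule, which flags mutable reference parameters. Taking the unique_ptr by const reference silences the warning without losing anything the passes need: the pointer can no longer be reseated, but the graph it owns stays mutable. A minimal sketch under invented names:

```cpp
#include <memory>

struct SSAGraph { int node_count = 0; };

// Flagged by cpplint:  void Apply(std::unique_ptr<SSAGraph>& graph);
// Accepted: the const reference below. The pass can still edit *graph;
// it just cannot make the caller's pointer point elsewhere.
void Apply(const std::unique_ptr<SSAGraph>& graph) { graph->node_count++; }

int main() {
  auto graph = std::make_unique<SSAGraph>();  // requires C++14
  Apply(graph);
  return graph->node_count == 1 ? 0 : 1;
}
```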
......@@ -21,7 +21,7 @@ namespace mir {
class DemoPass : public mir::DebugPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override {}
+void Apply(const std::unique_ptr<SSAGraph> &graph) override {}
};
/*
......
......@@ -13,6 +13,9 @@
// limitations under the License.
#include "paddle/fluid/lite/core/mir/generate_program_pass.h"
#include <memory>
#include <utility>
#include <vector>
#include "paddle/fluid/lite/core/mir/graph_visualize_pass.h"
#include "paddle/fluid/lite/core/mir/pass_registry.h"
......@@ -20,7 +23,7 @@ namespace paddle {
namespace lite {
namespace mir {
-void GenerateProgramPass::Apply(std::unique_ptr<mir::SSAGraph>& graph) {
+void GenerateProgramPass::Apply(const std::unique_ptr<SSAGraph>& graph) {
LOG(INFO) << "final program \n" << Visualize(graph.get());
for (auto& item : graph->StmtTopologicalOrder()) {
if (item->IsStmt()) {
......
......@@ -28,7 +28,7 @@ namespace mir {
*/
class GenerateProgramPass : public ProgramPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override;
+void Apply(const std::unique_ptr<SSAGraph> &graph) override;
std::unique_ptr<RuntimeProgram> GenProgram() {
LOG(INFO) << "insts.size " << insts_.size();
......
......@@ -13,7 +13,9 @@
// limitations under the License.
#include "paddle/fluid/lite/core/mir/graph_visualize_pass.h"
#include <memory>
#include <set>
#include <string>
#include "paddle/fluid/lite/core/mir/pass_registry.h"
namespace paddle {
......@@ -22,7 +24,7 @@ namespace mir {
using inference::analysis::Dot;
-void GraphVisualizePass::Apply(std::unique_ptr<mir::SSAGraph>& graph) {
+void GraphVisualizePass::Apply(const std::unique_ptr<SSAGraph>& graph) {
Visualize(graph.get());
}
......
......@@ -27,7 +27,7 @@ namespace mir {
*/
class GraphVisualizePass : public DebugPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override;
+void Apply(const std::unique_ptr<SSAGraph>& graph) override;
};
std::string Visualize(mir::SSAGraph* graph);
......
......@@ -21,7 +21,7 @@ namespace mir {
class IoCopyKernelPickPass : public StmtPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override {
+void Apply(const std::unique_ptr<SSAGraph>& graph) override {
for (auto& node : graph->mutable_nodes()) {
if (!node.IsStmt()) continue;
auto& inst = node.AsStmt();
......
......@@ -17,6 +17,7 @@
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/op_lite.h"
......
......@@ -13,6 +13,8 @@
// limitations under the License.
#pragma once
#include <memory>
#include <string>
#include "paddle/fluid/lite/core/mir/node.h"
#include "paddle/fluid/lite/core/mir/ssa_graph.h"
......@@ -32,9 +34,9 @@ class Pass {
kDebug,
};
-Pass(Kind kind) : kind_(kind) {}
+explicit Pass(Kind kind) : kind_(kind) {}
-virtual void Apply(std::unique_ptr<mir::SSAGraph>& graph) = 0;
+virtual void Apply(const std::unique_ptr<SSAGraph>& graph) = 0;
void set_name(const std::string& name) { name_ = name; }
const std::string& name() const { return name_; }
......
......@@ -32,7 +32,7 @@ class PassManager {
PassManager();
-void Run(std::unique_ptr<SSAGraph>& graph) {
+void Run(const std::unique_ptr<SSAGraph>& graph) {
for (auto& pass : passes_) {
LOG(INFO) << "Running MIR pass " << pass->name();
pass->Apply(graph);
......
......@@ -27,7 +27,7 @@ class RuntimeContextAssignPass : public StmtPass {
#endif
}
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override {
+void Apply(const std::unique_ptr<SSAGraph>& graph) override {
for (auto& node : graph->mutable_nodes()) {
if (!node.IsStmt()) continue;
......
......@@ -13,6 +13,10 @@
// limitations under the License.
#include "paddle/fluid/lite/core/mir/ssa_graph.h"
#include <algorithm>
#include <memory>
#include <set>
#include <utility>
namespace paddle {
namespace lite {
......
......@@ -27,7 +27,7 @@ bool KernelScoreCmp(const std::pair<size_t, std::unique_ptr<KernelBase>>& a,
return a.first > b.first;
}
-void StaticKernelPickPass::Apply(std::unique_ptr<mir::SSAGraph>& graph) {
+void StaticKernelPickPass::Apply(const std::unique_ptr<SSAGraph>& graph) {
CHECK(kernel_pick_factors_.AnyFactorConsidered())
<< "kernel_pick_factors should be specified first";
CHECK(graph) << "graph not valid";
......
......@@ -35,7 +35,7 @@ namespace mir {
*/
class StaticKernelPickPass : public mir::StmtPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override;
+void Apply(const std::unique_ptr<SSAGraph>& graph) override;
void SetPreferPlace(const Place& place) { place_ = place; }
const Place& place() const { return place_; }
......
......@@ -14,7 +14,9 @@
#include "paddle/fluid/lite/core/mir/type_target_transform_pass.h"
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/lite/core/mir/graph_visualize_pass.h"
#include "paddle/fluid/lite/core/mir/pass_registry.h"
......@@ -23,7 +25,7 @@ namespace paddle {
namespace lite {
namespace mir {
-void TypeTargetTransformPass::Apply(std::unique_ptr<mir::SSAGraph>& graph) {
+void TypeTargetTransformPass::Apply(const std::unique_ptr<SSAGraph>& graph) {
// Start from inputs of the graph, those should have place set.
std::list<Node*> nodes;
for (auto& node : graph->mutable_nodes()) {
......
......@@ -40,7 +40,7 @@ static void UpdateInputTo(framework::proto::OpDesc* desc,
*/
class TypeTargetTransformPass : public ProgramPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override;
+void Apply(const std::unique_ptr<SSAGraph>& graph) override;
void ComplementInputs(SSAGraph* graph, Node* inst_node, Node* in);
......
......@@ -13,13 +13,14 @@
// limitations under the License.
#include "paddle/fluid/lite/core/mir/variable_place_inference_pass.h"
#include <memory>
#include "paddle/fluid/lite/core/mir/pass_registry.h"
namespace paddle {
namespace lite {
namespace mir {
-void VariablePlaceInferencePass::Apply(std::unique_ptr<mir::SSAGraph>& graph) {
+void VariablePlaceInferencePass::Apply(const std::unique_ptr<SSAGraph> &graph) {
MarkInputPlace(graph.get());
InferenceArgumentPlace(graph.get());
CheckAllArgumentTypeDetermined(graph.get());
......
......@@ -13,6 +13,7 @@
// limitations under the License.
#pragma once
#include <memory>
#include "paddle/fluid/lite/core/mir/pass.h"
#include "paddle/fluid/lite/core/target_wrapper.h"
......@@ -26,7 +27,7 @@ namespace mir {
*/
class VariablePlaceInferencePass : public DebugPass {
public:
-void Apply(std::unique_ptr<mir::SSAGraph>& graph) override;
+void Apply(const std::unique_ptr<SSAGraph>& graph) override;
private:
// Mark the place of input arguments.
......
......@@ -13,7 +13,10 @@
// limitations under the License.
#include "paddle/fluid/lite/core/op_lite.h"
#include "op_lite.h"
#include <list>
#include <set>
#include <utility>
#include <vector>
#include "paddle/fluid/lite/core/op_registry.h"
namespace paddle {
......
......@@ -13,6 +13,8 @@
// limitations under the License.
#include "paddle/fluid/lite/core/op_registry.h"
#include <list>
#include <set>
namespace paddle {
namespace lite {
......@@ -90,4 +92,4 @@ KernelRegistry &KernelRegistry::Global() {
}
} // namespace lite
-} // namespace paddle
\ No newline at end of file
+} // namespace paddle
......@@ -14,8 +14,10 @@
#pragma once
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/op_lite.h"
#include "paddle/fluid/lite/core/target_wrapper.h"
......@@ -43,7 +45,7 @@ class LiteOpRegistry final : public Factory<OpLite, std::shared_ptr<OpLite>> {
template <typename OpClass>
class OpLiteRegistor : public Registor<OpClass> {
public:
-OpLiteRegistor(const std::string &op_type)
+explicit OpLiteRegistor(const std::string &op_type)
: Registor<OpClass>([&] {
LiteOpRegistry::Global().Register(
op_type, [op_type]() -> std::unique_ptr<OpLite> {
......
......@@ -13,6 +13,7 @@
// limitations under the License.
#pragma once
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/lite/core/mir/generate_program_pass.h"
......
......@@ -14,6 +14,8 @@
#include "paddle/fluid/lite/core/optimizer.h"
#include <gtest/gtest.h>
#include <memory>
#include <utility>
#include "paddle/fluid/lite/core/mir/generate_program_pass.h"
#include "paddle/fluid/lite/core/mir/pass_manager.h"
#include "paddle/fluid/lite/core/mir/passes.h"
......
......@@ -14,10 +14,11 @@
#pragma once
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/mir/node.h"
#include "paddle/fluid/lite/core/op_lite.h"
#include "paddle/fluid/lite/core/op_registry.h"
......@@ -25,7 +26,7 @@
namespace paddle {
namespace lite {
-static const std::string kKernelTypeAttr = "__@kernel_type_attr@__";
+static const char kKernelTypeAttr[] = "__@kernel_type_attr@__";
// A program is used to represent a code program, in Paddle, a code program
// contains:
......
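Swapping the static const std::string for a char array addresses cpplint's runtime/string check: a namespace-scope std::string runs a constructor before main() and a destructor after it, with unspecified ordering across translation units, whereas a char array is initialized at compile time. A sketch:

```cpp
#include <string>

// static const std::string kKernelTypeAttr = "...";  // flagged by cpplint
//                                      // (runtime/string): global ctor/dtor
static const char kKernelTypeAttr[] = "__@kernel_type_attr@__";  // trivial init

// Call sites needing std::string semantics can still convert on demand:
bool IsKernelTypeAttr(const std::string& name) {
  return name == kKernelTypeAttr;
}

int main() { return IsKernelTypeAttr("__@kernel_type_attr@__") ? 0 : 1; }
```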
......@@ -13,7 +13,11 @@
// limitations under the License.
#pragma once
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/lite/core/mir/ssa_graph.h"
#include "paddle/fluid/lite/core/op_registry.h"
......
......@@ -13,6 +13,7 @@
// limitations under the License.
#include "paddle/fluid/lite/core/target_wrapper.h"
#include <string>
#include "paddle/fluid/lite/utils/all.h"
namespace paddle {
......@@ -43,4 +44,4 @@ std::string Place::DebugString() const {
}
} // namespace lite
-} // namespace paddle
\ No newline at end of file
+} // namespace paddle
......@@ -82,11 +82,11 @@ struct Place {
TargetType target{TARGET(kUnk)};
PrecisionType precision{PRECISION(kUnk)};
DataLayoutType layout{DATALAYOUT(kUnk)};
-short device{0}; // device ID
+int16_t device{0}; // device ID
Place() = default;
Place(TargetType target, PrecisionType precision,
-DataLayoutType layout = DATALAYOUT(kNCHW), short device = 0)
+DataLayoutType layout = DATALAYOUT(kNCHW), int16_t device = 0)
: target(target), precision(precision), layout(layout), device(device) {}
bool is_valid() const {
......
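short → int16_t follows cpplint's runtime/int rule, which prefers the fixed-width <cstdint> aliases over bare short/long so the field width is explicit and portable. Condensed from the Place struct above (PlaceSketch is a stand-in name):

```cpp
#include <cstdint>

struct PlaceSketch {   // trimmed stand-in for lite::Place
  int16_t device{0};   // was `short device{0}`; the width is now explicit
};

static_assert(sizeof(int16_t) == 2, "fixed-width alias is exactly 16 bits");

int main() { return PlaceSketch{}.device; }
```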
......@@ -13,6 +13,8 @@
// limitations under the License.
#pragma once
#include <set>
#include <string>
#include "paddle/fluid/lite/core/compatible_tensor.h"
#include "paddle/fluid/lite/utils/all.h"
......
......@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cuda.h>
#include <cuda_runtime.h>
#include "paddle/fluid/lite/core/target_wrapper.h"
......
......@@ -13,6 +13,7 @@
// limitations under the License.
#pragma once
#include <algorithm>
#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/op_registry.h"
......
......@@ -13,7 +13,9 @@
// limitations under the License.
#include "paddle/fluid/lite/model_parser/model_parser.h"
#include <algorithm>
#include <fstream>
#include <limits>
#include "paddle/fluid/lite/core/compatible_tensor.h"
#include "paddle/fluid/lite/core/scope.h"
#include "paddle/fluid/lite/core/variable.h"
......@@ -218,8 +220,8 @@ void TensorToStream(std::ostream &os, const lite::Tensor &tensor) {
tensor.data_size(), IoDirection::DtoH);
os.write(static_cast<const char *>(tmp_buffer.get()),
static_cast<std::streamsize>(size));
-} else
-#endif // LITE_WITH_CUDA
+} else  // NOLINT
+#endif  // LITE_WITH_CUDA
{
os.write(static_cast<const char *>(tensor.data<void>()),
static_cast<std::streamsize>(size));
......
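The // NOLINT on `} else` works around a cpplint false positive: the matching opening brace of the else branch sits on the far side of the #endif, so the readability/braces check cannot see it. A compilable miniature of the same shape (DEMO_WITH_GPU is an invented flag standing in for LITE_WITH_CUDA):

```cpp
#include <iostream>

#define DEMO_WITH_GPU  // invented flag; comment out to drop the branch

void WriteTensor(bool on_device) {
#ifdef DEMO_WITH_GPU
  if (on_device) {
    std::cout << "copy device buffer to host, then write\n";
  } else  // NOLINT: the opening brace lives after the #endif below
#endif
  {
    std::cout << "write host buffer directly\n";
  }
}

int main() { WriteTensor(false); }
```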
......@@ -15,6 +15,7 @@
// This file contains model format related operations, such as load a model,
// parse an operator definitions and so on.
#pragma once
#include <memory>
#include <string>
#include <vector>
......
......@@ -11,3 +11,5 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/lite/model_parser/pb/block_desc.h"
......@@ -21,6 +21,8 @@
*/
#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>
......@@ -44,7 +46,7 @@ class OpDesc {
public:
OpDesc() {}
-OpDesc(const framework::proto::OpDesc &desc) : desc_(desc) {}
+explicit OpDesc(const framework::proto::OpDesc &desc) : desc_(desc) {}
void CopyFrom(const OpDesc &op_desc) { desc_ = op_desc.ReadonlyProto(); }
......@@ -127,13 +129,13 @@ class OpDesc {
}
size_t hash = typeid(T).hash_code();
-if (hash == typeid(int).hash_code()) {
+if (hash == typeid(int).hash_code()) { // NOLINT
it->set_type(framework::proto::INT);
it->set_i(v);
-} else if (hash == typeid(float).hash_code()) {
+} else if (hash == typeid(float).hash_code()) { // NOLINT
it->set_type(framework::proto::FLOAT);
it->set_f(v);
-} else if (hash == typeid(bool).hash_code()) {
+} else if (hash == typeid(bool).hash_code()) { // NOLINT
it->set_type(framework::proto::BOOLEAN);
it->set_b(v);
} else {
......
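The // NOLINT markers suppress cpplint warnings that misfire on this if/else chain. The underlying pattern is runtime type dispatch: hash the template parameter's typeid once, then branch to the matching proto field. A stripped-down version that prints instead of writing framework::proto fields:

```cpp
#include <cstddef>
#include <iostream>
#include <typeinfo>

// Dispatch on the runtime hash of T, mirroring the SetAttr chain above;
// printing stands in for writing the proto attribute fields.
template <typename T>
void DescribeAttr() {
  const std::size_t hash = typeid(T).hash_code();
  if (hash == typeid(int).hash_code()) {
    std::cout << "INT attribute\n";
  } else if (hash == typeid(float).hash_code()) {
    std::cout << "FLOAT attribute\n";
  } else if (hash == typeid(bool).hash_code()) {
    std::cout << "BOOLEAN attribute\n";
  } else {
    std::cout << "unsupported attribute type\n";
  }
}

int main() { DescribeAttr<float>(); }  // prints "FLOAT attribute"
```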
......@@ -11,3 +11,5 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/lite/model_parser/pb/program_desc.h"
......@@ -11,3 +11,5 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
......@@ -18,7 +18,7 @@ namespace paddle {
namespace lite {
namespace pb {
-using namespace framework;
+using namespace framework; // NOLINT
proto::VarType::Type VarDesc::GetType() const { return desc_.type().type(); }
......
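cpplint's build/namespaces check flags every using-directive; here the directive is kept on purpose (the pb code leans heavily on the framework proto names) and acknowledged with // NOLINT rather than removed. The shape in miniature:

```cpp
namespace framework {
struct OpDesc {};
}  // namespace framework

namespace pb {
using namespace framework;  // NOLINT: deliberate, scoped to this namespace

OpDesc MakeDesc() { return OpDesc(); }  // `framework::` prefix now optional
}  // namespace pb

int main() { pb::MakeDesc(); }
```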
......@@ -119,5 +119,5 @@ class VarDesc {
};
} // namespace pb
-} // namespace framework
+} // namespace lite
} // namespace paddle
......@@ -13,6 +13,9 @@
// limitations under the License.
#pragma once
#include <map>
#include <string>
#include <vector>
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/lite/utils/all.h"
......@@ -44,7 +47,7 @@ class LoDTensorDesc {
TensorDesc tensor;
int lod_level{-1};
-LoDTensorDesc(const framework::proto::VarType_LoDTensorDesc& proto) {
+explicit LoDTensorDesc(const framework::proto::VarType_LoDTensorDesc& proto) {
Parse(proto);
}
......
......@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle/fluid/lite/core/op_lite.h"
......@@ -25,7 +26,7 @@ namespace operators {
class ReluOp : public OpLite {
public:
ReluOp() {}
-ReluOp(const std::string &op_type) : OpLite(op_type) {}
+explicit ReluOp(const std::string &op_type) : OpLite(op_type) {}
bool CheckShape() const override;
......
......@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifdef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
#include "paddle/fluid/lite/utils/logging.h"
#else // LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
......
......@@ -17,7 +17,9 @@
#include <list>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include "paddle/fluid/lite/utils/cp_logging.h"
namespace paddle {
......@@ -83,7 +85,7 @@ class Factory {
template <typename Type>
class Registor {
public:
-Registor(std::function<void()>&& functor) { functor(); }
+explicit Registor(std::function<void()>&& functor) { functor(); }
// Touch will do nothing.
int Touch() { return 0; }
......
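Registor runs its callback during static initialization, which is how the REGISTER_*-style macros populate the op and kernel factories before main(); explicit keeps a stray lambda from implicitly converting into a Registor. A self-contained sketch with an invented registration body:

```cpp
#include <functional>
#include <iostream>

class Registor {
 public:
  explicit Registor(std::function<void()>&& functor) { functor(); }
  int Touch() { return 0; }  // referenced from other files to force linking
};

// Runs before main(): a registration macro would expand to something like this.
static Registor demo_registor([] { std::cout << "registered demo op\n"; });

int main() { return demo_registor.Touch(); }
```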
......@@ -34,6 +34,7 @@
#define VLOG(level) LOG_INFO.stream()
// CHECK()
// NOLINT
#define CHECK(x) \
if (!(x)) \
paddle::lite::LogMessageFatal(__FILE__, __LINE__).stream() \
......
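The CHECK above streams into a fatal log message when its condition fails, mimicking glog without the dependency; the dangling if means extra context can be appended with <<. A trimmed, compilable illustration (simplified relative to paddle::lite::LogMessageFatal):

```cpp
#include <cstdlib>
#include <iostream>
#include <sstream>

class LogMessageFatal {
 public:
  LogMessageFatal(const char* file, int line) {
    stream_ << file << ":" << line << " ";
  }
  ~LogMessageFatal() {  // the message is emitted, then the process dies
    std::cerr << stream_.str() << std::endl;
    std::abort();
  }
  std::ostream& stream() { return stream_; }

 private:
  std::ostringstream stream_;
};

// Only when the check fails does the stream (and anything << after the
// CHECK call) get constructed and flushed.
#define CHECK(x) \
  if (!(x)) LogMessageFatal(__FILE__, __LINE__).stream() << "Check failed: " #x << " "

int main() {
  CHECK(1 + 1 == 2) << "arithmetic broke";  // passes, prints nothing
  return 0;
}
```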
......@@ -13,10 +13,12 @@
// limitations under the License.
#pragma once
#include <algorithm>
#include <exception>
#include <memory>
#include <type_traits>
#include <typeinfo>
#include <utility>
#include "paddle/fluid/lite/utils/cp_logging.h"
// This is an equivalent implementation of boost::any. We implement this to
......
......@@ -15,6 +15,7 @@
#include "paddle/fluid/lite/core/target_wrapper.h"
#include <algorithm>
#include "paddle/fluid/lite/utils/all.h"
#include "paddle/fluid/lite/x86/target_wrapper.h"
namespace paddle {
namespace lite {
......