From f41d73b3b7b29f0bf5d35901c2e3ab52226ccd52 Mon Sep 17 00:00:00 2001 From: Superjomn Date: Sun, 14 Apr 2019 17:40:00 +0800 Subject: [PATCH] init mir --- paddle/fluid/lite/core/CMakeLists.txt | 2 + paddle/fluid/lite/core/context.cc | 2 +- paddle/fluid/lite/core/mir/CMakeLists.txt | 3 + paddle/fluid/lite/core/mir/node.cc | 1 + paddle/fluid/lite/core/mir/node.h | 15 +++ paddle/fluid/lite/core/mir/pass.cc | 1 + paddle/fluid/lite/core/mir/pass.h | 0 paddle/fluid/lite/core/mir/ssa_graph.cc | 1 + paddle/fluid/lite/core/mir/ssa_graph.h | 0 paddle/fluid/lite/core/target_wrapper.h | 10 +- paddle/fluid/lite/core/tensor.cc | 2 +- paddle/fluid/lite/core/type_system.h | 113 ++++++++++++++++++++++ paddle/fluid/lite/operators/fc_op_test.cc | 4 +- 13 files changed, 148 insertions(+), 6 deletions(-) create mode 100644 paddle/fluid/lite/core/mir/CMakeLists.txt create mode 100644 paddle/fluid/lite/core/mir/node.cc create mode 100644 paddle/fluid/lite/core/mir/node.h create mode 100644 paddle/fluid/lite/core/mir/pass.cc create mode 100644 paddle/fluid/lite/core/mir/pass.h create mode 100644 paddle/fluid/lite/core/mir/ssa_graph.cc create mode 100644 paddle/fluid/lite/core/mir/ssa_graph.h diff --git a/paddle/fluid/lite/core/CMakeLists.txt b/paddle/fluid/lite/core/CMakeLists.txt index 1d837e448..b915b6f3e 100644 --- a/paddle/fluid/lite/core/CMakeLists.txt +++ b/paddle/fluid/lite/core/CMakeLists.txt @@ -16,3 +16,5 @@ cc_test(test_op_lite SRCS op_lite_test.cc DEPS op_lite) cc_test(test_tensor_lite SRCS tensor_test.cc) cc_test(test_executor_lite SRCS executor_test.cc DEPS executor_lite ops_lite host_kernels) cc_test(test_type_system SRCS type_system_test.cc DEPS type_system) + +add_subdirectory(mir) diff --git a/paddle/fluid/lite/core/context.cc b/paddle/fluid/lite/core/context.cc index 2dffeb717..fa01f1d3e 100644 --- a/paddle/fluid/lite/core/context.cc +++ b/paddle/fluid/lite/core/context.cc @@ -16,4 +16,4 @@ // Created by chunwei on 19-2-22. 
// Node of the MIR (machine IR) SSA graph.
// Declarations only; definitions live in node.cc.
#pragma once

namespace paddle {
namespace lite {
namespace mir {

// A node of the MIR SSA graph. A node is either an instruction or an
// argument (a variable flowing between instructions) — the two predicates
// below tell which.
class Node {
 public:
  // Tells whether this node is an instruction.
  bool IsInstruct() const;
  // Tells whether this node is an argument.
  bool IsArgument() const;
};

}  // namespace mir
}  // namespace lite
}  // namespace paddle
+++ b/paddle/fluid/lite/core/tensor.cc @@ -24,7 +24,7 @@ std::ostream &operator<<(std::ostream &os, const DDim &dims) { } os << "["; - for (int i = 0; i < dims.size() - 1; i++) { + for (size_t i = 0; i < dims.size() - 1; i++) { os << dims[i] << " "; } os << dims.back() << "]"; diff --git a/paddle/fluid/lite/core/type_system.h b/paddle/fluid/lite/core/type_system.h index 05af476ef..aabd50bb9 100644 --- a/paddle/fluid/lite/core/type_system.h +++ b/paddle/fluid/lite/core/type_system.h @@ -31,6 +31,119 @@ namespace paddle { namespace lite { +// Type is the definition of all the types that supported by the Variable that +// represents as the input and output of an operator or kernel. +// The DNN system is simple, and the architecture can not process that many data +// types as a compiler, or that will turn out to a chaos. +// +// We should make sure that supported data types should be registered here, and +// keep the quantity small. And avoid using some special data types as op's IO, +// such as some runtime cache, that need to be avoided. +// +// TODO(Superjomn) Add operator/kernel-wise static checking to avoid unsupported +// type mixed in the system. +class DataTypeBase { + public: + // The Void type can cast to any other type. + // The Unsupported is the data type that developed include in the system, for + // example, some `std::set` is used as input of some operator. It wan't be + // analyzed or optimized by the system, that way results in many bugs in + // previous system, so it should be avoided. + enum class ID : int { + Void = 0, // unknown type that can be cast to any data type. + Unsupported, // Unsupported data type that will not be analyzed. + Tensor_Fp32_NCHW, + Tensor_Int8_NCHW, + Tensor_Int64_NCHW, + NumTypes, // Must remains as last defined ID. + }; + + ID id() const { return id_; } + + // type check. 
+ bool IsTensor() const { return is_tensor_; } + bool IsVoid() const { return id_ == ID::Void; } + bool IsUnsupported() const { return id_ == ID::Unsupported; } + bool IsTensorFp32NCHW() const { return id_ == ID::Tensor_Fp32_NCHW; } + bool IsTensorInt8NCHW() const { return id_ == ID::Tensor_Int8_NCHW; } + bool IsTensorInt64NCHW() const { return id_ == ID::Tensor_Int64_NCHW; } + + int num_types() const { return static_cast(ID::NumTypes); } + + protected: + // Can only extended by subclass. + DataTypeBase(ID id, bool is_tensor) : id_(id), is_tensor_(is_tensor) {} + + ID id_{ID::Unsupported}; + bool is_tensor_{false}; +}; + +/* + * Datatype with device info considered. + * NOTE A Type with different device is treated as different DeviceDataType. + */ +class DeviceDataType : public DataTypeBase { + public: + TargetType target() const { return place_.target; } + PrecisionType precision() const { return place_.precision; } + DataLayoutType layout() const { return place_.layout; } + const Place& place() const { return place_; } + const std::string& name() const { return name_; } + + bool operator==(const DeviceDataType& other) { + return id_ == other.id() && place_ == other.place(); + } + + // Can cast to another type. This is heavily used in MIR, by determine whether + // is is possible to add a instruction to transform a type to another. 
+ virtual bool TypeCastable(const DeviceDataType& type) const { + return id_ == type.id(); + } + + virtual ~DeviceDataType() = default; + + protected: + DeviceDataType(ID id, const std::string& name, bool is_tensor, + TargetType target = TargetType::kHost, + PrecisionType precision = PrecisionType::kFloat, + DataLayoutType layout = DataLayoutType::kNCHW) + : DataTypeBase(id, is_tensor), + place_{target, precision, layout}, + name_(name) {} + + protected: + Place place_; + const std::string name_; +}; + +// -------------------------------- predefined types --------------------------- +class Void : public DeviceDataType { + public: + Void() : DeviceDataType(ID::Void, "Void", false /*is_tensor*/) {} +}; +class TensorFp32NCHW : public DeviceDataType { + public: + TensorFp32NCHW(TargetType target) + : DeviceDataType(ID::Tensor_Fp32_NCHW, "TensorFp32NCHW", + true /*is_tensor*/, target, PrecisionType::kFloat, + DataLayoutType::kNCHW) {} +}; +class TensorInt8NCHW : public DeviceDataType { + public: + TensorInt8NCHW(TargetType target) + : DeviceDataType(ID::Tensor_Int8_NCHW, "TensorInt8NCHW", + true /*is_tensor*/, target, PrecisionType::kInt8, + DataLayoutType::kNCHW) {} +}; +class TensorInt64NCHW : public DeviceDataType { + public: + TensorInt64NCHW(TargetType target) + : DeviceDataType(ID::Tensor_Int64_NCHW, "TensorInt64NCHW", + true /*is_tensor*/, target, PrecisionType::kInt8, + DataLayoutType::kNCHW) {} +}; +// ------------------------- end predefined types --------------------------- + // NOTE TypeSystem has some overhead, and better to be used in analysis phase. 
class TypeSystem { private: diff --git a/paddle/fluid/lite/operators/fc_op_test.cc b/paddle/fluid/lite/operators/fc_op_test.cc index 4279823d2..b191469ff 100644 --- a/paddle/fluid/lite/operators/fc_op_test.cc +++ b/paddle/fluid/lite/operators/fc_op_test.cc @@ -58,8 +58,8 @@ TEST(fc_op_lite, test) { FcOpLite fc("fc"); - fc.SetValidPlaces({OpLite::Place{TARGET(kHost), PRECISION(kFloat)}}); - fc.PickKernel({OpLite::Place{TARGET(kHost), PRECISION(kFloat)}}); + fc.SetValidPlaces({Place{TARGET(kHost), PRECISION(kFloat)}}); + fc.PickKernel({Place{TARGET(kHost), PRECISION(kFloat)}}); fc.Attach(desc, &scope); fc.Run(); -- GitLab