diff --git a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
index 7a297b3daefd754cc2a9fe1df3d48208d7e86847..cd81168753bedfa7c072bbfe0e10807f7fd7d98b 100644
--- a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
@@ -12,7 +12,6 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "boost/optional.hpp"
 #include "paddle/fluid/framework/data_layout_transform.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/memory/malloc.h"
diff --git a/paddle/fluid/operators/sequence_ops/sequence_concat_op.h b/paddle/fluid/operators/sequence_ops/sequence_concat_op.h
index 8d9302fa43b7a3a597b2f90119e7e27c2ca357a2..4943e0e2ea09bc11e297ee8cab70d3a7b3951648 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_concat_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_concat_op.h
@@ -17,7 +17,6 @@
 #include
 #include
 
-#include "boost/optional.hpp"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/concat_and_split.h"
 
diff --git a/paddle/fluid/platform/mkldnn_reuse.h b/paddle/fluid/platform/mkldnn_reuse.h
index 05ebedf611a4b26eb3745175494d68dfa0db05b0..41a4f551cedc1ed925a02bd52c0055e5db7d4d3c 100644
--- a/paddle/fluid/platform/mkldnn_reuse.h
+++ b/paddle/fluid/platform/mkldnn_reuse.h
@@ -20,7 +20,6 @@ limitations under the License. */
 #include
 #include
 
-#include "boost/optional.hpp"
 #include "paddle/fluid/framework/data_layout_transform.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/pool_op.h"
diff --git a/paddle/fluid/pybind/reader_py.cc b/paddle/fluid/pybind/reader_py.cc
index 9c80bb8a67e6332e4f370d92ff0d71db0ee2830b..36c09f543a6c20cea94865bc01fc992d43e4184d 100644
--- a/paddle/fluid/pybind/reader_py.cc
+++ b/paddle/fluid/pybind/reader_py.cc
@@ -22,7 +22,7 @@
 #include
 
 #include "Python.h"
-#include "boost/optional.hpp"
+
 #include "gflags/gflags.h"
 #include "paddle/fluid/framework/reader.h"
 #include "paddle/fluid/imperative/layer.h"
diff --git a/paddle/infrt/host_context/mlir_to_runtime_translate.cc b/paddle/infrt/host_context/mlir_to_runtime_translate.cc
index 9292e593a708fbf31b21719c3fd2771c5683e3fd..81b41d61ded3e908a13deb2e1e65c52a1855c028 100644
--- a/paddle/infrt/host_context/mlir_to_runtime_translate.cc
+++ b/paddle/infrt/host_context/mlir_to_runtime_translate.cc
@@ -31,7 +31,6 @@
 #include
 #include
 
-#include "boost/optional.hpp"
 #include "paddle/infrt/common/string.h"
 #include "paddle/infrt/dialect/dense_tensor.h"
 #include "paddle/infrt/dialect/mlir_loader.h"
@@ -124,118 +123,118 @@ bool MlirToRuntimeTranslator::EmitConstantOp(mlir::Operation* op) {
 }
 
 template <>
-boost::optional<int32_t> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<int32_t> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::IntegerAttr>()) return boost::none;
+  if (!attr.isa<mlir::IntegerAttr>()) return paddle::none;
   if (attr.isa<mlir::IntegerAttr>()) {
     auto val = attr.cast<mlir::IntegerAttr>();
     if (val.getType().isInteger(32)) {
       return val.getValue().getSExtValue();
     }
   }
-  return boost::none;
+  return paddle::none;
 }
 
 template <>
-boost::optional<int64_t> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<int64_t> MlirToRuntimeTranslator::EmitAttribute(
    const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::IntegerAttr>()) return boost::none;
+  if (!attr.isa<mlir::IntegerAttr>()) return paddle::none;
   if (attr.isa<mlir::IntegerAttr>()) {
     auto val = attr.cast<mlir::IntegerAttr>();
     if (val.getType().isInteger(64)) {
       return val.getValue().getSExtValue();
     }
   }
-  return boost::none;
+  return paddle::none;
 }
 
 // TODO(Superjomn) Make double and float parsing share some thing.
 template <>
-boost::optional<float> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<float> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::FloatAttr>()) return boost::none;
+  if (!attr.isa<mlir::FloatAttr>()) return paddle::none;
   if (attr.isa<mlir::FloatAttr>()) {
     auto val = attr.cast<mlir::FloatAttr>();
     if (val.getType().isF32()) return val.getValueAsDouble();
   }
-  return boost::none;
+  return paddle::none;
 }
 
 template <>
-boost::optional<bool> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<bool> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::BoolAttr>()) return boost::none;
+  if (!attr.isa<mlir::BoolAttr>()) return paddle::none;
   if (attr.isa<mlir::BoolAttr>()) {
     auto val = attr.cast<mlir::BoolAttr>();
     return val.getValue();
   }
-  return boost::none;
+  return paddle::none;
 }
 
 template <>
-boost::optional<double> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<double> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::FloatAttr>()) return boost::none;
+  if (!attr.isa<mlir::FloatAttr>()) return paddle::none;
   if (attr.isa<mlir::FloatAttr>()) {
     auto val = attr.cast<mlir::FloatAttr>();
     if (val.getType().isF64()) return val.getValueAsDouble();
   }
-  return boost::none;
+  return paddle::none;
 }
 
 template <>
-boost::optional<::infrt::TargetType> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<::infrt::TargetType> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<::infrt::TargetAttr>()) return boost::none;
+  if (!attr.isa<::infrt::TargetAttr>()) return paddle::none;
   if (attr.isa<::infrt::TargetAttr>()) {
     return attr.cast<::infrt::TargetAttr>().getTarget();
   }
-  return boost::none;
+  return paddle::none;
 }
 
 template <>
-boost::optional<::infrt::LayoutType> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<::infrt::LayoutType> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<::infrt::LayoutAttr>()) return boost::none;
+  if (!attr.isa<::infrt::LayoutAttr>()) return paddle::none;
   if (attr.isa<::infrt::LayoutAttr>()) {
     return attr.cast<::infrt::LayoutAttr>().getLayout();
   }
-  return boost::none;
+  return paddle::none;
 }
 
 template <>
-boost::optional<::infrt::PrecisionType> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<::infrt::PrecisionType> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<::infrt::PrecisionAttr>()) return boost::none;
+  if (!attr.isa<::infrt::PrecisionAttr>()) return paddle::none;
   if (attr.isa<::infrt::PrecisionAttr>()) {
     return attr.cast<::infrt::PrecisionAttr>().getPrecision();
   }
-  return boost::none;
+  return paddle::none;
 }
 
 template <>
-boost::optional<std::string> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<std::string> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::StringAttr>()) return boost::none;
+  if (!attr.isa<mlir::StringAttr>()) return paddle::none;
   return attr.cast<mlir::StringAttr>().getValue().str();
 }
 
-#define PROCESS_ARRAY_INT(type__, bits__)                                      \
-  template <>                                                                  \
-  boost::optional<std::vector<type__>> MlirToRuntimeTranslator::EmitAttribute( \
-      const mlir::Attribute& attr) {                                           \
-    if (!attr.isa<mlir::ArrayAttr>()) return boost::none;                      \
-    auto array = attr.cast<mlir::ArrayAttr>();                                 \
-    CHECK(!array.empty());                                                     \
-                                                                               \
-    if (!array[0].getType().isInteger(bits__)) {                               \
-      return boost::none;                                                      \
-    }                                                                          \
-                                                                               \
-    std::vector<type__> res;                                                   \
-    for (auto& v : array) {                                                    \
-      res.push_back(v.cast<mlir::IntegerAttr>().getValue().getSExtValue());    \
-    }                                                                          \
-    return res;                                                                \
+#define PROCESS_ARRAY_INT(type__, bits__)                                    \
+  template <>                                                                \
+  paddle::optional<std::vector<type__>>                                      \
+  MlirToRuntimeTranslator::EmitAttribute(const mlir::Attribute& attr) {      \
+    if (!attr.isa<mlir::ArrayAttr>()) return paddle::none;                   \
+    auto array = attr.cast<mlir::ArrayAttr>();                               \
+    CHECK(!array.empty());                                                   \
+                                                                             \
+    if (!array[0].getType().isInteger(bits__)) {                             \
+      return paddle::none;                                                   \
+    }                                                                        \
+                                                                             \
+    std::vector<type__> res;                                                 \
+    for (auto& v : array) {                                                  \
+      res.push_back(v.cast<mlir::IntegerAttr>().getValue().getSExtValue());  \
+    }                                                                        \
+    return res;                                                              \
   }
 
 PROCESS_ARRAY_INT(bool, 1);
@@ -244,13 +243,13 @@ PROCESS_ARRAY_INT(int32_t, 32);
 PROCESS_ARRAY_INT(int64_t, 64);
 
 template <>
-boost::optional<std::vector<float>> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<std::vector<float>> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::ArrayAttr>()) return boost::none;
+  if (!attr.isa<mlir::ArrayAttr>()) return paddle::none;
   auto array = attr.cast<mlir::ArrayAttr>();
   CHECK(!array.empty());
 
-  if (!array[0].getType().isF32()) return boost::none;
+  if (!array[0].getType().isF32()) return paddle::none;
 
   std::vector<float> res;
   for (auto& v : array) {
@@ -260,13 +259,13 @@ boost::optional<std::vector<float>> MlirToRuntimeTranslator::EmitAttribute(
 }
 
 template <>
-boost::optional<std::vector<double>> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<std::vector<double>> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::ArrayAttr>()) return boost::none;
+  if (!attr.isa<mlir::ArrayAttr>()) return paddle::none;
   auto array = attr.cast<mlir::ArrayAttr>();
   CHECK(!array.empty());
 
-  if (!array[0].getType().isF64()) return boost::none;
+  if (!array[0].getType().isF64()) return paddle::none;
 
   std::vector<double> res;
   for (auto& v : array) {
diff --git a/paddle/infrt/host_context/mlir_to_runtime_translate.h b/paddle/infrt/host_context/mlir_to_runtime_translate.h
index 27a7f20168667daddd353e902d49479aa612e38f..64dc770489c4d63e8cd84c5d4875791b1c0991d7 100644
--- a/paddle/infrt/host_context/mlir_to_runtime_translate.h
+++ b/paddle/infrt/host_context/mlir_to_runtime_translate.h
@@ -75,7 +75,7 @@ class MlirToRuntimeTranslator {
   bool EmitCallOp(mlir::Operation* op, function_defs_t* function_table);
 
   template <typename T>
-  boost::optional<T> EmitAttribute(const mlir::Attribute& attr);
+  paddle::optional<T> EmitAttribute(const mlir::Attribute& attr);
 
   Value* GetOpResult(mlir::Operation* op);
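
Reviewer note: this patch is a mechanical boost::optional-to-paddle::optional substitution, so it assumes paddle::optional and paddle::none expose the same interface as their Boost counterparts. As a minimal sketch of the guard-then-return pattern the EmitAttribute specializations follow, the self-contained snippet below uses std::optional as a stand-in for paddle::optional; EmitAttributeDemo is a hypothetical name for illustration only and is not part of the patch.

#include <cstdint>
#include <iostream>
#include <optional>

// Sketch only: std::optional stands in for paddle::optional here.
// Mirrors the pattern above: bail out with an empty optional when the
// attribute is not of the requested kind, otherwise return the value.
std::optional<int32_t> EmitAttributeDemo(bool is_i32, int64_t raw) {
  if (!is_i32) return std::nullopt;  // corresponds to `return paddle::none;`
  return static_cast<int32_t>(raw);  // engaged optional carrying the value
}

int main() {
  if (auto v = EmitAttributeDemo(true, 42)) {
    std::cout << *v << "\n";  // prints 42
  }
  if (!EmitAttributeDemo(false, 42)) {
    std::cout << "no value\n";  // the type-mismatch path
  }
  return 0;
}

The empty-optional branch is the path every `return paddle::none;` in the diff takes when the incoming mlir::Attribute fails the isa<> check.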