From 957258d968b522307f491e9e040a684f4970c18f Mon Sep 17 00:00:00 2001
From: Ruibiao Chen
Date: Mon, 4 Jul 2022 11:03:35 +0800
Subject: [PATCH] Remove boost::optional and boost::none (#44029)

---
 .../mkldnn/conv_transpose_mkldnn_op.cc        |  1 -
 .../sequence_ops/sequence_concat_op.h         |  1 -
 paddle/fluid/platform/mkldnn_reuse.h          |  1 -
 paddle/fluid/pybind/reader_py.cc              |  2 +-
 .../host_context/mlir_to_runtime_translate.cc | 99 +++++++++----------
 .../host_context/mlir_to_runtime_translate.h  |  2 +-
 6 files changed, 51 insertions(+), 55 deletions(-)

diff --git a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
index 7a297b3daef..cd81168753b 100644
--- a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
@@ -12,7 +12,6 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "boost/optional.hpp"
 #include "paddle/fluid/framework/data_layout_transform.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/memory/malloc.h"
diff --git a/paddle/fluid/operators/sequence_ops/sequence_concat_op.h b/paddle/fluid/operators/sequence_ops/sequence_concat_op.h
index 8d9302fa43b..4943e0e2ea0 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_concat_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_concat_op.h
@@ -17,7 +17,6 @@
 #include
 #include
 
-#include "boost/optional.hpp"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/concat_and_split.h"
 
diff --git a/paddle/fluid/platform/mkldnn_reuse.h b/paddle/fluid/platform/mkldnn_reuse.h
index 05ebedf611a..41a4f551ced 100644
--- a/paddle/fluid/platform/mkldnn_reuse.h
+++ b/paddle/fluid/platform/mkldnn_reuse.h
@@ -20,7 +20,6 @@ limitations under the License. */
 #include
 #include
 
-#include "boost/optional.hpp"
 #include "paddle/fluid/framework/data_layout_transform.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/pool_op.h"
diff --git a/paddle/fluid/pybind/reader_py.cc b/paddle/fluid/pybind/reader_py.cc
index 9c80bb8a67e..36c09f543a6 100644
--- a/paddle/fluid/pybind/reader_py.cc
+++ b/paddle/fluid/pybind/reader_py.cc
@@ -22,7 +22,7 @@
 #include
 
 #include "Python.h"
-#include "boost/optional.hpp"
+
 #include "gflags/gflags.h"
 #include "paddle/fluid/framework/reader.h"
 #include "paddle/fluid/imperative/layer.h"
diff --git a/paddle/infrt/host_context/mlir_to_runtime_translate.cc b/paddle/infrt/host_context/mlir_to_runtime_translate.cc
index 9292e593a70..81b41d61ded 100644
--- a/paddle/infrt/host_context/mlir_to_runtime_translate.cc
+++ b/paddle/infrt/host_context/mlir_to_runtime_translate.cc
@@ -31,7 +31,6 @@
 #include
 #include
 
-#include "boost/optional.hpp"
 #include "paddle/infrt/common/string.h"
 #include "paddle/infrt/dialect/dense_tensor.h"
 #include "paddle/infrt/dialect/mlir_loader.h"
@@ -124,118 +123,118 @@ bool MlirToRuntimeTranslator::EmitConstantOp(mlir::Operation* op) {
 }
 
 template <>
-boost::optional<int32_t> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<int32_t> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::IntegerAttr>()) return boost::none;
+  if (!attr.isa<mlir::IntegerAttr>()) return paddle::none;
   if (attr.isa<mlir::IntegerAttr>()) {
     auto val = attr.cast<mlir::IntegerAttr>();
     if (val.getType().isInteger(32)) {
       return val.getValue().getSExtValue();
     }
   }
-  return boost::none;
+  return paddle::none;
 }
 
 template <>
-boost::optional<int64_t> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<int64_t> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::IntegerAttr>()) return boost::none;
+  if (!attr.isa<mlir::IntegerAttr>()) return paddle::none;
   if (attr.isa<mlir::IntegerAttr>()) {
     auto val = attr.cast<mlir::IntegerAttr>();
     if (val.getType().isInteger(64)) {
       return val.getValue().getSExtValue();
     }
   }
-  return boost::none;
+  return paddle::none;
 }
 
 // TODO(Superjomn) Make double and float parsing share some thing.
 template <>
-boost::optional<float> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<float> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::FloatAttr>()) return boost::none;
+  if (!attr.isa<mlir::FloatAttr>()) return paddle::none;
   if (attr.isa<mlir::FloatAttr>()) {
     auto val = attr.cast<mlir::FloatAttr>();
     if (val.getType().isF32()) return val.getValueAsDouble();
   }
-  return boost::none;
+  return paddle::none;
 }
 
 template <>
-boost::optional<bool> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<bool> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::BoolAttr>()) return boost::none;
+  if (!attr.isa<mlir::BoolAttr>()) return paddle::none;
   if (attr.isa<mlir::BoolAttr>()) {
     auto val = attr.cast<mlir::BoolAttr>();
     return val.getValue();
   }
-  return boost::none;
+  return paddle::none;
 }
 
 template <>
-boost::optional<double> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<double> MlirToRuntimeTranslator::EmitAttribute(
    const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::FloatAttr>()) return boost::none;
+  if (!attr.isa<mlir::FloatAttr>()) return paddle::none;
   if (attr.isa<mlir::FloatAttr>()) {
     auto val = attr.cast<mlir::FloatAttr>();
     if (val.getType().isF64()) return val.getValueAsDouble();
   }
-  return boost::none;
+  return paddle::none;
 }
 
 template <>
-boost::optional<::infrt::TargetType> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<::infrt::TargetType> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<::infrt::TargetAttr>()) return boost::none;
+  if (!attr.isa<::infrt::TargetAttr>()) return paddle::none;
   if (attr.isa<::infrt::TargetAttr>()) {
     return attr.cast<::infrt::TargetAttr>().getTarget();
   }
-  return boost::none;
+  return paddle::none;
 }
 
 template <>
-boost::optional<::infrt::LayoutType> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<::infrt::LayoutType> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<::infrt::LayoutAttr>()) return boost::none;
+  if (!attr.isa<::infrt::LayoutAttr>()) return paddle::none;
   if (attr.isa<::infrt::LayoutAttr>()) {
     return attr.cast<::infrt::LayoutAttr>().getLayout();
   }
-  return boost::none;
+  return paddle::none;
 }
 
 template <>
-boost::optional<::infrt::PrecisionType> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<::infrt::PrecisionType> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<::infrt::PrecisionAttr>()) return boost::none;
+  if (!attr.isa<::infrt::PrecisionAttr>()) return paddle::none;
   if (attr.isa<::infrt::PrecisionAttr>()) {
     return attr.cast<::infrt::PrecisionAttr>().getPrecision();
   }
-  return boost::none;
+  return paddle::none;
 }
 
 template <>
-boost::optional<std::string> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<std::string> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::StringAttr>()) return boost::none;
+  if (!attr.isa<mlir::StringAttr>()) return paddle::none;
   return attr.cast<mlir::StringAttr>().getValue().str();
 }
 
-#define PROCESS_ARRAY_INT(type__, bits__)                                      \
-  template <>                                                                  \
-  boost::optional<std::vector<type__>> MlirToRuntimeTranslator::EmitAttribute( \
-      const mlir::Attribute& attr) {                                           \
-    if (!attr.isa<mlir::ArrayAttr>()) return boost::none;                      \
-    auto array = attr.cast<mlir::ArrayAttr>();                                 \
-    CHECK(!array.empty());                                                     \
-                                                                               \
-    if (!array[0].getType().isInteger(bits__)) {                               \
-      return boost::none;                                                      \
-    }                                                                          \
-                                                                               \
-    std::vector<type__> res;                                                   \
-    for (auto& v : array) {                                                    \
-      res.push_back(v.cast<mlir::IntegerAttr>().getValue().getSExtValue());    \
-    }                                                                          \
-    return res;                                                                \
+#define PROCESS_ARRAY_INT(type__, bits__)                                    \
+  template <>                                                                \
+  paddle::optional<std::vector<type__>>                                      \
+  MlirToRuntimeTranslator::EmitAttribute(const mlir::Attribute& attr) {      \
+    if (!attr.isa<mlir::ArrayAttr>()) return paddle::none;                   \
+    auto array = attr.cast<mlir::ArrayAttr>();                               \
+    CHECK(!array.empty());                                                   \
+                                                                             \
+    if (!array[0].getType().isInteger(bits__)) {                             \
+      return paddle::none;                                                   \
+    }                                                                        \
+                                                                             \
+    std::vector<type__> res;                                                 \
+    for (auto& v : array) {                                                  \
+      res.push_back(v.cast<mlir::IntegerAttr>().getValue().getSExtValue());  \
+    }                                                                        \
+    return res;                                                              \
 }
 PROCESS_ARRAY_INT(bool, 1);
@@ -244,13 +243,13 @@ PROCESS_ARRAY_INT(int32_t, 32);
 PROCESS_ARRAY_INT(int64_t, 64);
 
 template <>
-boost::optional<std::vector<float>> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<std::vector<float>> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::ArrayAttr>()) return boost::none;
+  if (!attr.isa<mlir::ArrayAttr>()) return paddle::none;
   auto array = attr.cast<mlir::ArrayAttr>();
   CHECK(!array.empty());
 
-  if (!array[0].getType().isF32()) return boost::none;
+  if (!array[0].getType().isF32()) return paddle::none;
 
   std::vector<float> res;
   for (auto& v : array) {
@@ -260,13 +259,13 @@ boost::optional<std::vector<float>> MlirToRuntimeTranslator::EmitAttribute(
 }
 
 template <>
-boost::optional<std::vector<double>> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<std::vector<double>> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::ArrayAttr>()) return boost::none;
+  if (!attr.isa<mlir::ArrayAttr>()) return paddle::none;
   auto array = attr.cast<mlir::ArrayAttr>();
   CHECK(!array.empty());
 
-  if (!array[0].getType().isF64()) return boost::none;
+  if (!array[0].getType().isF64()) return paddle::none;
 
   std::vector<double> res;
   for (auto& v : array) {
diff --git a/paddle/infrt/host_context/mlir_to_runtime_translate.h b/paddle/infrt/host_context/mlir_to_runtime_translate.h
index 27a7f201686..64dc770489c 100644
--- a/paddle/infrt/host_context/mlir_to_runtime_translate.h
+++ b/paddle/infrt/host_context/mlir_to_runtime_translate.h
@@ -75,7 +75,7 @@ class MlirToRuntimeTranslator {
   bool EmitCallOp(mlir::Operation* op, function_defs_t* function_table);
 
   template <typename T>
-  boost::optional<T> EmitAttribute(const mlir::Attribute& attr);
+  paddle::optional<T> EmitAttribute(const mlir::Attribute& attr);
 
   Value* GetOpResult(mlir::Operation* op);
 
-- 
GitLab
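
Editor's note: for readers applying the same migration elsewhere, the sketch below illustrates the pattern this patch converts to: a parsing function returns an engaged optional on success and an empty one (the "none" case) on failure, instead of a sentinel value. This is a standalone example, not Paddle code: std::optional and std::nullopt stand in for paddle::optional and paddle::none, and ParseInt32 is a hypothetical analogue of the EmitAttribute<int32_t> specialization above.

    // Standalone sketch (C++17), not Paddle code: std::optional/std::nullopt
    // stand in for paddle::optional/paddle::none, and ParseInt32 is a
    // hypothetical analogue of EmitAttribute<int32_t>.
    #include <cstdint>
    #include <iostream>
    #include <limits>
    #include <optional>
    #include <string>

    // Return an engaged optional for a valid 32-bit integer literal,
    // and an empty optional for anything else.
    std::optional<int32_t> ParseInt32(const std::string& s) {
      try {
        const long long v = std::stoll(s);
        if (v < std::numeric_limits<int32_t>::min() ||
            v > std::numeric_limits<int32_t>::max()) {
          return std::nullopt;  // parsed, but does not fit in 32 bits
        }
        return static_cast<int32_t>(v);
      } catch (const std::exception&) {
        // Non-numeric input: the analogue of the attribute-type mismatch
        // paths that "return paddle::none;" in the patched code.
        return std::nullopt;
      }
    }

    int main() {
      for (const std::string s : {"42", "99999999999", "oops"}) {
        if (auto v = ParseInt32(s)) {
          std::cout << s << " -> " << *v << "\n";
        } else {
          std::cout << s << " -> none\n";
        }
      }
    }

As the hunks above show, paddle::optional keeps boost::optional's interface (including a paddle::none sentinel), so call sites keep their shape: test the returned optional, then dereference it.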