Unverified commit 957258d9, authored by Ruibiao Chen and committed by GitHub

Remove boost::optional and boost::none (#44029)

Parent: f1e61f04
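The commit is a mechanical substitution across the codebase: every boost::optional<T> becomes paddle::optional<T>, every boost::none becomes paddle::none, and the now-unused "boost/optional.hpp" includes are dropped. A minimal before/after sketch of the pattern, assuming paddle::optional (from "paddle/utils/optional.h") mirrors the boost interface with paddle::none as its empty sentinel; ParsePort is a hypothetical helper used only for illustration:

```cpp
// Sketch of the substitution this commit applies; not code from the diff.
// Assumes paddle::optional/paddle::none behave like their boost counterparts.
#include <string>

#include "paddle/utils/optional.h"

// Before: boost::optional<int> ParsePort(const std::string& s);
paddle::optional<int> ParsePort(const std::string& s) {
  if (s.empty()) return paddle::none;  // was: return boost::none;
  return std::stoi(s);                 // engaged optional, as with boost
}
```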
@@ -12,7 +12,6 @@
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "boost/optional.hpp"
 #include "paddle/fluid/framework/data_layout_transform.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/memory/malloc.h"
...
@@ -17,7 +17,6 @@
 #include <utility>
 #include <vector>

-#include "boost/optional.hpp"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/concat_and_split.h"
...
@@ -20,7 +20,6 @@ limitations under the License. */
 #include <utility>
 #include <vector>

-#include "boost/optional.hpp"
 #include "paddle/fluid/framework/data_layout_transform.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/pool_op.h"
...
@@ -22,7 +22,7 @@
 #include <vector>

 #include "Python.h"
-#include "boost/optional.hpp"
 #include "gflags/gflags.h"
 #include "paddle/fluid/framework/reader.h"
 #include "paddle/fluid/imperative/layer.h"
...
@@ -31,7 +31,6 @@
 #include <utility>
 #include <vector>

-#include "boost/optional.hpp"
 #include "paddle/infrt/common/string.h"
 #include "paddle/infrt/dialect/dense_tensor.h"
 #include "paddle/infrt/dialect/mlir_loader.h"
@@ -124,118 +123,118 @@ bool MlirToRuntimeTranslator::EmitConstantOp(mlir::Operation* op) {
 }

 template <>
-boost::optional<int32_t> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<int32_t> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::IntegerAttr>()) return boost::none;
+  if (!attr.isa<mlir::IntegerAttr>()) return paddle::none;
   if (attr.isa<mlir::IntegerAttr>()) {
     auto val = attr.cast<mlir::IntegerAttr>();
     if (val.getType().isInteger(32)) {
       return val.getValue().getSExtValue();
     }
   }
-  return boost::none;
+  return paddle::none;
 }

 template <>
-boost::optional<int64_t> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<int64_t> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::IntegerAttr>()) return boost::none;
+  if (!attr.isa<mlir::IntegerAttr>()) return paddle::none;
   if (attr.isa<mlir::IntegerAttr>()) {
     auto val = attr.cast<mlir::IntegerAttr>();
     if (val.getType().isInteger(64)) {
       return val.getValue().getSExtValue();
     }
   }
-  return boost::none;
+  return paddle::none;
 }

 // TODO(Superjomn) Make double and float parsing share some thing.
 template <>
-boost::optional<float> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<float> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::FloatAttr>()) return boost::none;
+  if (!attr.isa<mlir::FloatAttr>()) return paddle::none;
   if (attr.isa<mlir::FloatAttr>()) {
     auto val = attr.cast<mlir::FloatAttr>();
     if (val.getType().isF32()) return val.getValueAsDouble();
   }
-  return boost::none;
+  return paddle::none;
 }

 template <>
-boost::optional<bool> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<bool> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::BoolAttr>()) return boost::none;
+  if (!attr.isa<mlir::BoolAttr>()) return paddle::none;
   if (attr.isa<mlir::BoolAttr>()) {
     auto val = attr.cast<mlir::BoolAttr>();
     return val.getValue();
   }
-  return boost::none;
+  return paddle::none;
 }

 template <>
-boost::optional<double> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<double> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::FloatAttr>()) return boost::none;
+  if (!attr.isa<mlir::FloatAttr>()) return paddle::none;
   if (attr.isa<mlir::FloatAttr>()) {
     auto val = attr.cast<mlir::FloatAttr>();
     if (val.getType().isF64()) return val.getValueAsDouble();
   }
-  return boost::none;
+  return paddle::none;
 }

 template <>
-boost::optional<::infrt::TargetType> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<::infrt::TargetType> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<::infrt::TargetAttr>()) return boost::none;
+  if (!attr.isa<::infrt::TargetAttr>()) return paddle::none;
   if (attr.isa<::infrt::TargetAttr>()) {
     return attr.cast<::infrt::TargetAttr>().getTarget();
   }
-  return boost::none;
+  return paddle::none;
 }

 template <>
-boost::optional<::infrt::LayoutType> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<::infrt::LayoutType> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<::infrt::LayoutAttr>()) return boost::none;
+  if (!attr.isa<::infrt::LayoutAttr>()) return paddle::none;
   if (attr.isa<::infrt::LayoutAttr>()) {
     return attr.cast<::infrt::LayoutAttr>().getLayout();
   }
-  return boost::none;
+  return paddle::none;
 }

 template <>
-boost::optional<::infrt::PrecisionType> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<::infrt::PrecisionType> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<::infrt::PrecisionAttr>()) return boost::none;
+  if (!attr.isa<::infrt::PrecisionAttr>()) return paddle::none;
   if (attr.isa<::infrt::PrecisionAttr>()) {
     return attr.cast<::infrt::PrecisionAttr>().getPrecision();
   }
-  return boost::none;
+  return paddle::none;
 }

 template <>
-boost::optional<std::string> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<std::string> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::StringAttr>()) return boost::none;
+  if (!attr.isa<mlir::StringAttr>()) return paddle::none;
   return attr.cast<mlir::StringAttr>().getValue().str();
 }

 #define PROCESS_ARRAY_INT(type__, bits__)                                    \
   template <>                                                                \
-  boost::optional<std::vector<type__>> MlirToRuntimeTranslator::EmitAttribute( \
-      const mlir::Attribute& attr) {                                         \
-    if (!attr.isa<mlir::ArrayAttr>()) return boost::none;                    \
+  paddle::optional<std::vector<type__>>                                      \
+  MlirToRuntimeTranslator::EmitAttribute(const mlir::Attribute& attr) {      \
+    if (!attr.isa<mlir::ArrayAttr>()) return paddle::none;                   \
     auto array = attr.cast<mlir::ArrayAttr>();                               \
     CHECK(!array.empty());                                                   \
                                                                              \
     if (!array[0].getType().isInteger(bits__)) {                             \
-      return boost::none;                                                    \
+      return paddle::none;                                                   \
     }                                                                        \
                                                                              \
     std::vector<type__> res;                                                 \
     for (auto& v : array) {                                                  \
       res.push_back(v.cast<mlir::IntegerAttr>().getValue().getSExtValue());  \
     }                                                                        \
     return res;                                                              \
   }

 PROCESS_ARRAY_INT(bool, 1);
@@ -244,13 +243,13 @@ PROCESS_ARRAY_INT(int32_t, 32);
 PROCESS_ARRAY_INT(int64_t, 64);

 template <>
-boost::optional<std::vector<float>> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<std::vector<float>> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::ArrayAttr>()) return boost::none;
+  if (!attr.isa<mlir::ArrayAttr>()) return paddle::none;
   auto array = attr.cast<mlir::ArrayAttr>();
   CHECK(!array.empty());

-  if (!array[0].getType().isF32()) return boost::none;
+  if (!array[0].getType().isF32()) return paddle::none;

   std::vector<float> res;
   for (auto& v : array) {
@@ -260,13 +259,13 @@ boost::optional<std::vector<float>> MlirToRuntimeTranslator::EmitAttribute(
 }

 template <>
-boost::optional<std::vector<double>> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<std::vector<double>> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::ArrayAttr>()) return boost::none;
+  if (!attr.isa<mlir::ArrayAttr>()) return paddle::none;
   auto array = attr.cast<mlir::ArrayAttr>();
   CHECK(!array.empty());

-  if (!array[0].getType().isF64()) return boost::none;
+  if (!array[0].getType().isF64()) return paddle::none;

   std::vector<double> res;
   for (auto& v : array) {
...
@@ -75,7 +75,7 @@ class MlirToRuntimeTranslator {
   bool EmitCallOp(mlir::Operation* op, function_defs_t* function_table);

   template <typename T>
-  boost::optional<T> EmitAttribute(const mlir::Attribute& attr);
+  paddle::optional<T> EmitAttribute(const mlir::Attribute& attr);

   Value* GetOpResult(mlir::Operation* op);
...
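For context on how these specializations are consumed: a caller typically probes an attribute against candidate types and takes the first specialization that yields a value, relying on the optional's boolean conversion. A hedged sketch of that dispatch pattern, assuming paddle::optional supports contextual conversion to bool and operator* like boost::optional; SetAttr is a hypothetical sink, not a function from this diff:

```cpp
// Illustrative caller-side pattern, not verbatim from the translator:
// try EmitAttribute<T> for each supported T and keep the first hit.
if (auto v = EmitAttribute<int32_t>(attr)) {
  SetAttr(*v);  // engaged optional converts to true in a condition
} else if (auto v = EmitAttribute<int64_t>(attr)) {
  SetAttr(*v);
} else if (auto v = EmitAttribute<float>(attr)) {
  SetAttr(*v);
} else if (auto v = EmitAttribute<std::string>(attr)) {
  SetAttr(*v);  // *v yields the wrapped std::string
}
```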