diff --git a/paddle/fluid/framework/dlpack_tensor.cc b/paddle/fluid/framework/dlpack_tensor.cc
index 057a19f31759b47f61b7a17a71920183f06ec18d..b7bca733b8f9e63bd5bc88ecced6b55c2078e886 100644
--- a/paddle/fluid/framework/dlpack_tensor.cc
+++ b/paddle/fluid/framework/dlpack_tensor.cc
@@ -69,7 +69,8 @@ static DLDataType GetDLDataTypeFromTypeIndex(proto::VarType::Type type) {
 #undef REG_DL_DATA_TYPE
 }
 
-struct DLDeviceVisitor : public boost::static_visitor<::DLDevice> {
+struct DLDeviceVisitor
+    : public std::unary_function {
   inline ::DLDevice operator()(const platform::CPUPlace &place) const {
     ::DLDevice device;
     device.device_type = kDLCPU;
diff --git a/paddle/fluid/framework/ir/generate_pass.cc b/paddle/fluid/framework/ir/generate_pass.cc
index 160304784a9fa69d3248f773cf48a90470ffd3f2..455af83427819afaf90c1d7f387e4856be7930a6 100644
--- a/paddle/fluid/framework/ir/generate_pass.cc
+++ b/paddle/fluid/framework/ir/generate_pass.cc
@@ -21,7 +21,7 @@ namespace paddle {
 namespace framework {
 namespace ir {
 
-class element_visitor : public boost::static_visitor {
+class element_visitor {
  public:
   explicit element_visitor(int index) : index_(index) {}
 
@@ -47,7 +47,7 @@ class element_visitor : public boost::static_visitor {
   int index_;
 };
 
-class operation_visitor : public boost::static_visitor {
+class operation_visitor {
  public:
   explicit operation_visitor(const proto::PassDesc::OperationType& type)
       : type_(type) {}
diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index 169722b971b29fc97b41040b69810dc9c7bb0e37..c0a9528c28126ff3f8abc50066ca28640f5354e5 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -764,7 +764,7 @@ void OpDesc::RenameInput(const std::string &old_name,
   need_update_ = true;
 }
 
-struct SetAttrDescVisitor : public boost::static_visitor {
+struct SetAttrDescVisitor {
   explicit SetAttrDescVisitor(proto::OpDesc::Attr *attr) : attr_(attr) {}
   mutable proto::OpDesc::Attr *attr_;
   void operator()(int v) const { attr_->set_i(v); }
diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc
index dd80458b624c6c8857515252f638fc7b1725788d..dbb549efa2519d48136648ba0cfbba2c4d82cc9a 100644
--- a/paddle/fluid/framework/tensor_util.cc
+++ b/paddle/fluid/framework/tensor_util.cc
@@ -689,7 +689,7 @@ inline void AnyImpl(Predicate predicate,
 }
 
 template
-class AnyVisitor : public boost::static_visitor {
+class AnyVisitor : public std::unary_function {
  private:
   const framework::Tensor& tensor_;
   Predicate predicate_;
@@ -774,7 +774,7 @@ class AnyVisitor : public boost::static_visitor {
 };
 
 template
-class AnyOutVisitor : public boost::static_visitor<> {
+class AnyOutVisitor : public std::unary_function {
  private:
   const framework::Tensor& tensor_;
   mutable framework::Tensor* out_;
@@ -843,7 +843,7 @@ inline void AllImpl(Predicate predicate,
 }
 
 template
-class AllOutVisitor : public boost::static_visitor<> {
+class AllOutVisitor : public std::unary_function {
  private:
   const framework::Tensor& tensor_;
   mutable framework::Tensor* out_;
@@ -942,7 +942,7 @@ static inline void __global__ BothFalse(const T* cmp, T* out, int element_num) {
 }
 #endif
 
-struct BothFalseVisitor : public boost::static_visitor<> {
+struct BothFalseVisitor : public std::unary_function {
   const framework::Tensor& in_;
   mutable framework::Tensor* out_;
   BothFalseVisitor(const framework::Tensor& in, framework::Tensor* out)
diff --git a/paddle/fluid/imperative/gradient_accumulator.cc b/paddle/fluid/imperative/gradient_accumulator.cc
index 4a8fc6a5d546ce150f1b46de7885e1a9f250dd91..f6883fe6c6a923429235874ab788e7bd2f224c1f 100644
--- a/paddle/fluid/imperative/gradient_accumulator.cc
+++ b/paddle/fluid/imperative/gradient_accumulator.cc
@@ -79,7 +79,8 @@ static void MoveOrCopyVar(framework::Variable* dst,
 }
 
 template
-class TensorAddFunctor : public boost::static_visitor<> {
+class TensorAddFunctor
+    : public std::unary_function {
  public:
   TensorAddFunctor(int64_t numel, const T* x, T* y)
       : numel_(numel), x_(x), y_(y) {}
diff --git a/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc b/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc
index 9d5f048a1651db0b16c05cafca79afddbcb9a1a0..57c5941d5227d9943568ec4ac8f387bfb1666889 100644
--- a/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc
+++ b/paddle/fluid/memory/allocation/naive_best_fit_allocator.cc
@@ -59,7 +59,7 @@ uint64_t Release(const Place &place);
 template
 size_t Used(const Place &place);
 
-struct Usage : public boost::static_visitor {
+struct Usage {
   size_t operator()(const platform::CPUPlace &cpu) const;
   size_t operator()(const platform::CUDAPlace &gpu) const;
   size_t operator()(const platform::CUDAPinnedPlace &cuda_pinned) const;
@@ -894,7 +894,7 @@ size_t Used(const platform::CustomPlace &place) {
 #endif
 }
 
-struct AllocVisitor : public boost::static_visitor {
+struct AllocVisitor : std::unary_function {
   inline explicit AllocVisitor(size_t size) : size_(size) {}
 
   template
@@ -906,7 +906,7 @@ struct AllocVisitor : public boost::static_visitor {
   size_t size_;
 };
 
-struct FreeVisitor : public boost::static_visitor {
+struct FreeVisitor : public std::unary_function {
   inline explicit FreeVisitor(void *ptr, size_t size)
       : ptr_(ptr), size_(size) {}
@@ -920,7 +920,7 @@ struct FreeVisitor : public boost::static_visitor {
   size_t size_;
 };
 
-struct ReleaseVisitor : public boost::static_visitor {
+struct ReleaseVisitor : std::unary_function {
   template
   inline uint64_t operator()(const Place &place) const {
     return Release(place);
diff --git a/paddle/fluid/operators/array_to_lod_tensor_op.cc b/paddle/fluid/operators/array_to_lod_tensor_op.cc
index 5b23ff604759acb302144c6506e6404e371051e6..a2af64e2276801a05c114c7b7c6477949d781df8 100644
--- a/paddle/fluid/operators/array_to_lod_tensor_op.cc
+++ b/paddle/fluid/operators/array_to_lod_tensor_op.cc
@@ -43,7 +43,7 @@ struct ArrayToLoDFunctorImpl {
   void apply();
 };
 
-struct ArrayToLoDFunctor : public boost::static_visitor {
+struct ArrayToLoDFunctor : public std::unary_function {
   std::vector in;
   mutable framework::Tensor *out;
diff --git a/paddle/fluid/operators/controlflow/feed_op.cc b/paddle/fluid/operators/controlflow/feed_op.cc
index 00806d18c066fee1106cec10b8426c0b8b8eb28b..4cef104496510f4c75d2857cadc1538eb10476cc 100644
--- a/paddle/fluid/operators/controlflow/feed_op.cc
+++ b/paddle/fluid/operators/controlflow/feed_op.cc
@@ -29,7 +29,7 @@ namespace operators {
 
 // FeedVariableVisitor is to feed the variable data
 // according to data type (LoDTensor or Strings).
-class FeedVariableVisitor : public boost::static_visitor {
+class FeedVariableVisitor {
  public:
   explicit FeedVariableVisitor(framework::Variable *out_var,
                                const platform::Place &place)
diff --git a/paddle/fluid/operators/controlflow/op_variant.cc b/paddle/fluid/operators/controlflow/op_variant.cc
index 60f58955adbedaa0a01fb7e698e173bd48bf2068..48b7a4341067284a4c48e2c9e89c26e077d49570 100644
--- a/paddle/fluid/operators/controlflow/op_variant.cc
+++ b/paddle/fluid/operators/controlflow/op_variant.cc
@@ -17,24 +17,21 @@ namespace paddle {
 namespace operators {
 
-struct InputsVisitor
-    : public boost::static_visitor {
+struct InputsVisitor {
   template
   const framework::VariableNameMap *operator()(const OpType *op) const {
     return &(op->Inputs());
   }
 };
 
-struct OutputsVisitor
-    : public boost::static_visitor {
+struct OutputsVisitor {
   template
   const framework::VariableNameMap *operator()(const OpType *op) const {
     return &(op->Outputs());
   }
 };
 
-struct AttributeMapVisitor
-    : public boost::static_visitor {
+struct AttributeMapVisitor {
   const framework::AttributeMap *operator()(const framework::OpDesc *op) const {
     return &(op->GetAttrMap());
   }
@@ -45,7 +42,7 @@ struct AttributeMapVisitor
   }
 };
 
-struct RawPointerVisitor : public boost::static_visitor {
+struct RawPointerVisitor {
   template
   const void *operator()(const OpType *op) const {
     return op;
diff --git a/paddle/fluid/operators/lod_tensor_to_array_op.cc b/paddle/fluid/operators/lod_tensor_to_array_op.cc
index 147b23f56acdcfca0c3daf265a3121a6fbbe42e7..d4b36f31e6201f8e52eb388481eb80a3dfcbfa46 100644
--- a/paddle/fluid/operators/lod_tensor_to_array_op.cc
+++ b/paddle/fluid/operators/lod_tensor_to_array_op.cc
@@ -44,7 +44,8 @@ struct LoDTensorToArrayFunctorImpl {
   void apply();
 };
 
-struct LoDTensorToArrayFunctor : public boost::static_visitor {
+struct LoDTensorToArrayFunctor
+    : public std::unary_function {
   std::vector ref_inputs_;
   mutable std::vector outputs_;
   const framework::Tensor &input_;
diff --git a/paddle/fluid/operators/math/matrix_bit_code.cc b/paddle/fluid/operators/math/matrix_bit_code.cc
index 8a6f098baefd9539425fa4882a95a34b72c92728..0648f2497d9d75c4771014ec285718263c3f96cb 100644
--- a/paddle/fluid/operators/math/matrix_bit_code.cc
+++ b/paddle/fluid/operators/math/matrix_bit_code.cc
@@ -19,7 +19,7 @@ namespace operators {
 namespace math {
 
 template
-struct MatrixBitCodeFunctorAdd : public boost::static_visitor {
+struct MatrixBitCodeFunctorAdd {
   const framework::Tensor &vec_;
   framework::Tensor *tmat_;
@@ -51,7 +51,7 @@ void MatrixBitCodeFunctor::Add(const framework::Tensor &vec,
 }
 
 template
-struct MatrixBitCodeFunctorAddGrad : public boost::static_visitor {
+struct MatrixBitCodeFunctorAddGrad {
   const framework::Tensor &tmat_;
   framework::Tensor *vec_;
   MatrixBitCodeFunctorAddGrad(const framework::Tensor &tmat,
@@ -83,7 +83,7 @@ void MatrixBitCodeFunctor::AddGrad(const framework::Tensor &tmat,
 }
 
 template
-struct MatrixBitCodeFunctorSum : public boost::static_visitor {
+struct MatrixBitCodeFunctorSum {
   const framework::Tensor &tmat_;
   framework::Tensor *sum_;
   T scale_sum_;
@@ -125,7 +125,7 @@ void MatrixBitCodeFunctor::Sum(const framework::Tensor &tmat,
 }
 
 template
-struct MatrixBitCodeFunctorMul : public boost::static_visitor {
+struct MatrixBitCodeFunctorMul {
   framework::Tensor *tmat_;
   const framework::Tensor &weight_;
   const framework::Tensor &input_;
@@ -174,7 +174,7 @@ class ReservedVector : public std::vector {
 };
 
 template
-struct MatrixBitCodeFunctorMulGradWeight : public boost::static_visitor {
+struct MatrixBitCodeFunctorMulGradWeight {
   const framework::Tensor &tmat_;
   framework::Tensor *weight_;
   const framework::Tensor &input_;
@@ -224,8 +224,7 @@ void MatrixBitCodeFunctor::MulGradWeight(const framework::Tensor &tmat,
 }
 
 template
-struct MatrixBitCodeFunctorMulGradWeightSR
-    : public boost::static_visitor {
+struct MatrixBitCodeFunctorMulGradWeightSR {
   const framework::Tensor &tmat_;
   phi::SelectedRows *weight_;
   const framework::Tensor &input_;
@@ -280,7 +279,7 @@ void MatrixBitCodeFunctor::MulGradWeight(const framework::Tensor &tmat,
 }
 
 template
-struct MatrixBitCodeFunctorMulGradError : public boost::static_visitor {
+struct MatrixBitCodeFunctorMulGradError {
   const framework::Tensor &tmat_;
   const framework::Tensor &weight_;
   framework::Tensor *input_;
@@ -324,7 +323,7 @@ void MatrixBitCodeFunctor::MulGradError(const framework::Tensor &tmat,
 }
 
 template
-struct MatrixBitCodeFunctorSub : public boost::static_visitor {
+struct MatrixBitCodeFunctorSub {
   framework::Tensor *tmat_;
 
   explicit MatrixBitCodeFunctorSub(framework::Tensor *tmat) : tmat_(tmat) {}
diff --git a/paddle/fluid/platform/device/ipu/ipu_compiler.cc b/paddle/fluid/platform/device/ipu/ipu_compiler.cc
index 39ff4601b6749e021e5776dc7ef4982fb8e75542..330ddef577ef20f85805311d2640fe5ada1d0145 100644
--- a/paddle/fluid/platform/device/ipu/ipu_compiler.cc
+++ b/paddle/fluid/platform/device/ipu/ipu_compiler.cc
@@ -32,7 +32,7 @@ namespace ipu {
 
 namespace {
 
-struct CustomOpAttrVisitor : public boost::static_visitor {
+struct CustomOpAttrVisitor {
   CustomOpAttrVisitor(std::map* attr,
                       const std::string& attr_name)
       : attrs_(attr), attr_name_(attr_name) {}
@@ -82,7 +82,7 @@ struct CustomOpAttrVisitor : public boost::static_visitor {
   }
 };
 
-struct ConstantOpAttrVisitor : public boost::static_visitor {
+struct ConstantOpAttrVisitor {
   ConstantOpAttrVisitor(framework::LoDTensor* tensor, VarType::Type dtype)
       : tensor_(tensor), dtype_(dtype) {}
diff --git a/paddle/fluid/platform/variant.h b/paddle/fluid/platform/variant.h
index fb4772abd30621087bf45acecce78de476b3e85e..9682749898fc77af54d81c1dccdac3b474dcd9ca 100644
--- a/paddle/fluid/platform/variant.h
+++ b/paddle/fluid/platform/variant.h
@@ -40,7 +40,6 @@ limitations under the License. */
 #include
 #include
-#include
 
 #include "paddle/utils/any.h"
 #include "paddle/utils/optional.h"
diff --git a/paddle/fluid/pybind/global_value_getter_setter.cc b/paddle/fluid/pybind/global_value_getter_setter.cc
index 2871d1de56780f70777f43fa730cbdc34843ec77..b2a52e568aed9fa317b0592ac0b3f8f6b71ab5af 100644
--- a/paddle/fluid/pybind/global_value_getter_setter.cc
+++ b/paddle/fluid/pybind/global_value_getter_setter.cc
@@ -217,7 +217,7 @@ void BindGlobalValueGetterSetter(pybind11::module *module) {
         GlobalVarGetterSetterRegistry::CreateSetter(&var)); \
   } while (0)
 
-struct RegisterGetterSetterVisitor : public boost::static_visitor {
+struct RegisterGetterSetterVisitor {
   RegisterGetterSetterVisitor(const std::string &name,
                               bool is_writable,
                               void *value_ptr)
diff --git a/paddle/fluid/pybind/pybind_boost_headers.h b/paddle/fluid/pybind/pybind_boost_headers.h
index 623ec84acda6f67409b50c565fa0abea1fac8d17..2a25990944d141c3be962b6f901219ed0b177cba 100644
--- a/paddle/fluid/pybind/pybind_boost_headers.h
+++ b/paddle/fluid/pybind/pybind_boost_headers.h
@@ -18,14 +18,12 @@ limitations under the License. */
 #include
 
 #include "glog/logging.h"
-#include "paddle/fluid/platform/variant.h"
 #include "paddle/utils/variant.h"
 #include "pybind11/numpy.h"
 #include "pybind11/pybind11.h"
 #include "pybind11/stl.h"
 
 // Cast paddle::variant for PyBind.
 // Copy from
-
 // https://github.com/pybind/pybind11/issues/576#issuecomment-269563199
 namespace pybind11 {
 namespace detail {
@@ -78,10 +76,7 @@ struct paddle_variant_caster> {
   using Type = V;
 
   template
-  typename std::enable_if<
-      !std::is_same::value,
-      bool>::type
-  try_load(handle src, bool convert) {
+  bool try_load(handle src, bool convert) {
     auto caster = make_caster();
     if (!load_success_ && caster.load(src, convert)) {
       load_success_ = true;
@@ -112,13 +107,6 @@ struct paddle_variant_caster> {
     return false;
   }
 
-  template
-  typename std::enable_if::value,
-                          bool>::type
-  try_load(handle src, bool convert) {
-    return false;
-  }
-
   bool load(handle src, bool convert) {
     auto unused = {false, try_load(src, convert)...};
     (void)(unused);
@@ -128,11 +116,6 @@ struct paddle_variant_caster> {
   static handle cast(Type const& src,
                      return_value_policy policy,
                      handle parent) {
-    /*
-    auto paddle_variant_caster_visitor = [&](Type const& src)->handle {
-      return make_caster::cast(src, policy, parent);
-    }
-    */
     paddle_variant_caster_visitor visitor(policy, parent);
     return paddle::visit(visitor, src);
   }
diff --git a/paddle/phi/kernels/funcs/math_function.cc b/paddle/phi/kernels/funcs/math_function.cc
index 61cedb57faf2721b7332065272251ba6e1e18ffd..15a708f02f4974c0d8a5e3e485276c789ff37f41 100644
--- a/paddle/phi/kernels/funcs/math_function.cc
+++ b/paddle/phi/kernels/funcs/math_function.cc
@@ -214,7 +214,8 @@ void set_constant_with_place(
   phi::VisitDataType(tensor->dtype(), TensorSetConstantCPU(tensor, value));
 }
 
-struct TensorSetConstantWithPlace : public boost::static_visitor {
+struct TensorSetConstantWithPlace
+    : public std::unary_function {
   TensorSetConstantWithPlace(const paddle::platform::DeviceContext& context,
                              paddle::framework::Tensor* tensor,
                              float value)
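Every hunk above makes the same change: visitors written for `boost::apply_visitor` declared their result type by deriving from `boost::static_visitor<R>`, while `paddle::visit` (see `return paddle::visit(visitor, src);` in the pybind caster hunk) deduces the result type from `operator()` the way `std::visit` does, so the base class is either dropped outright or swapped for `std::unary_function`, whose only contribution is a pair of `argument_type`/`result_type` typedefs. Below is a minimal sketch of both styles; it uses `std::variant`/`std::visit` as stand-ins for the variant machinery the real call sites use, and `CPUPlace`/`CUDAPlace` here are made-up types, not Paddle's.

```cpp
// Sketch only: std::variant/std::visit stand in for the variant visitation
// used by the real code, and CPUPlace/CUDAPlace are hypothetical stand-ins.
#include <iostream>
#include <string>
#include <variant>

struct CPUPlace {};
struct CUDAPlace {
  int device_id = 0;
};

using Place = std::variant<CPUPlace, CUDAPlace>;

// Visitation only needs one operator() overload per alternative; no
// boost::static_visitor (or any other) base class is required.
struct PlaceNameVisitor {
  std::string operator()(const CPUPlace &) const { return "CPUPlace"; }
  std::string operator()(const CUDAPlace &p) const {
    return "CUDAPlace(" + std::to_string(p.device_id) + ")";
  }
};

// For the hunks that keep a base class, std::unary_function<Arg, Result>
// contributes only these two typedefs, spelled out here directly.
struct IsGpuPlaceVisitor {
  using argument_type = const Place &;
  using result_type = bool;
  bool operator()(const CPUPlace &) const { return false; }
  bool operator()(const CUDAPlace &) const { return true; }
};

int main() {
  Place place = CUDAPlace{1};
  // boost::apply_visitor read the result type from static_visitor<R>;
  // std::visit (like paddle::visit) deduces it from operator() instead.
  std::cout << std::visit(PlaceNameVisitor{}, place) << "\n";
  std::cout << std::boolalpha << std::visit(IsGpuPlaceVisitor{}, place) << "\n";
}
```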
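A side note on the second style: `std::unary_function` was deprecated in C++11 and removed in C++17, so spelling the `argument_type`/`result_type` typedefs out by hand, as `IsGpuPlaceVisitor` does in the sketch, is the forward-compatible way to keep them for any metaprogramming that still reads them.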