diff --git a/paddle/fluid/distributed/collective/HCCLTools.h b/paddle/fluid/distributed/collective/HCCLTools.h
index 5f21555e0d94f54b12d23dc100f78e3afa03313a..6eb169d8fff0501d2d31e60030128e2527bb8c71 100644
--- a/paddle/fluid/distributed/collective/HCCLTools.h
+++ b/paddle/fluid/distributed/collective/HCCLTools.h
@@ -18,7 +18,6 @@
 #include
 
-#include "boost/variant.hpp"
 #include "paddle/fluid/distributed/collective/Types.h"
 #include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/framework/variable.h"
@@ -27,6 +26,7 @@
 #include "paddle/fluid/platform/device/npu/npu_info.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/enforce.h"
+#include "paddle/utils/variant.h"
 
 namespace paddle {
 namespace distributed {
diff --git a/paddle/fluid/distributed/collective/NCCLTools.h b/paddle/fluid/distributed/collective/NCCLTools.h
index ce3cf43f242d598572b168a368c92ecd52327882..197761dc3c3234de53ed902676f95a1ed00f0238 100644
--- a/paddle/fluid/distributed/collective/NCCLTools.h
+++ b/paddle/fluid/distributed/collective/NCCLTools.h
@@ -25,7 +25,6 @@
 #include
 
-#include "boost/variant.hpp"
 #include "paddle/fluid/distributed/collective/Types.h"
 #include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/framework/variable.h"
@@ -43,6 +42,7 @@
 #endif
 
 #include "paddle/fluid/platform/enforce.h"
+#include "paddle/utils/variant.h"
 
 namespace paddle {
 namespace distributed {
diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
index 3379ed4e1633fdb970626105d3f882cebdfb3376..6910f9e537fc8a83cb16f5a866fa388ad8edd211 100644
--- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc
+++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -336,7 +336,7 @@ static std::string AttrTypeToString(const proto::AttrType& type) {
     }
     default: {
       PADDLE_THROW(platform::errors::Fatal(
-          "AttrType of type boost::variant only supports specific data types."
+          "AttrType of type paddle::variant only supports specific data types."
"However, detected unrecognized AttrType: %d", type)); } @@ -344,37 +344,39 @@ static std::string AttrTypeToString(const proto::AttrType& type) { return ret; } -template -static std::string GetAttrValue(const framework::Attribute& attr, - bool is_vector) { +template +static typename std::enable_if::type GetAttrValue( + const framework::Attribute& attr) { std::string val = ""; - if (is_vector) { - val += "{"; - for (auto x : BOOST_GET_CONST(std::vector, attr)) { - val += std::to_string(x) + ","; - } - if (val.size() > 1) val.pop_back(); - val += "}"; - } else { - val = std::to_string(BOOST_GET_CONST(T, attr)); + val += "{"; + for (auto x : BOOST_GET_CONST(std::vector, attr)) { + val += std::to_string(x) + ","; } + if (val.size() > 1) val.pop_back(); + val += "}"; return val; } +template +static typename std::enable_if::type GetAttrValue( + const framework::Attribute& attr) { + return std::to_string(BOOST_GET_CONST(T, attr)); +} + static std::pair GetAttrType( const framework::Attribute& attr, bool is_arg) { std::string ret = ""; std::string val = ""; - size_t variant_pos = attr.which(); + size_t variant_pos = attr.index(); switch (variant_pos) { case (1): { ret = "int"; - val = GetAttrValue(attr, false); + val = GetAttrValue(attr); break; } case (2): { ret = "float"; - val = GetAttrValue(attr, false); + val = GetAttrValue(attr); break; } case (3): { @@ -386,13 +388,13 @@ static std::pair GetAttrType( case (4): { ret = "std::vector"; if (is_arg) ret += "&"; - val = GetAttrValue(attr, true); + val = GetAttrValue(attr); break; } case (5): { ret = "std::vector"; if (is_arg) ret += "&"; - val = GetAttrValue(attr, true); + val = GetAttrValue(attr); break; } case (6): { @@ -408,13 +410,13 @@ static std::pair GetAttrType( } case (7): { ret = "bool"; - val = GetAttrValue(attr, false); + val = GetAttrValue(attr); break; } case (8): { ret = "std::vector"; if (is_arg) ret += "&"; - val = GetAttrValue(attr, true); + val = GetAttrValue(attr); break; } case (9): { @@ -423,7 +425,7 @@ static std::pair GetAttrType( } case (10): { ret = "int64_t"; - val = GetAttrValue(attr, false); + val = GetAttrValue(attr); break; } case (11): { @@ -434,18 +436,18 @@ static std::pair GetAttrType( case (12): { ret = "std::vector"; if (is_arg) ret += "&"; - val = GetAttrValue(attr, true); + val = GetAttrValue(attr); break; } case (13): { ret = "std::vector"; if (is_arg) ret += "&"; - val = GetAttrValue(attr, true); + val = GetAttrValue(attr); break; } default: { PADDLE_THROW(platform::errors::Fatal( - "AttrType of type boost::variant only supports specific data types." + "AttrType of type paddle::variant only supports specific data types." "However, detected unrecognized AttrType: %d", variant_pos)); } diff --git a/paddle/fluid/framework/attribute.cc b/paddle/fluid/framework/attribute.cc index 2599e3232cac7657429a47c226e74a9f9425bb4c..ed50d5f6bfc4fc6843b1e92d92f24d8b7a182036 100644 --- a/paddle/fluid/framework/attribute.cc +++ b/paddle/fluid/framework/attribute.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/attribute.h" +#include "boost/blank.hpp" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/attribute.h b/paddle/fluid/framework/attribute.h index 90fa49699ac6024b70139ccac1f29b3b3b4769d5..f9cd5c7383662270acd58b8c32f7f328ed24acc9 100644 --- a/paddle/fluid/framework/attribute.h +++ b/paddle/fluid/framework/attribute.h @@ -23,12 +23,12 @@ limitations under the License. 
 #include
 #include
-#include "boost/variant/get.hpp"
 #include "paddle/fluid/framework/framework.pb.h"
 #include "paddle/fluid/framework/type_defs.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/errors.h"
 #include "paddle/utils/any.h"
+#include "paddle/utils/variant.h"
 
 namespace paddle {
 namespace framework {
 
@@ -45,8 +45,8 @@ struct ExtractAttribute {
   T* operator()(Attribute& attr) const {
     T* attr_value = nullptr;
     try {
-      attr_value = &boost::get<T>(attr);
-    } catch (boost::bad_get& bad_get) {
+      attr_value = &paddle::get<T>(attr);
+    } catch (paddle::bad_variant_access const& bad_get) {
       PADDLE_THROW(platform::errors::InvalidArgument(
           "Cannot get attribute (%s) by type %s, its type is %s.",
           attr_name_,
@@ -80,8 +80,8 @@ struct ExtractAttribute<bool> {
     }
     bool* attr_value = nullptr;
     try {
-      attr_value = &boost::get<bool>(attr);
-    } catch (boost::bad_get& bad_get) {
+      attr_value = &paddle::get<bool>(attr);
+    } catch (paddle::bad_variant_access const& bad_get) {
       PADDLE_THROW(platform::errors::InvalidArgument(
           "Cannot get attribute (%s) by type bool, its type is %s.",
           attr_name_,
@@ -108,8 +108,8 @@ struct ExtractAttribute<int64_t> {
     }
     int64_t* attr_value = nullptr;
     try {
-      attr_value = &boost::get<int64_t>(attr);
-    } catch (boost::bad_get& bad_get) {
+      attr_value = &paddle::get<int64_t>(attr);
+    } catch (paddle::bad_variant_access const& bad_get) {
       PADDLE_THROW(platform::errors::InvalidArgument(
           "Cannot get attribute (%s) by type int64_t, its type is %s.",
           attr_name_,
@@ -138,8 +138,8 @@ struct ExtractAttribute<std::vector<int64_t>> {
     }
     std::vector<int64_t>* attr_value = nullptr;
     try {
-      attr_value = &boost::get<std::vector<int64_t>>(attr);
-    } catch (boost::bad_get& bad_get) {
+      attr_value = &paddle::get<std::vector<int64_t>>(attr);
+    } catch (paddle::bad_variant_access const& bad_get) {
       PADDLE_THROW(platform::errors::InvalidArgument(
           "Cannot get attribute (%s) by type std::vector<int64_t>, its type is "
           "%s.",
@@ -167,8 +167,8 @@ struct ExtractAttribute<float> {
     }
     float* attr_value = nullptr;
     try {
-      attr_value = &boost::get<float>(attr);
-    } catch (boost::bad_get& bad_get) {
+      attr_value = &paddle::get<float>(attr);
+    } catch (paddle::bad_variant_access const& bad_get) {
       PADDLE_THROW(platform::errors::InvalidArgument(
           "Cannot get attribute (%s) by type float, its type is %s.",
           attr_name_,
@@ -197,8 +197,8 @@ struct ExtractAttribute<std::vector<double>> {
     }
     std::vector<double>* attr_value = nullptr;
     try {
-      attr_value = &boost::get<std::vector<double>>(attr);
-    } catch (boost::bad_get& bad_get) {
+      attr_value = &paddle::get<std::vector<double>>(attr);
+    } catch (paddle::bad_variant_access const& bad_get) {
       PADDLE_THROW(platform::errors::InvalidArgument(
           "Cannot get attribute (%s) by type std::vector<double>, its type is "
           "%s.",
@@ -214,11 +214,11 @@ struct ExtractAttribute<std::vector<double>> {
 template <typename T>
 inline proto::AttrType AttrTypeID() {
   Attribute tmp = T();
-  return static_cast<proto::AttrType>(tmp.which() - 1);
+  return static_cast<proto::AttrType>(tmp.index() - 1);
 }
 
 inline proto::AttrType AttrTypeID(const Attribute& attr) {
-  return static_cast<proto::AttrType>(attr.which() - 1);
+  return static_cast<proto::AttrType>(attr.index() - 1);
 }
 
 class AttrReader {
diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc
index 117b4b9686ec5587b8c11d99e17cc9c627fa3024..9d62fd8100b0832d30f50139e7a92534062574d6 100644
--- a/paddle/fluid/framework/block_desc.cc
+++ b/paddle/fluid/framework/block_desc.cc
@@ -272,7 +272,7 @@ void BlockDesc::MoveFrom(BlockDesc *block) {
   for (const auto &pair : src_op->GetAttrMap()) {
     const auto &attr_name = pair.first;
     const auto &attr_value = pair.second;
-    auto attr_type = static_cast<proto::AttrType>(attr_value.which() - 1);
+    auto attr_type = static_cast<proto::AttrType>(attr_value.index() - 1);
     if (attr_type == proto::AttrType::BLOCK) {
       auto block_id = BOOST_GET_CONST(BlockDesc *, attr_value)->ID();
       dst_op->SetBlockAttr(attr_name, prog_->MutableBlock(block_id));
diff --git a/paddle/fluid/framework/details/async_ssa_graph_executor.cc b/paddle/fluid/framework/details/async_ssa_graph_executor.cc
index 0ae69695549e529d821b28480c0eec9ab0be532a..f22e62fa0aa5bd9b1a0445e46022d4fe9c605b88 100644
--- a/paddle/fluid/framework/details/async_ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/async_ssa_graph_executor.cc
@@ -174,7 +174,7 @@ FetchResultType AsyncSSAGraphExecutor::Run(
   HandleException();
 
   FetchList ret;
-  auto &val = BOOST_GET(FetchList, fetch_data);
+  auto &val = boost::get<FetchList>(fetch_data);
   for (size_t fetch_idx = 0; fetch_idx < fetch_tensors.size(); ++fetch_idx) {
     if (data_is_lod_tensor(val.at(fetch_idx))) {
       std::vector<const LoDTensor *> lodtensor_ptrs;
diff --git a/paddle/fluid/framework/details/fetch_async_op_handle.cc b/paddle/fluid/framework/details/fetch_async_op_handle.cc
index a9e4bf826bc4b2ef44fe0e416429ed1b1ceb33f5..8d8bb96f5c8edb29ee9ac5295df7a28c98a834a1 100644
--- a/paddle/fluid/framework/details/fetch_async_op_handle.cc
+++ b/paddle/fluid/framework/details/fetch_async_op_handle.cc
@@ -228,7 +228,7 @@ void FetchAsyncOpHandle::RunImpl() {
   }
 
   if (return_merged_) {
-    auto &val = BOOST_GET(FetchList, *data_);
+    auto &val = boost::get<FetchList>(*data_);
     if (src_vars[0]->IsType<LoDTensor>()) {
       // to lodtensor type
       std::vector<const LoDTensor *> src_lodtensors;
@@ -263,7 +263,7 @@ void FetchAsyncOpHandle::RunImpl() {
       val.at(offset_) = std::move(dst_lodtensor_array);
     }
   } else {
-    auto &val = BOOST_GET(FetchUnmergedList, *data_);
+    auto &val = boost::get<FetchUnmergedList>(*data_);
     auto &dst_tensors = val.at(offset_);
     dst_tensors.reserve(src_vars.size());
diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc
index a9f7de8ee312f914057471bc4741c0bf4aefb536..f160650f0b9f4c464411cb871d287eeb16fe5ba5 100644
--- a/paddle/fluid/framework/details/fetch_op_handle.cc
+++ b/paddle/fluid/framework/details/fetch_op_handle.cc
@@ -84,7 +84,7 @@ void FetchOpHandle::WaitAndMergeCPUFetchVars() const {
     for (auto &t : tensors_) {
      tensors_ptr.emplace_back(&BOOST_GET_CONST(LoDTensor, t));
     }
-    auto &val = BOOST_GET(FetchList, *data_);
+    auto &val = boost::get<FetchList>(*data_);
     LoDTensor var;
     MergeLoDTensor(&var, tensors_ptr, platform::CPUPlace());
     val.at(offset_) = std::move(var);
@@ -106,11 +106,11 @@ void FetchOpHandle::WaitAndMergeCPUFetchVars() const {
       tmp_array.emplace_back();
       MergeLoDTensor(&(tmp_array.back()), tensors_ptr, platform::CPUPlace());
     }
-    auto &val = BOOST_GET(FetchList, *data_);
+    auto &val = boost::get<FetchList>(*data_);
     val.at(offset_) = std::move(tmp_array);
   }
 } else {
-    auto &val = BOOST_GET(FetchUnmergedList, *data_);
+    auto &val = boost::get<FetchUnmergedList>(*data_);
     val.at(offset_) = std::move(tensors_);
   }
 }
diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc
index bc870c0eaa18d932b74a9a78f128aa8c16a1bfbd..86536b74a3d7c9f04c501a72dff433080eed3a42 100644
--- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc
@@ -278,8 +278,7 @@ FetchResultType ParallelSSAGraphExecutor::Run(
       if (!is_valid[scope_idx]) {
         continue;
       }
-      const auto &fetch_list =
-          BOOST_GET_CONST(FetchList, fetch_data[scope_idx]);
+      const auto &fetch_list = boost::get<FetchList>(fetch_data[scope_idx]);
       if (data_is_lod_tensor(fetch_list[fetch_idx])) {
         lodtensor_ptrs.push_back(
             &(BOOST_GET_CONST(LoDTensor, fetch_list[fetch_idx])));
@@ -318,7 +317,7 @@ FetchResultType ParallelSSAGraphExecutor::Run(
         continue;
       }
       const auto &fetch_list =
-          BOOST_GET_CONST(FetchUnmergedList, fetch_data[scope_idx]);
+          boost::get<FetchUnmergedList>(fetch_data[scope_idx]);
       PADDLE_ENFORCE_EQ(
           fetch_list[fetch_idx].size(),
           1,
diff --git a/paddle/fluid/framework/feed_fetch_type.h b/paddle/fluid/framework/feed_fetch_type.h
index 12c111e58f58a013cc0752569c4456074293f70d..c86cdc998133b8d674a667c98b90fb18e2e3eff3 100644
--- a/paddle/fluid/framework/feed_fetch_type.h
+++ b/paddle/fluid/framework/feed_fetch_type.h
@@ -23,10 +23,10 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 
-using FeedType = boost::variant<LoDTensor, Strings>;
+using FeedType = paddle::variant<LoDTensor, Strings>;
 using FeedList = std::vector<FeedType>;
 
-using FetchType = boost::variant<LoDTensor, LoDTensorArray, framework::Vocab>;
+using FetchType = paddle::variant<LoDTensor, LoDTensorArray, framework::Vocab>;
 using FetchList = std::vector<FetchType>;
 using FetchUnmergedList = std::vector<std::vector<FetchType>>;
diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc
index e87279fcc7d2a9eae26fc35c7e4b91d4857ca6b1..31caff8ce4265c145e910db9f47b934d1625c68c 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc
@@ -121,7 +121,7 @@ void ConvElementwiseAddFusePass::ApplyImpl(ir::Graph* graph) const {
       elementwise_add_op_desc->GetNullableAttr("out_threshold");
   // set the out_threshold of the elementwise add op to be the out_threshold
   // of the conv2d_fusion
-  if (out_threshold_attr.which()) {
+  if (out_threshold_attr.index()) {
     new_op_desc.SetAttr("out_threshold", out_threshold_attr);
   }
   new_op_desc.Flush();
diff --git a/paddle/fluid/framework/ir/fc_fuse_pass.cc b/paddle/fluid/framework/ir/fc_fuse_pass.cc
index cb88a19713ede52748841da6d1efa9fe5198316e..f7a8ea407c02ff790268aaafcbc4ebf0a73ea15c 100644
--- a/paddle/fluid/framework/ir/fc_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/fc_fuse_pass.cc
@@ -261,7 +261,7 @@ int FCFusePass::ApplyFCPattern(Graph* graph, bool with_relu) const {
     // out_thrshold of fc
     auto out_threshold_attr =
         elementwise_add_op_desc->GetNullableAttr("out_threshold");
-    if (out_threshold_attr.which()) {
+    if (out_threshold_attr.index()) {
       VLOG(4) << "setting out_threshold: "
               << BOOST_GET_CONST(float, out_threshold_attr);
       desc.SetAttr("out_threshold", out_threshold_attr);
diff --git a/paddle/fluid/framework/ir/fusion_group/code_generator_helper.cc b/paddle/fluid/framework/ir/fusion_group/code_generator_helper.cc
index 45fda31fe7458d78feddac22d6c33cc101062cd2..7d206236a42c7095632609f85f4051516296fe01 100644
--- a/paddle/fluid/framework/ir/fusion_group/code_generator_helper.cc
+++ b/paddle/fluid/framework/ir/fusion_group/code_generator_helper.cc
@@ -78,7 +78,7 @@ static std::string RefineTemplateWithAttr(const std::string& op_type,
     }
     Attribute attr = it->second;
     proto::AttrType attr_type =
-        static_cast<proto::AttrType>(it->second.which() - 1);
+        static_cast<proto::AttrType>(it->second.index() - 1);
     if (attr_type == proto::AttrType::BOOLEAN) {
       bool result = BOOST_GET(bool, attr);
       if (result) {
diff --git a/paddle/fluid/framework/ir/generate_pass.cc b/paddle/fluid/framework/ir/generate_pass.cc
index 089393144fb56232dccce1d38704a6e8825564a3..83c3ab9933d61169d73c5a3c50628fdd91d3c6af 100644
--- a/paddle/fluid/framework/ir/generate_pass.cc
+++ b/paddle/fluid/framework/ir/generate_pass.cc
@@ -14,6 +14,7 @@
 
 #include "paddle/fluid/framework/ir/generate_pass.h"
 
+#include "boost/blank.hpp"
 #include "paddle/fluid/framework/ir/graph_pattern_detector.h"
 
 namespace paddle {
@@ -105,7 +106,7 @@ Attribute GetOpAttrValue(const OpDesc* desc,
const proto::PassDesc::Attr& attr) { Attribute value = desc->GetAttr(attr.name()); if (attr.has_element_index()) { - value = boost::apply_visitor(element_visitor(attr.element_index()), value); + value = paddle::visit(element_visitor(attr.element_index()), value); } return value; } @@ -203,7 +204,7 @@ void InitGeneratePattern(const proto::PassDesc& pass_desc, PDPattern* pattern) { Attribute attr = GetVarAttrValue(x->Var(), condition.attr()); if (condition.has_operation()) { Attribute operation = GetAttrValue(condition.operation().value()); - attr = boost::apply_visitor( + attr = paddle::visit( operation_visitor(condition.operation().type()), attr, operation); } switch (condition.type()) { @@ -388,7 +389,7 @@ GraphPatternDetector::handle_t GetGenerateRewrite( if (attr_map.has_operation()) { Attribute operation = GetAttrValue(attr_map.operation().value()); - attr = boost::apply_visitor( + attr = paddle::visit( operation_visitor(attr_map.operation().type()), attr, operation); diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc index bf0d1d13bd56a568b9acb38414e8fc7cdddaaf06..89ac249c20d93628c7dcd4c51568af3a12515357 100644 --- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc @@ -320,7 +320,7 @@ void CPUQuantizeSquashPass::RequantOpSquash(Graph* graph) const { "should have requantize output as input.", requant_out->Name())); float requant_scale_in = - boost::get(requant_op->Op()->GetAttr("Scale_in")); + paddle::get(requant_op->Op()->GetAttr("Scale_in")); auto scale_name = "Scale_in"; if (any_op->Op()->Type() == "matmul") diff --git a/paddle/fluid/framework/ir/mkldnn/reshape_transpose_matmul_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/reshape_transpose_matmul_mkldnn_fuse_pass.cc index df770c618bb4d4fd32510b61ed3a8716381af98a..20bfe5726f6593f725cfd123a55641386504e88e 100644 --- a/paddle/fluid/framework/ir/mkldnn/reshape_transpose_matmul_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/reshape_transpose_matmul_mkldnn_fuse_pass.cc @@ -118,9 +118,9 @@ void ReshapeTransposeMatmulMkldnnFusePass::Fuse( GET_IR_NODE_FROM_SUBGRAPH(matmul_out, matmul_out, rtm_pattern); auto reshape_shape = - boost::get>(reshape_op->Op()->GetAttr("shape")); + paddle::get>(reshape_op->Op()->GetAttr("shape")); auto transpose_axis = - boost::get>(transpose_op->Op()->GetAttr("axis")); + paddle::get>(transpose_op->Op()->GetAttr("axis")); OpDesc *matmul_desc = matmul_op->Op(); std::string input_var_name = transpose_out->Name(); diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc index 412278739c9e0218bded28cff033b7bcce3b5d7c..c8a9950ae5efb6f39a2ac99d041c0d6e2a35c620 100644 --- a/paddle/fluid/framework/op_desc.cc +++ b/paddle/fluid/framework/op_desc.cc @@ -16,6 +16,7 @@ limitations under the License. 
 
 #include
 
+#include "boost/blank.hpp"
 #include "glog/logging.h"
 #include "paddle/fluid/framework/block_desc.h"
 #include "paddle/fluid/framework/op_call_stack.h"
@@ -563,7 +564,7 @@ proto::AttrType OpDesc::GetAttrType(const std::string &name) const {
       it,
       attrs_.end(),
       platform::errors::NotFound("Attribute %s is not found.", name));
-  return static_cast<proto::AttrType>(it->second.which() - 1);
+  return static_cast<proto::AttrType>(it->second.index() - 1);
 }
 
 std::vector<std::string> OpDesc::AttrNames() const {
@@ -584,7 +585,7 @@ void OpDesc::SetAttr(const std::string &name, const Attribute &v) {
   // NOTICE(minqiyang): pybind11 will take the empty list in python as
   // the std::vector type in C++; so we have to change the attr's type
   // here if we meet this issue
-  proto::AttrType attr_type = static_cast<proto::AttrType>(v.which() - 1);
+  proto::AttrType attr_type = static_cast<proto::AttrType>(v.index() - 1);
   if (attr_type == proto::AttrType::INTS &&
       BOOST_GET_CONST(std::vector<int>, v).size() == 0u) {
     // Find current attr via attr name and set the correct attribute value
@@ -837,9 +838,9 @@ void OpDesc::Flush() {
       auto *attr_desc = desc_.add_attrs();
       attr_desc->set_name(attr.first);
       attr_desc->set_type(
-          static_cast<proto::AttrType>(attr.second.which() - 1));
+          static_cast<proto::AttrType>(attr.second.index() - 1));
       SetAttrDescVisitor visitor(attr_desc);
-      boost::apply_visitor(visitor, attr.second);
+      paddle::visit(visitor, attr.second);
     }
 
     need_update_ = false;
diff --git a/paddle/fluid/framework/op_version_registry.h b/paddle/fluid/framework/op_version_registry.h
index 7665f597f2a855e356279a5b5b2122c535e0f23a..c88b947edc68656919088eafd6fb37f6f26f55f6 100644
--- a/paddle/fluid/framework/op_version_registry.h
+++ b/paddle/fluid/framework/op_version_registry.h
@@ -33,18 +33,18 @@ class OpVersionMap;
 }  // namespace pb
 
 using OpAttrVariantT =
-    boost::variant<bool,                     /* AttrType::BOOL */
-                   float,                    /* AttrType::FLOAT */
-                   int32_t,                  /* AttrType::INT */
-                   int64_t,                  /* AttrType::LONG */
-                   std::string,              /* AttrType::STRING */
-                   std::vector<bool>,        /* AttrType::BOOLS */
-                   std::vector<float>,       /* AttrType::FLOATS */
-                   std::vector<int32_t>,     /* AttrType::INTS */
-                   std::vector<int64_t>,     /* AttrType::LONGS */
-                   std::vector<std::string>, /* AttrType::STRINGS */
-                   paddle::none_t            /* None */
-                   >;
+    paddle::variant<bool,                     /* AttrType::BOOL */
+                    float,                    /* AttrType::FLOAT */
+                    int32_t,                  /* AttrType::INT */
+                    int64_t,                  /* AttrType::LONG */
+                    std::string,              /* AttrType::STRING */
+                    std::vector<bool>,        /* AttrType::BOOLS */
+                    std::vector<float>,       /* AttrType::FLOATS */
+                    std::vector<int32_t>,     /* AttrType::INTS */
+                    std::vector<int64_t>,     /* AttrType::LONGS */
+                    std::vector<std::string>, /* AttrType::STRINGS */
+                    paddle::none_t            /* None */
+                    >;
 
 struct OpUpdateInfo {
   virtual ~OpUpdateInfo() = default;
diff --git a/paddle/fluid/framework/paddle2cinn/transform_desc.cc b/paddle/fluid/framework/paddle2cinn/transform_desc.cc
index 52b1395c732ace86c682b1502ed404649f132088..af2d1c06de53f5da45c494efdda54107b609d1f8 100644
--- a/paddle/fluid/framework/paddle2cinn/transform_desc.cc
+++ b/paddle/fluid/framework/paddle2cinn/transform_desc.cc
@@ -190,7 +190,7 @@ void OpAttrsToCinn(framework::OpDesc *pb_desc, cpp::OpDesc *cpp_desc) {
     IMPL_ONE(LONG, int64_t);
     IMPL_ONE(LONGS, std::vector<int64_t>);
     case AttrType::BLOCK: {
-      auto i = pb_desc->GetAttrIfExists(name);
+      auto i = pb_desc->GetAttrIfExists(name);
       cpp_desc->SetAttr(name, i);
       break;
     }
diff --git a/paddle/fluid/framework/shape_inference.h b/paddle/fluid/framework/shape_inference.h
index feec5c008b014c16e5c7f51b522495025e5dd427..72cfa4da2f245639a630b04db1c8643480d29181 100644
--- a/paddle/fluid/framework/shape_inference.h
+++ b/paddle/fluid/framework/shape_inference.h
@@ -56,7 +56,7 @@ namespace framework {
 
 class OperatorBase;
 
-using InferShapeVarPtr = boost::variant<VarDesc *, Variable *>;
+using InferShapeVarPtr = paddle::variant<VarDesc *, Variable *>;
 
 class InferShapeContext {
  public:
diff --git a/paddle/fluid/framework/tuple.h b/paddle/fluid/framework/tuple.h
index 07e30bd2729c59f8e75f35c05c2037ecbb559fb2..6c283f4d32e572e3e811f866219460a7dfb2f83c 100644
--- a/paddle/fluid/framework/tuple.h
+++ b/paddle/fluid/framework/tuple.h
@@ -27,13 +27,13 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 
-typedef boost::variant<int,
-                       float,
-                       std::string,
-                       std::vector<int>,
-                       std::vector<float>,
-                       std::vector<std::string>,
-                       LoDTensor>
+typedef paddle::variant<int,
+                        float,
+                        std::string,
+                        std::vector<int>,
+                        std::vector<float>,
+                        std::vector<std::string>,
+                        LoDTensor>
     ElementVar;
 
 class Tuple {
@@ -64,8 +64,8 @@ bool Tuple::isSameType(const Tuple& t) const {
     return false;
   }
   for (size_t j = 0; j < tuple_size; ++j) {
-    auto type1 = get(j).which();
-    auto type2 = t.get(j).which();
+    auto type1 = get(j).index();
+    auto type2 = t.get(j).index();
     if (type1 != type2) return false;
   }
   return true;
diff --git a/paddle/fluid/framework/type_defs.h b/paddle/fluid/framework/type_defs.h
index 68af540b63cd4b12bb147bf66be62c74aeed9031..ca9d6ec44a8d9e1a342ad0716f05d36657af0efa 100644
--- a/paddle/fluid/framework/type_defs.h
+++ b/paddle/fluid/framework/type_defs.h
@@ -22,9 +22,11 @@ limitations under the License. */
 #include
 #include
 
+#include "boost/blank.hpp"
 #include "paddle/fluid/imperative/type_defs.h"
 #include "paddle/fluid/platform/variant.h"
 #include "paddle/utils/small_vector.h"
+#include "paddle/utils/variant.h"
 
 namespace paddle {
 namespace framework {
@@ -40,38 +42,38 @@ class InferNoNeedBufferVarsFN;
 using VariableNameMap = std::map<std::string, std::vector<std::string>>;
 using VariableValueMap = std::map<std::string, std::vector<Variable*>>;
 
-using Attribute = boost::variant<boost::blank,
-                                 int,
-                                 float,
-                                 std::string,
-                                 std::vector<int>,
-                                 std::vector<float>,
-                                 std::vector<std::string>,
-                                 bool,
-                                 std::vector<bool>,
-                                 BlockDesc*,
-                                 int64_t,
-                                 std::vector<BlockDesc*>,
-                                 std::vector<int64_t>,
-                                 std::vector<double>>;
+using Attribute = paddle::variant<boost::blank,
+                                  int,
+                                  float,
+                                  std::string,
+                                  std::vector<int>,
+                                  std::vector<float>,
+                                  std::vector<std::string>,
+                                  bool,
+                                  std::vector<bool>,
+                                  BlockDesc*,
+                                  int64_t,
+                                  std::vector<BlockDesc*>,
+                                  std::vector<int64_t>,
+                                  std::vector<double>>;
 
 using AttributeMap = std::unordered_map<std::string, Attribute>;
 
 #ifdef PADDLE_WITH_ASCEND_CL
-using NPUAttribute = boost::variant<boost::blank,
-                                    int,
-                                    float,
-                                    std::string,
-                                    std::vector<int>,
-                                    std::vector<float>,
-                                    std::vector<std::string>,
-                                    bool,
-                                    std::vector<bool>,
-                                    BlockDesc*,
-                                    int64_t,
-                                    std::vector<BlockDesc*>,
-                                    std::vector<int64_t>,
-                                    std::vector<double>,
-                                    std::vector<std::vector<size_t>>>;
+using NPUAttribute = paddle::variant<boost::blank,
+                                     int,
+                                     float,
+                                     std::string,
+                                     std::vector<int>,
+                                     std::vector<float>,
+                                     std::vector<std::string>,
+                                     bool,
+                                     std::vector<bool>,
+                                     BlockDesc*,
+                                     int64_t,
+                                     std::vector<BlockDesc*>,
+                                     std::vector<int64_t>,
+                                     std::vector<double>,
+                                     std::vector<std::vector<size_t>>>;
 
 using NPUAttributeMap = std::unordered_map<std::string, NPUAttribute>;
 #endif
diff --git a/paddle/fluid/framework/var_desc.cc b/paddle/fluid/framework/var_desc.cc
index a396f7ebe883bac344b575b48141ef0997831042..55e2ae0969373a20a2e6c09cca1cb2eafa7201c8 100644
--- a/paddle/fluid/framework/var_desc.cc
+++ b/paddle/fluid/framework/var_desc.cc
@@ -315,7 +315,7 @@ void VarDesc::SetAttr(const std::string &name, const Attribute &v) {
   // NOTICE(sandyhouse): pybind11 will take the empty list in python as
   // the std::vector type in C++; so we have to change the attr's type
   // here if we meet this issue
-  proto::AttrType attr_type = static_cast<proto::AttrType>(v.which() - 1);
+  proto::AttrType attr_type = static_cast<proto::AttrType>(v.index() - 1);
   if (attr_type == proto::AttrType::INTS &&
       BOOST_GET_CONST(std::vector<int>, v).size() == 0u) {
     // Find current attr via attr name and set the correct attribute value
diff --git a/paddle/fluid/operators/controlflow/feed_op.cc b/paddle/fluid/operators/controlflow/feed_op.cc
index aef4a32248ac0fe9e3f5280fad0057a5f991eefa..00806d18c066fee1106cec10b8426c0b8b8eb28b 100644
--- a/paddle/fluid/operators/controlflow/feed_op.cc
+++ b/paddle/fluid/operators/controlflow/feed_op.cc
@@ -122,7 +122,7 @@ class FeedOp : public framework::OperatorBase {
     auto &feed_item = feed_list.at(static_cast<size_t>(col));
 
     FeedVariableVisitor visitor(out_var, place);
-    boost::apply_visitor(visitor, feed_item);
+    paddle::visit(visitor, feed_item);
   }
 };
diff --git a/paddle/fluid/operators/controlflow/op_variant.cc b/paddle/fluid/operators/controlflow/op_variant.cc
index cebed022e550b8d41bb5674ddd3418efe8d8ba1c..60f58955adbedaa0a01fb7e698e173bd48bf2068 100644
--- a/paddle/fluid/operators/controlflow/op_variant.cc
+++ b/paddle/fluid/operators/controlflow/op_variant.cc
@@ -53,19 +53,19 @@ struct RawPointerVisitor : public boost::static_visitor<const void *> {
 };
 
 const framework::VariableNameMap &OpVariant::Inputs() const {
-  return *boost::apply_visitor(InputsVisitor(), op_);
+  return *paddle::visit(InputsVisitor(), op_);
 }
 
 const framework::VariableNameMap &OpVariant::Outputs() const {
-  return *boost::apply_visitor(OutputsVisitor(), op_);
+  return *paddle::visit(OutputsVisitor(), op_);
 }
 
 const framework::AttributeMap &OpVariant::Attrs() const {
-  return *boost::apply_visitor(AttributeMapVisitor(), op_);
+  return *paddle::visit(AttributeMapVisitor(), op_);
 }
 
 const void *OpVariant::RawPointer() const {
-  return boost::apply_visitor(RawPointerVisitor(), op_);
+  return paddle::visit(RawPointerVisitor(), op_);
 }
 
 void AppendOpVariantByOpName(const std::vector<framework::OpDesc *> &op_descs,
diff --git a/paddle/fluid/operators/controlflow/op_variant.h b/paddle/fluid/operators/controlflow/op_variant.h
index 2c1bbc219ba00b4a5a0fb652e4c61353ddd7c0f0..c75294ce9ab7a32d861eac8a3f38f0abb1bacf82 100644
--- a/paddle/fluid/operators/controlflow/op_variant.h
+++ b/paddle/fluid/operators/controlflow/op_variant.h
@@ -61,7 +61,7 @@ class OpVariant {
     return RawPointer() == other.RawPointer();
   }
 
-  int which() const { return static_cast<int>(op_.which()); }
+  int index() const { return static_cast<int>(op_.index()); }
 
   struct Hasher {
     size_t operator()(const OpVariant &op) const {
@@ -70,8 +70,8 @@ class OpVariant {
   };
 
  private:
-  const boost::variant<const framework::OperatorBase *,
-                       const framework::OpDesc *>
+  const paddle::variant<const framework::OperatorBase *,
+                        const framework::OpDesc *>
       op_;
 };
diff --git a/paddle/fluid/operators/detection/box_decoder_and_assign_op.cc b/paddle/fluid/operators/detection/box_decoder_and_assign_op.cc
index cdfe070a3d249d88cab72e55af881b9b4c2882e5..05a44dda32a54e573b1018edd1cc47ada5cd9fbe 100644
--- a/paddle/fluid/operators/detection/box_decoder_and_assign_op.cc
+++ b/paddle/fluid/operators/detection/box_decoder_and_assign_op.cc
@@ -226,6 +226,7 @@ REGISTER_OPERATOR(
     ops::BoxDecoderAndAssignOpMaker,
     paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
     paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
+
 REGISTER_OP_CPU_KERNEL(
     box_decoder_and_assign,
     ops::BoxDecoderAndAssignKernel,
diff --git a/paddle/fluid/operators/detection/box_decoder_and_assign_op.cu b/paddle/fluid/operators/detection/box_decoder_and_assign_op.cu
index d8265879513c6a690993aa8ff35edf7160eaba3e..7f66cb86b569345ad71624b4a6f4c0e4d301b63c 100644
--- a/paddle/fluid/operators/detection/box_decoder_and_assign_op.cu
+++ b/paddle/fluid/operators/detection/box_decoder_and_assign_op.cu
@@ -122,7 +122,7 @@ class BoxDecoderAndAssignCUDAKernel : public framework::OpKernel<T> {
     int grid = (roi_num * class_num + block - 1) / block;
     auto& device_ctx = context.cuda_device_context();
 
-    const T box_clip = context.Attr<T>("box_clip");
+    const T box_clip = static_cast<T>(context.Attr<float>("box_clip"));
 
     DecodeBoxKernel<T>
         <<<grid, block, 0, device_ctx.stream()>>>(prior_box_data,
diff --git a/paddle/fluid/operators/detection/box_decoder_and_assign_op.h b/paddle/fluid/operators/detection/box_decoder_and_assign_op.h
index 7eed920fb3d55a3eb78fabeddce4e40bd2b807f8..85ee3b76448ad760b5d5ef49d4dfbd8465d178b0 100644
--- a/paddle/fluid/operators/detection/box_decoder_and_assign_op.h
+++ b/paddle/fluid/operators/detection/box_decoder_and_assign_op.h
@@ -41,7 +41,7 @@ class BoxDecoderAndAssignKernel : public framework::OpKernel<T> {
     output_assign_box->mutable_data<T>({roi_num, 4}, context.GetPlace());
     T* output_box_data = output_box->data<T>();
     T* output_assign_box_data = output_assign_box->data<T>();
 
-    const T bbox_clip = context.Attr<T>("box_clip");
+    const T bbox_clip = static_cast<T>(context.Attr<float>("box_clip"));
 
     for (int i = 0; i < roi_num; ++i) {
       T prior_box_width = prior_box_data[i * 4 + 2] - prior_box_data[i * 4] + 1;
diff --git a/paddle/fluid/operators/math/matrix_bit_code.cc b/paddle/fluid/operators/math/matrix_bit_code.cc
index eb7bd57017f55113d414ef4d8c354946ec0c2adc..133680ca9a8c79f6e3ac00823f8d6906db1d1b9e 100644
--- a/paddle/fluid/operators/math/matrix_bit_code.cc
+++ b/paddle/fluid/operators/math/matrix_bit_code.cc
@@ -47,7 +47,7 @@ template <typename T>
 void MatrixBitCodeFunctor<T>::Add(const framework::Tensor &vec,
                                   framework::Tensor *tmat) {
   MatrixBitCodeFunctorAdd<T> func(vec, tmat);
-  code_table_.apply_visitor(func);
+  paddle::visit(func, code_table_);
 }
 
 template <typename T>
@@ -79,7 +79,7 @@ template <typename T>
 void MatrixBitCodeFunctor<T>::AddGrad(const framework::Tensor &tmat,
                                       framework::Tensor *vec) {
   MatrixBitCodeFunctorAddGrad<T> func(tmat, vec);
-  code_table_.apply_visitor(func);
+  paddle::visit(func, code_table_);
 }
 
 template <typename T>
@@ -121,7 +121,7 @@ void MatrixBitCodeFunctor<T>::Sum(const framework::Tensor &tmat,
                                   framework::Tensor *sum,
                                   T scale_sum) {
   MatrixBitCodeFunctorSum<T> func(tmat, sum, scale_sum);
-  code_table_.apply_visitor(func);
+  paddle::visit(func, code_table_);
 }
 
 template <typename T>
@@ -165,7 +165,7 @@ void MatrixBitCodeFunctor<T>::Mul(framework::Tensor *tmat,
                                   const framework::Tensor &weight,
                                   const framework::Tensor &input) {
   MatrixBitCodeFunctorMul<T> func(tmat, weight, input);
-  code_table_.apply_visitor(func);
+  paddle::visit(func, code_table_);
 }
 
 template <typename T>
@@ -222,7 +222,7 @@ void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::Tensor &tmat,
                                             framework::Tensor *weight,
                                             const framework::Tensor &input) {
   MatrixBitCodeFunctorMulGradWeight<T> func(tmat, weight, input);
-  code_table_.apply_visitor(func);
+  paddle::visit(func, code_table_);
 }
 
 template <typename T>
@@ -279,7 +279,7 @@ void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::Tensor &tmat,
                                             phi::SelectedRows *weight,
                                             const framework::Tensor &input) {
   MatrixBitCodeFunctorMulGradWeightSR<T> func(tmat, weight, input);
-  code_table_.apply_visitor(func);
+  paddle::visit(func, code_table_);
 }
 
 template <typename T>
@@ -323,7 +323,7 @@ void MatrixBitCodeFunctor<T>::MulGradError(const framework::Tensor &tmat,
                                            const framework::Tensor &weight,
                                            framework::Tensor *input) {
   MatrixBitCodeFunctorMulGradError<T> func(tmat, weight, input);
-  code_table_.apply_visitor(func);
+  paddle::visit(func, code_table_);
 }
 
 template <typename T>
@@ -352,7 +352,7 @@ struct MatrixBitCodeFunctorSub : public boost::static_visitor<void> {
 template <typename T>
 void MatrixBitCodeFunctor<T>::Sub(framework::Tensor *tmat) {
   MatrixBitCodeFunctorSub<T> func(tmat);
-  code_table_.apply_visitor(func);
+  paddle::visit(func, code_table_);
 }
 
 template class MatrixBitCodeFunctor<float>;
diff --git a/paddle/fluid/operators/math/matrix_bit_code.h b/paddle/fluid/operators/math/matrix_bit_code.h
index bda60787746ebf134b9e78580596b4bec156a4fe..780003c1b451e29c31154548641450ab9fdb9bae 100644
--- a/paddle/fluid/operators/math/matrix_bit_code.h
+++ b/paddle/fluid/operators/math/matrix_bit_code.h
@@ -208,7 +208,7 @@ class CustomCodeTable {
   const int64_t* ids_;
 };
 
-using CodeTable = boost::variant<SimpleCodeTable, CustomCodeTable<int64_t>>;
+using CodeTable = paddle::variant<SimpleCodeTable, CustomCodeTable<int64_t>>;
 
 template <typename T>
 class MatrixBitCodeFunctor {
diff --git a/paddle/fluid/operators/slice_op.cc b/paddle/fluid/operators/slice_op.cc
index 669f79bba400a4c8ad618b2d5e7b90c981b1ba97..4b6bcae7635b89185acfdd23a16aeab61156d840 100644
--- a/paddle/fluid/operators/slice_op.cc
+++ b/paddle/fluid/operators/slice_op.cc
@@ -201,7 +201,8 @@ class SliceOpVarTypeInference : public framework::VarTypeInference {
   auto x_name = "Input";
   auto out_name = "Out";
   auto decrease_axis = ctx->GetAttr("decrease_axis");
-  auto not_decrease = boost::get<std::vector<int>>(decrease_axis).size() == 0;
+  auto not_decrease =
+      paddle::get<std::vector<int>>(decrease_axis).size() == 0;
   if (not_decrease) {
     // The default type of out is LoDTensor.
     // However, if no axis is decreased and the type of input is not
diff --git a/paddle/fluid/platform/collective_helper.h b/paddle/fluid/platform/collective_helper.h
index 82537e9bae8a6eb84ce0c0d860395ecbee1d5e4a..22a95d11867916a97352ce1f617257127e4273ba 100644
--- a/paddle/fluid/platform/collective_helper.h
+++ b/paddle/fluid/platform/collective_helper.h
@@ -19,11 +19,11 @@
 #include
 #include
 
-#include "boost/variant.hpp"
 #include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/platform/device/npu/dynload/hccl.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/enforce.h"
+#include "paddle/utils/variant.h"
 
 #if defined(PADDLE_WITH_CNCL)
 #include "paddle/fluid/platform/device/mlu/device_context.h"
 #endif
diff --git a/paddle/fluid/platform/device/ipu/ipu_compiler.cc b/paddle/fluid/platform/device/ipu/ipu_compiler.cc
index b44e812469f8d6c707b4cf20aba71cad970b174d..ca44fbd0a5cb17a7f32f4f5b07e2e5b4dde674c3 100644
--- a/paddle/fluid/platform/device/ipu/ipu_compiler.cc
+++ b/paddle/fluid/platform/device/ipu/ipu_compiler.cc
@@ -19,6 +19,8 @@
 #include
 #include
 
+#include "boost/blank.hpp"
+
 #include "paddle/fluid/framework/ir/graph_helper.h"
 #include "paddle/fluid/platform/device/ipu/ipu_names.h"
 #include "paddle/fluid/platform/device/ipu/ipu_strategy.h"
@@ -390,7 +392,7 @@ void Compiler::LowerConstants(const Scope* scope) {
     auto* tensor = var->GetMutable<framework::LoDTensor>();
     ConstantOpAttrVisitor visitor(tensor, dtype);
     auto value = op_desc->GetAttr("value");
-    boost::apply_visitor(visitor, value);
+    paddle::visit(visitor, value);
 
     auto ddim = phi::make_ddim(shape);
     tensor->Resize(ddim);
@@ -475,7 +477,7 @@ void Compiler::LowerBody() {
       auto attributes = std::map<std::string, popart::any>{};
       for (auto& attr : op_desc->GetAttrMap()) {
         CustomOpAttrVisitor visitor(&attributes, attr.first);
-        boost::apply_visitor(visitor, attr.second);
+        paddle::visit(visitor, attr.second);
       }
       auto __op_type =
           BOOST_GET_CONST(std::string, op_desc->GetAttr("__op_type"));
diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h
index 6cf05165431dcaf4fbae5ab394d122800a4949ef..4f26ce0b27dbf4051cf18e51637af9474001ca36 100644
--- a/paddle/fluid/platform/enforce.h
+++ b/paddle/fluid/platform/enforce.h
@@ -122,20 +122,21 @@ using namespace ::phi::enforce;  // NOLINT
 #endif
 
 /*
- * Summary: This BOOST_GET(_**) series macros are used to call boost::get
- *   safely. boost::get is not a completely safe api, although it will not
+ * Summary: This BOOST_GET(_**) series macros are used to call paddle::get
+ *   safely. paddle::get is not a completely safe api, although it will not
  *   go wrong in most cases, but in extreme cases, it may fail and directly
- *   throw a boost::bad_get exception, without any stack information.
+ *   throw a paddle::bad_variant_access exception, without any stack
+ *   information.
 *   This kind of problems is difficult to debug, so add these macros to
- *   enrich boost::get error information. At the same time, we restrict
- *   the direct use of boost::get by CI rule.
+ *   enrich paddle::get error information. At the same time, we restrict
+ *   the direct use of paddle::get by CI rule.
 *
 * Parameters:
 *     __TYPE: the target variable type
 *     __VALUE: the target variable to get
 *
 * Examples:
- *     - unsafe writing: int x = boost::get<int>(y);
+ *     - unsafe writing: int x = paddle::get<int>(y);
 *     - safe writing: int x = BOOST_GET(int, y);
 *
 * Note: GCC 4.8 cannot select right overloaded function here, so need
@@ -155,12 +156,12 @@ using namespace phi::enforce::details;  // NOLINT
                      __OutputTypePtr,                                    \
                      __OutputType>::type {                               \
     try {                                                                \
-      return boost::get<OutputType>(input);                              \
-    } catch (boost::bad_get&) {                                          \
+      return paddle::get<OutputType>(input);                             \
+    } catch (paddle::bad_variant_access const&) {                        \
       HANDLE_THE_ERROR                                                   \
       throw ::phi::enforce::EnforceNotMet(                               \
           phi::errors::InvalidArgument(                                  \
-              "boost::get failed, cannot get value "                     \
+              "paddle::get failed, cannot get value "                    \
              "(%s) by type %s, its type is %s.",                         \
              expression,                                                 \
              phi::enforce::demangle(typeid(OutputType).name()),          \
diff --git a/paddle/fluid/platform/flags.h b/paddle/fluid/platform/flags.h
index 5b2629525c07f6da1a36fa5140bece3242922c8c..03986816c53f9f1932eb9393311953ff63791d7f 100644
--- a/paddle/fluid/platform/flags.h
+++ b/paddle/fluid/platform/flags.h
@@ -22,13 +22,14 @@
 #include "gflags/gflags.h"
 #include "paddle/fluid/platform/macros.h"
 #include "paddle/fluid/platform/variant.h"
+#include "paddle/utils/variant.h"
 
 namespace paddle {
 namespace platform {
 
 struct FlagInfo {
   using ValueType =
-      boost::variant<bool, int32_t, int64_t, uint64_t, double, std::string>;
+      paddle::variant<bool, int32_t, int64_t, uint64_t, double, std::string>;
   std::string name;
   mutable void *value_ptr;
   ValueType default_value;
diff --git a/paddle/fluid/pybind/global_value_getter_setter.cc b/paddle/fluid/pybind/global_value_getter_setter.cc
index 64489c354cb446f5a08d73dc02b81bfa19eb3e9b..2871d1de56780f70777f43fa730cbdc34843ec77 100644
--- a/paddle/fluid/pybind/global_value_getter_setter.cc
+++ b/paddle/fluid/pybind/global_value_getter_setter.cc
@@ -259,7 +259,7 @@ static void RegisterGlobalVarGetterSetter() {
     const auto &default_value = pair.second.default_value;
     RegisterGetterSetterVisitor visitor(
         "FLAGS_" + name, is_writable, value_ptr);
-    boost::apply_visitor(visitor, default_value);
+    paddle::visit(visitor, default_value);
   }
 }
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index fef27358498e0922cf22ecb87ff51326518d1013..e02d5cee3dc512c76c0d57dcf2e70e4a75836aa1 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -3338,7 +3338,7 @@ All parameter, weight, gradient are variables in Paddle.
       py::return_value_policy::take_ownership);
 
   py::class_<FetchList>(m, "FetchList", R"DOC( FetchList is a
-        vector of boost::variant.
+        vector of paddle::variant.
     )DOC")
       .def(
           "_move_to_list",
@@ -3385,7 +3385,7 @@ All parameter, weight, gradient are variables in Paddle.
          py::arg("var"));
 
   py::class_<FetchUnmergedList>(m, "FetchUnmergedList", R"DOC(
-        FetchUnmergedList is 2-D array of FetchType(boost::variant(LoDTensor, LoDTensorArray)).
+        FetchUnmergedList is 2-D array of FetchType(paddle::variant(LoDTensor, LoDTensorArray)).
     )DOC")
       .def(
           "_move_to_list",
@@ -4606,12 +4606,15 @@ All parameter, weight, gradient are variables in Paddle.
             pybind11::gil_scoped_release release;
             ret = self.Run(fetch_tensors, return_merged);
           }
+
+          // TODO(Ruibiao): Refactor the run interface of PE to avoid using
+          // boost::get here
           if (return_merged) {
             return py::cast(
-                std::move(BOOST_GET(paddle::framework::FetchList, ret)));
+                std::move(boost::get<paddle::framework::FetchList>(ret)));
           } else {
             return py::cast(std::move(
-                BOOST_GET(paddle::framework::FetchUnmergedList, ret)));
+                boost::get<paddle::framework::FetchUnmergedList>(ret)));
           }
         })
       .def("device_count", &ParallelExecutor::DeviceCount);
diff --git a/paddle/fluid/pybind/pybind_boost_headers.h b/paddle/fluid/pybind/pybind_boost_headers.h
index 19c1fcf29534b3ec3d25945644838087f3a76a67..623ec84acda6f67409b50c565fa0abea1fac8d17 100644
--- a/paddle/fluid/pybind/pybind_boost_headers.h
+++ b/paddle/fluid/pybind/pybind_boost_headers.h
@@ -19,11 +19,13 @@ limitations under the License. */
 #include "glog/logging.h"
 #include "paddle/fluid/platform/variant.h"
+#include "paddle/utils/variant.h"
 #include "pybind11/numpy.h"
 #include "pybind11/pybind11.h"
 #include "pybind11/stl.h"
 
-// Cast boost::variant for PyBind.
+// Cast paddle::variant for PyBind.
 // Copy from
+
 // https://github.com/pybind/pybind11/issues/576#issuecomment-269563199
 
 namespace pybind11 {
 namespace detail {
@@ -37,8 +39,7 @@ namespace detail {
 #endif
 
 // Can be replaced by a generic lambda in C++14
-struct PYBIND11_HIDDEN paddle_variant_caster_visitor
-    : public boost::static_visitor<handle> {
+struct PYBIND11_HIDDEN paddle_variant_caster_visitor {
   return_value_policy policy;
   handle parent;
@@ -127,8 +128,13 @@ struct paddle_variant_caster<V<Ts...>> {
 
   static handle cast(Type const& src,
                      return_value_policy policy,
                      handle parent) {
+    /*
+    auto paddle_variant_caster_visitor = [&](Type const& src) -> handle {
+      return make_caster<Type>::cast(src, policy, parent);
+    };
+    */
     paddle_variant_caster_visitor visitor(policy, parent);
-    return boost::apply_visitor(visitor, src);
+    return paddle::visit(visitor, src);
   }
 
   PYBIND11_TYPE_CASTER(Type, _("Variant"));
@@ -137,8 +143,8 @@ struct paddle_variant_caster<V<Ts...>> {
 
 // Add specialization for concrete variant type
 template <class... Args>
-struct type_caster<boost::variant<Args...>>
-    : paddle_variant_caster<boost::variant<Args...>> {};
+struct type_caster<paddle::variant<Args...>>
+    : paddle_variant_caster<paddle::variant<Args...>> {};
 
 }  // namespace detail
 }  // namespace pybind11
diff --git a/paddle/infrt/dialect/phi/pass/proto_arg_map_context.h b/paddle/infrt/dialect/phi/pass/proto_arg_map_context.h
index 30bde83cd8199c9d1de404e4b5c77abd680deb3d..a1c730fd84918c8855d6e72346e446777132e97e 100644
--- a/paddle/infrt/dialect/phi/pass/proto_arg_map_context.h
+++ b/paddle/infrt/dialect/phi/pass/proto_arg_map_context.h
@@ -36,7 +36,7 @@ class ProtoArgumentMappingContext : public ::phi::ArgumentMappingContext {
   bool HasAttr(const std::string& name) const override;
 
   // now we can't use Attribute here, it will cause phi relay on
-  // boost::variant and BlockDesc
+  // paddle::variant and BlockDesc
   paddle::any Attr(const std::string& name) const override;
 
   size_t InputSize(const std::string& name) const override;
diff --git a/paddle/phi/core/compat/arg_map_context.h b/paddle/phi/core/compat/arg_map_context.h
index f47e8d550e693b0c731028b57ce9791b17d7da28..6cfd18369c973f824906ce4fcc33730b8d870b24 100644
--- a/paddle/phi/core/compat/arg_map_context.h
+++ b/paddle/phi/core/compat/arg_map_context.h
@@ -99,7 +99,7 @@ class ArgumentMappingContext {
   virtual bool HasAttr(const std::string& name) const = 0;
 
   // now we can't use Attribute here, it will cause phi relay on
-  // boost::variant and BlockDesc
+  // paddle::variant and BlockDesc
   virtual paddle::any Attr(const std::string& name) const = 0;
 
std::string& name) const = 0; diff --git a/paddle/phi/core/dense_tensor_impl.cc b/paddle/phi/core/dense_tensor_impl.cc index a59b910b7e0069e79bf710c367c322927805b2cd..bc05ade76ea5d3dc2cde943306ff5b8b8e912713 100644 --- a/paddle/phi/core/dense_tensor_impl.cc +++ b/paddle/phi/core/dense_tensor_impl.cc @@ -113,7 +113,7 @@ void* DenseTensor::mutable_data(const Place& place, size = requested_size; } - /* some versions of boost::variant don't have operator!= */ + /* some versions of paddle::variant don't have operator!= */ if (holder_ == nullptr || !(holder_->place() == place) || holder_->size() < size + meta_.offset) { holder_.reset(); @@ -142,7 +142,7 @@ void* DenseTensor::mutable_data(const Place& place, "] now")); size_t size = numel() * SizeOf(dtype()); - /* some versions of boost::variant don't have operator!= */ + /* some versions of paddle::variant don't have operator!= */ if (holder_ == nullptr || !(holder_->place() == place) || holder_->size() < size + meta_.offset || !(place.GetType() == phi::AllocationType::GPU && diff --git a/paddle/phi/core/enforce.cc b/paddle/phi/core/enforce.cc index 91e0316ff75589ef748c619d9b683fc2dfb56bfc..0dd415d13130ea0071a55fb190b31ed643af94d3 100644 --- a/paddle/phi/core/enforce.cc +++ b/paddle/phi/core/enforce.cc @@ -20,31 +20,29 @@ limitations under the License. */ #include #include -// is not suitable to be placed in the header file, -// it will introduce a large number of unnecessary includes, and these type -// declarations that depend on boost are also not suitable for the phi header -// file. Do some repeated forward declarations here to avoid -// spreading to a large number of phi kernel files +#include "boost/blank.hpp" +#include "paddle/utils/variant.h" + namespace egr { class EagerVariable; } namespace paddle { namespace framework { class BlockDesc; -using Attribute = boost::variant, - std::vector, - std::vector, - bool, - std::vector, - BlockDesc*, - int64_t, - std::vector, - std::vector, - std::vector>; +using Attribute = paddle::variant, + std::vector, + std::vector, + bool, + std::vector, + BlockDesc*, + int64_t, + std::vector, + std::vector, + std::vector>; using AttributeMap = std::unordered_map; } // namespace framework namespace imperative { diff --git a/paddle/phi/core/string_tensor.cc b/paddle/phi/core/string_tensor.cc index f1f35364344475f884c1648cecb18f3830d98667..89272e1de5969d96d8d6375f76dc383ef38661a4 100644 --- a/paddle/phi/core/string_tensor.cc +++ b/paddle/phi/core/string_tensor.cc @@ -179,7 +179,7 @@ dtype::pstring* StringTensor::mutable_data(const phi::Place& place, size = requested_size; } - /* some versions of boost::variant don't have operator!= */ + /* some versions of paddle::variant don't have operator!= */ if (holder_ == nullptr || !(holder_->place() == place) || holder_->size() < size + meta_.offset) { holder_.reset(); diff --git a/paddle/utils/variant.h b/paddle/utils/variant.h index a6822920dd7050c76a6319427b6b5c2b0b8a8089..045ffbb6a3fa36a0d2220e092403f3bf866f9b1e 100644 --- a/paddle/utils/variant.h +++ b/paddle/utils/variant.h @@ -2,6 +2,11 @@ // https://github.com/mpark/variant/blob/single-header/v1.4.0/variant.hpp // Modify the following points: // 1. modify namespace mpark to namespace paddle +// 2. add type() member function for variant class +// 3. remove the visitation implementation under the branhch with +// MPARK_CPP14_CONSTEXPR defined since lib::cpp14::array could not be converted +// to std::initializer_list in Paddle's compilation +// 4. 
+// 4. decorate PYBIND11_HIDDEN for struct value_visitor
 
 // MPark.Variant
 //
@@ -22,6 +27,14 @@
 #pragma GCC diagnostic ignored "-Wdeprecated-copy"
 #endif
 
+#if !defined(PYBIND11_HIDDEN)
+#ifdef _WIN32
+#define PYBIND11_HIDDEN __declspec(dllexport)
+#else
+#define PYBIND11_HIDDEN __attribute__((visibility("hidden")))
+#endif
+#endif
+
 /*
 variant synopsis
@@ -1649,7 +1662,7 @@ struct variant {
   };
 
   template <typename Visitor>
-  struct value_visitor {
+  struct PYBIND11_HIDDEN value_visitor {
     Visitor &&visitor_;
 
     template <typename... Alts>
@@ -2454,7 +2467,7 @@ class variant {
     impl_.swap(that.impl_);
   }
 
-  inline const std::type_info &type() noexcept { return impl_.type(); }
+  inline const std::type_info &type() const noexcept { return impl_.type(); }
 
  private:
  detail::impl<Ts...> impl_;
@@ -2708,30 +2721,6 @@ inline constexpr bool operator!=(monostate, monostate) noexcept {
   return false;
 }
 
-#ifdef MPARK_CPP14_CONSTEXPR
-namespace detail {
-
-inline constexpr bool all(std::initializer_list<bool> bs) {
-  for (bool b : bs) {
-    if (!b) {
-      return false;
-    }
-  }
-  return true;
-}
-
-}  // namespace detail
-
-template <typename Visitor, typename... Vs>
-inline constexpr decltype(auto) visit(Visitor &&visitor, Vs &&...vs) {
-  return (detail::all(
-              lib::array<bool, sizeof...(Vs)>{!vs.valueless_by_exception()...})
-              ? (void)0
-              : throw_bad_variant_access()),
-         detail::visitation::variant::visit_value(
-             lib::forward<Visitor>(visitor), lib::forward<Vs>(vs)...);
-}
-#else
 namespace detail {
 
 template <std::size_t N>
@@ -2755,12 +2744,11 @@ inline constexpr DECLTYPE_AUTO visit(Visitor &&visitor, Vs &&...vs)
               : throw_bad_variant_access()),
          detail::visitation::variant::visit_value(lib::forward<Visitor>(visitor),
                                                   lib::forward<Vs>(vs)...))
-#endif
-template <typename... Ts>
-inline auto swap(variant<Ts...> &lhs,
-                 variant<Ts...> &rhs) noexcept(noexcept(lhs.swap(rhs)))
-    -> decltype(lhs.swap(rhs)) {
+  template <typename... Ts>
+  inline auto swap(variant<Ts...> &lhs,
+                   variant<Ts...> &rhs) noexcept(noexcept(lhs.swap(rhs)))
+      -> decltype(lhs.swap(rhs)) {
   lhs.swap(rhs);
 }
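
Migration note (a minimal standalone sketch, not part of the patch): the three replacements this diff applies everywhere are which() -> index(), boost::get<T>()/boost::bad_get -> paddle::get<T>()/paddle::bad_variant_access, and boost::apply_visitor(visitor, v) -> paddle::visit(visitor, v), with visitors no longer deriving from boost::static_visitor<R>. paddle::variant is the vendored mpark::variant, whose interface mirrors std::variant, so the sketch below uses std::variant in order to compile outside Paddle; this Attribute alias is a cut-down stand-in for the real fourteen-member variant in paddle/fluid/framework/type_defs.h.

// Sketch of the variant API contract the patch migrates to, using
// std::variant as a stand-in for paddle::variant (same interface).
#include <iostream>
#include <string>
#include <variant>
#include <vector>

// Cut-down stand-in for paddle::framework::Attribute; the real alias has
// fourteen alternatives and keeps boost::blank (not std::monostate) in slot 0.
using Attribute =
    std::variant<std::monostate, int, float, std::string, std::vector<int>>;

// which() -> index(): the proto::AttrType value is the zero-based alternative
// index minus one, because slot 0 is the empty "blank" state.
int AttrTypeID(const Attribute& attr) {
  return static_cast<int>(attr.index()) - 1;
}

// apply_visitor -> visit: visitors keep their call operators but no longer
// derive from boost::static_visitor<R>; the result type is deduced.
struct PrintVisitor {
  void operator()(std::monostate) const { std::cout << "<blank>\n"; }
  void operator()(int v) const { std::cout << "int: " << v << "\n"; }
  void operator()(float v) const { std::cout << "float: " << v << "\n"; }
  void operator()(const std::string& v) const {
    std::cout << "string: " << v << "\n";
  }
  void operator()(const std::vector<int>& v) const {
    std::cout << "ints, size " << v.size() << "\n";
  }
};

int main() {
  Attribute attr = 42;
  std::cout << AttrTypeID(attr) << "\n";  // prints 0, i.e. AttrType::INT

  // boost::get/bad_get -> paddle::get/bad_variant_access: the BOOST_GET
  // macros in enforce.h wrap exactly this call to add file/line context.
  try {
    (void)std::get<float>(attr);  // wrong alternative: throws
  } catch (const std::bad_variant_access&) {
    std::cout << "bad_variant_access caught\n";
  }

  // Argument-order note: boost used apply_visitor(visitor, v) and Paddle's
  // CodeTable used v.apply_visitor(visitor); paddle::visit(visitor, v)
  // unifies both call shapes.
  std::visit(PrintVisitor{}, attr);
  return 0;
}

Because slot 0 of Attribute is the blank empty state, the proto::AttrType enum equals the variant index minus one; that convention is why every `which() - 1` cast in the diff carries over unchanged as `index() - 1`, and why `out_threshold_attr.index()` still works as an "attribute is set" test.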