diff --git a/paddle/fluid/distributed/common/sparse_sharding_merge.h b/paddle/fluid/distributed/common/sparse_sharding_merge.h
index 3f84b5c4b212e2b261a4ef9b3f21163e5ef705b2..3bd36e65ba852173d14978b125ac2d249c65333a 100644
--- a/paddle/fluid/distributed/common/sparse_sharding_merge.h
+++ b/paddle/fluid/distributed/common/sparse_sharding_merge.h
@@ -21,7 +21,6 @@
 #include
 #include
 
-#include "boost/lexical_cast.hpp"
 #include "glog/logging.h"
 #include "paddle/fluid/distributed/common/utils.h"
 #include "paddle/fluid/framework/blocking_queue.h"
@@ -36,8 +35,6 @@
 constexpr int Q_SIZE = 10000;
 constexpr int BUCKET = 10;
 constexpr char XEOF[] = "EOF";
-using boost::lexical_cast;
-
 inline double GetCurrentUS() {
   struct timeval time;
   gettimeofday(&time, NULL);
@@ -208,8 +205,10 @@ class ShardingMerge {
       for (int x = 0; x < embedding_dim; ++x) {
         float v = 0.0;
         try {
-          v = lexical_cast<float>(values_str[x]);
-        } catch (boost::bad_lexical_cast &e) {
+          v = std::stof(values_str[x]);
+        } catch (std::invalid_argument &e) {
+          VLOG(0) << " get unexpected line: " << line;
+        } catch (std::out_of_range &e) {
           VLOG(0) << " get unexpected line: " << line;
         }
         out->push_back(v);
diff --git a/paddle/fluid/distributed/index_dataset/index_wrapper.cc b/paddle/fluid/distributed/index_dataset/index_wrapper.cc
index 99fe4ca0c6d043caef01a867a5acc0d40841ee01..7a9691f3602e2622c6adc6ddbeb1a1507a174f70 100644
--- a/paddle/fluid/distributed/index_dataset/index_wrapper.cc
+++ b/paddle/fluid/distributed/index_dataset/index_wrapper.cc
@@ -17,8 +17,6 @@ limitations under the License. */
 #include
 
 #include "paddle/fluid/framework/io/fs.h"
-#include
-#include
 #include "paddle/fluid/distributed/index_dataset/index_wrapper.h"
 
 namespace paddle {
@@ -65,7 +63,7 @@ int TreeIndex::Load(const std::string filename) {
     if (item.key() == ".tree_meta") {
       meta_.ParseFromString(item.value());
     } else {
-      auto code = boost::lexical_cast<uint64_t>(item.key());
+      auto code = std::stoull(item.key());
       IndexNode node;
       node.ParseFromString(item.value());
       PADDLE_ENFORCE_NE(node.id(), 0,
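
Note for reviewers: boost::lexical_cast<float> only succeeds when the entire string converts, while std::stof skips leading whitespace and stops at the first character it cannot parse, so a value such as "1.5abc" now converts instead of throwing. A minimal standalone sketch of the difference (illustration only, not part of the patch):

    #include <iostream>
    #include <stdexcept>
    #include <string>

    int main() {
      // std::stof parses the leading numeric prefix and ignores the rest,
      // where boost::lexical_cast<float> would have thrown on "1.5abc".
      std::cout << std::stof("1.5abc") << "\n";  // prints 1.5
      // A string with no numeric prefix still throws std::invalid_argument.
      try {
        std::stof("abc");
      } catch (const std::invalid_argument &e) {
        std::cout << "invalid_argument: " << e.what() << "\n";
      }
      return 0;
    }
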
diff --git a/paddle/fluid/distributed/table/common_sparse_table.cc b/paddle/fluid/distributed/table/common_sparse_table.cc
index e1223face0f54ac782fa41ff16a2db1b08aa413a..8b79b1c02fce5e8d3e4b803bd27bc9f3741ae9ea 100644
--- a/paddle/fluid/distributed/table/common_sparse_table.cc
+++ b/paddle/fluid/distributed/table/common_sparse_table.cc
@@ -15,7 +15,6 @@
 #include "paddle/fluid/distributed/table/common_sparse_table.h"
 
 #include
-#include "boost/lexical_cast.hpp"
 #include "glog/logging.h"
 #include "paddle/fluid/platform/enforce.h"
 
@@ -50,8 +49,11 @@ void CommonSparseTable::ProcessALine(const std::vector<std::string>& columns,
 
     float v = 0.0;
     try {
-      v = lexical_cast<float>(va);
-    } catch (boost::bad_lexical_cast& e) {
+      v = std::stof(va);
+    } catch (std::invalid_argument& e) {
+      VLOG(0) << "id: " << id << " get unexpected value: " << va
+              << " and be reset to: 0.0";
+    } catch (std::out_of_range& e) {
       VLOG(0) << "id: " << id << " get unexpected value: " << va
               << " and be reset to: 0.0";
     }
@@ -131,7 +133,7 @@ int64_t CommonSparseTable::LoadFromText(
 
   while (std::getline(file, line)) {
     auto values = paddle::string::split_string<std::string>(line, "\t");
-    auto id = lexical_cast<uint64_t>(values[0]);
+    auto id = std::stoull(values[0]);
 
     if (id % pserver_num != pserver_id) {
       VLOG(3) << "will not load " << values[0] << " from " << valuepath
@@ -150,10 +152,9 @@ int64_t CommonSparseTable::LoadFromText(
     VALUE* value_instant = block->GetValue(id);
 
     if (values.size() == 5) {
-      value_instant->count_ = lexical_cast<int>(values[1]);
-      value_instant->unseen_days_ = lexical_cast<int>(values[2]);
-      value_instant->is_entry_ =
-          static_cast<bool>(lexical_cast<int>(values[3]));
+      value_instant->count_ = std::stoi(values[1]);
+      value_instant->unseen_days_ = std::stoi(values[2]);
+      value_instant->is_entry_ = static_cast<bool>(std::stoi(values[3]));
     }
 
     std::vector<std::vector<float>> block_values = block->Get(id, meta.names, meta.dims);
diff --git a/paddle/fluid/distributed/table/common_sparse_table.h b/paddle/fluid/distributed/table/common_sparse_table.h
index ce3cc11686a4807e9de616e2de2dc1d9b1e7c3f9..a443710bf0fd82bc157db26184d5c2d87f191004 100644
--- a/paddle/fluid/distributed/table/common_sparse_table.h
+++ b/paddle/fluid/distributed/table/common_sparse_table.h
@@ -33,7 +33,6 @@
 #include "paddle/fluid/string/string_helper.h"
 
 #define PSERVER_SAVE_SUFFIX ".shard"
-using boost::lexical_cast;
 
 namespace paddle {
 namespace distributed {
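
For context, LoadFromText above consumes tab-separated shard lines whose first four columns are id, count, unseen_days and is_entry. A self-contained sketch of that parsing with the new std calls (the sample line and the split helper are made up for illustration):

    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    // Hypothetical split helper standing in for paddle::string::split_string.
    std::vector<std::string> split(const std::string &s, char delim) {
      std::vector<std::string> out;
      std::stringstream ss(s);
      std::string item;
      while (std::getline(ss, item, delim)) out.push_back(item);
      return out;
    }

    int main() {
      // A made-up shard line in the five-column layout handled above:
      // id, count, unseen_days, is_entry, embedding values.
      std::string line = "42\t3\t0\t1\t0.1,0.2,0.3";
      auto values = split(line, '\t');
      uint64_t id = std::stoull(values[0]);
      int count = std::stoi(values[1]);
      int unseen_days = std::stoi(values[2]);
      bool is_entry = static_cast<bool>(std::stoi(values[3]));
      std::cout << id << " " << count << " " << unseen_days << " "
                << is_entry << "\n";  // prints "42 3 0 1"
      return 0;
    }
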
diff --git a/paddle/fluid/distributed/table/ssd_sparse_table.cc b/paddle/fluid/distributed/table/ssd_sparse_table.cc
index 5de6de3d2909d670c4bfdabdac37e72fcb125d5e..41eca72cf80717cb5f0ad731d19a9da79009ec96 100644
--- a/paddle/fluid/distributed/table/ssd_sparse_table.cc
+++ b/paddle/fluid/distributed/table/ssd_sparse_table.cc
@@ -310,7 +310,7 @@ int64_t SSDSparseTable::LoadFromText(
 
   while (std::getline(file, line)) {
     auto values = paddle::string::split_string<std::string>(line, "\t");
-    auto id = lexical_cast<uint64_t>(values[0]);
+    auto id = std::stoull(values[0]);
 
     if (id % pserver_num != pserver_id) {
       VLOG(3) << "will not load " << values[0] << " from " << valuepath
@@ -329,10 +329,9 @@ int64_t SSDSparseTable::LoadFromText(
     VALUE* value_instant = block->GetValue(id);
 
     if (values.size() == 5) {
-      value_instant->count_ = lexical_cast<int>(values[1]);
-      value_instant->unseen_days_ = lexical_cast<int>(values[2]);
-      value_instant->is_entry_ =
-          static_cast<bool>(lexical_cast<int>(values[3]));
+      value_instant->count_ = std::stoi(values[1]);
+      value_instant->unseen_days_ = std::stoi(values[2]);
+      value_instant->is_entry_ = static_cast<bool>(std::stoi(values[3]));
     }
 
     std::vector<std::vector<float>> block_values = block->Get(id, meta.names, meta.dims);
diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt
index 1546027b794bb509abc99b04cf8c2f64057c2242..bbb781c8664baff5a260ad9b2d8f8f3348ea089b 100644
--- a/paddle/fluid/framework/details/CMakeLists.txt
+++ b/paddle/fluid/framework/details/CMakeLists.txt
@@ -141,7 +141,7 @@ if(NOT APPLE AND NOT WIN32 AND (WITH_GPU OR WITH_ROCM))
 endif()
 cc_library(build_strategy SRCS build_strategy.cc DEPS pass_builder ${IR_PASS_DEPS})
 cc_test(build_strategy_test SRCS build_strategy_test.cc
-    DEPS build_strategy op_registry op_proto_maker graph)
+    DEPS build_strategy op_registry op_proto_maker graph string_helper)
 
 if (WITH_MKLDNN)
   target_link_libraries(build_strategy mkldnn_placement_pass)
diff --git a/paddle/fluid/framework/fleet/fleet_wrapper.cc b/paddle/fluid/framework/fleet/fleet_wrapper.cc
index 54a647a73cfebb82157ea153da7fc51ec9a3d882..bb318e59e46e41521144d9e2ec9a7455a5d19b26 100644
--- a/paddle/fluid/framework/fleet/fleet_wrapper.cc
+++ b/paddle/fluid/framework/fleet/fleet_wrapper.cc
@@ -262,7 +262,7 @@ void FleetWrapper::HeterPushSparseVars(
     int64_t* ids = tensor->data<int64_t>();
     int slot = 0;
     if (dump_slot) {
-      slot = boost::lexical_cast<int>(sparse_key_names[i]);
+      slot = std::stoi(sparse_key_names[i]);
     }
     Variable* g_var = scope.FindVar(sparse_grad_names[i]);
     if (g_var == nullptr) {
@@ -915,12 +915,17 @@ void FleetWrapper::PushSparseVarsWithLabelAsync(
     int slot = 0;
     if (dump_slot) {
       try {
-        slot = boost::lexical_cast<int>(sparse_key_names[i]);
-      } catch (boost::bad_lexical_cast const& e) {
+        slot = std::stoi(sparse_key_names[i]);
+      } catch (std::invalid_argument const& e) {
         PADDLE_THROW(platform::errors::PreconditionNotMet(
             "sparse var's name: %s, doesn't support non-integer type name when "
             "dump_slot=True",
             sparse_key_names[i]));
+      } catch (std::out_of_range const& e) {
+        PADDLE_THROW(platform::errors::PreconditionNotMet(
+            "sparse var's name: %s, integer type name out of range when "
+            "dump_slot=True",
+            sparse_key_names[i]));
       }
     }
     Variable* g_var = scope.FindVar(sparse_grad_names[i]);
@@ -1121,7 +1126,7 @@ void FleetWrapper::PushSparseFromTensorWithLabelAsync(
       data[click_index] = static_cast<float>(fea_labels.at(input_idx));
     }
     if (dump_slot) {
-      int slot = boost::lexical_cast<int>(input_names[index]);
+      int slot = std::stoi(input_names[index]);
       data[0] = static_cast<float>(slot);
     }
     ++input_idx;
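
std::stoi signals failure through two distinct exceptions, which is why both catch blocks above are needed where a single boost::bad_lexical_cast handler sufficed before. A runnable illustration (the slot names are hypothetical):

    #include <iostream>
    #include <stdexcept>
    #include <string>

    int parse_slot(const std::string &name) {
      try {
        return std::stoi(name);
      } catch (const std::invalid_argument &) {
        std::cout << name << ": not a number\n";       // e.g. "click_slot"
      } catch (const std::out_of_range &) {
        std::cout << name << ": exceeds int range\n";  // e.g. 14-digit value
      }
      return -1;
    }

    int main() {
      parse_slot("123");             // fine, returns 123
      parse_slot("click_slot");      // invalid_argument branch
      parse_slot("99999999999999");  // out_of_range branch
      return 0;
    }
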
diff --git a/paddle/fluid/framework/ir/lock_free_optimize_pass.h b/paddle/fluid/framework/ir/lock_free_optimize_pass.h
index 26ec61fd36eb3c309d864221b41681f3d712d81f..93b6396bf7f3101f7def6c4876355d7e80e4db7e 100644
--- a/paddle/fluid/framework/ir/lock_free_optimize_pass.h
+++ b/paddle/fluid/framework/ir/lock_free_optimize_pass.h
@@ -17,10 +17,9 @@
 #include
 #include
 
-#include <boost/algorithm/string.hpp>
-
 #include "paddle/fluid/framework/ir/graph.h"
 #include "paddle/fluid/framework/ir/pass.h"
+#include "paddle/fluid/string/string_helper.h"
 
 namespace paddle {
 namespace framework {
@@ -109,7 +108,7 @@ class LockFreeOptimizePass : public Pass {
                       platform::errors::InvalidArgument(
                           "Input argument node cannot be nullptr."));
     return node->NodeType() == Node::Type::kVariable &&
-           boost::algorithm::ends_with(node->Name(), name);
+           paddle::string::ends_with(node->Name(), name);
   }
 
   inline bool IsVarNameContains(ir::Node* node, const std::string& name) const {
diff --git a/paddle/fluid/operators/expand_as_op.h b/paddle/fluid/operators/expand_as_op.h
old mode 100755
new mode 100644
index 406455af741715f9188b02649cc976ca5562e3b5..07ba0e5ad87133193f3be5d4259ae7dd38621372
--- a/paddle/fluid/operators/expand_as_op.h
+++ b/paddle/fluid/operators/expand_as_op.h
@@ -13,42 +13,12 @@ limitations under the License. */
 
 #include
 
-#include
-#include
-#include
-#include
-#include
-#include
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/eigen/eigen_function.h"
 
 #define MAX_RANK_SUPPORTED 6
-// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
-// Usage: BOOST_PP_REPEAT(count, macro, data).
-// This macro expands to the sequence:
-// macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
-// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
-// So the range of n is 0-5(which is count-1).
-// We want to generate case 1-6 instead of case 0-5.
-// So we need to change n to n + 1.
-#define EXPAND_AS_TEMPLATE(z, n, data) \
-  case n + 1: {                        \
-    ExpandAs<n + 1>(context);          \
-    break;                             \
-  }
-#define REP_EXPAND_AS_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_AS_TEMPLATE, ~)
-#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
-#define EXPAND_AS_GRAD_CASE(n)                                           \
-  case n + 1: {                                                          \
-    ExpandAsBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
-    break;                                                               \
-  }
-#define EXPAND_AS_GRAD_TEMPLATE(z, n, data) \
-  BOOST_PP_IF(COND(n), EXPAND_AS_GRAD_CASE(n), )
-#define REP_EXPAND_AS_GRAD_TEMPLATE(n) \
-  BOOST_PP_REPEAT(n, EXPAND_AS_GRAD_TEMPLATE, ~)
 
 namespace paddle {
 namespace operators {
@@ -67,7 +37,24 @@ class ExpandAsKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& context) const override {
     auto rank = context.Input<Tensor>("X")->dims().size();
     switch (rank) {
-      REP_EXPAND_AS_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        ExpandAs<1>(context);
+        break;
+      case 2:
+        ExpandAs<2>(context);
+        break;
+      case 3:
+        ExpandAs<3>(context);
+        break;
+      case 4:
+        ExpandAs<4>(context);
+        break;
+      case 5:
+        ExpandAs<5>(context);
+        break;
+      case 6:
+        ExpandAs<6>(context);
+        break;
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Only support tensor with rank being between 1 and 6. But received "
@@ -165,7 +152,24 @@ class ExpandAsGradKernel : public framework::OpKernel<T> {
                           "to %d, but the value received is %d.",
                           MAX_RANK_SUPPORTED, dims));
     switch (dims) {
-      REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        ExpandAsBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 2:
+        ExpandAsBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 3:
+        ExpandAsBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 4:
+        ExpandAsBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 5:
+        ExpandAsBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 6:
+        ExpandAsBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Only support tensor with rank being between 1 and 6. But "
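
For reviewers unfamiliar with Boost.PP: the removed REP_EXPAND_AS_TEMPLATE(6) expanded to exactly the six cases this patch writes out by hand, so the change is mechanical and behavior-preserving:

    // BOOST_PP_REPEAT(6, EXPAND_AS_TEMPLATE, ~) emitted, for n = 0..5:
    case 0 + 1: { ExpandAs<0 + 1>(context); break; }
    // ...
    case 5 + 1: { ExpandAs<5 + 1>(context); break; }
    // i.e. case 1 through case 6, matching the explicit switch above.
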
diff --git a/paddle/fluid/operators/expand_as_v2_op.h b/paddle/fluid/operators/expand_as_v2_op.h
old mode 100755
new mode 100644
index 6df4c592378cb24b4aa2557d768e6ae7ad34a4a4..3e8f7d15880bcd16ed040637f3e80c43b4d287b7
--- a/paddle/fluid/operators/expand_as_v2_op.h
+++ b/paddle/fluid/operators/expand_as_v2_op.h
@@ -14,42 +14,12 @@ limitations under the License. */
 
 #include
 #include
 
-#include
-#include
-#include
-#include
-#include
-#include
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/eigen/eigen_function.h"
 
 #define MAX_RANK_SUPPORTED 6
-// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
-// Usage: BOOST_PP_REPEAT(count, macro, data).
-// This macro expands to the sequence:
-// macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
-// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
-// So the range of n is 0-5(which is count-1).
-// We want to generate case 1-6 instead of case 0-5.
-// So we need to change n to n + 1.
-#define EXPAND_AS_TEMPLATE(z, n, data) \
-  case n + 1: {                        \
-    ExpandAs<n + 1>(context);          \
-    break;                             \
-  }
-#define REP_EXPAND_AS_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_AS_TEMPLATE, ~)
-#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
-#define EXPAND_AS_GRAD_CASE(n)                                           \
-  case n + 1: {                                                          \
-    ExpandAsBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
-    break;                                                               \
-  }
-#define EXPAND_AS_GRAD_TEMPLATE(z, n, data) \
-  BOOST_PP_IF(COND(n), EXPAND_AS_GRAD_CASE(n), )
-#define REP_EXPAND_AS_GRAD_TEMPLATE(n) \
-  BOOST_PP_REPEAT(n, EXPAND_AS_GRAD_TEMPLATE, ~)
 
 namespace paddle {
 namespace operators {
@@ -85,7 +55,26 @@ class ExpandAsV2Kernel : public framework::OpKernel<T> {
                           "expand_as_v2 op must be less than or equal to %d.",
                           target_rank, MAX_RANK_SUPPORTED));
 
-    switch (target_rank) { REP_EXPAND_AS_TEMPLATE(MAX_RANK_SUPPORTED) }
+    switch (target_rank) {
+      case 1:
+        ExpandAs<1>(context);
+        break;
+      case 2:
+        ExpandAs<2>(context);
+        break;
+      case 3:
+        ExpandAs<3>(context);
+        break;
+      case 4:
+        ExpandAs<4>(context);
+        break;
+      case 5:
+        ExpandAs<5>(context);
+        break;
+      case 6:
+        ExpandAs<6>(context);
+        break;
+    }
   }
 
 protected:
@@ -186,7 +175,24 @@ class ExpandAsV2GradKernel : public framework::OpKernel<T> {
                           "to %d, but the value received is %d.",
                           MAX_RANK_SUPPORTED, dims));
     switch (dims) {
-      REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        ExpandAsBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 2:
+        ExpandAsBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 3:
+        ExpandAsBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 4:
+        ExpandAsBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 5:
+        ExpandAsBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 6:
+        ExpandAsBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
      default:
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Only support tensor with rank being between 1 and 6. But "
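
Worth noting: the removed COND(n) guard was vestigial. For every n the repetition visited (0 through 5), BOOST_PP_MOD(n, MAX_RANK_SUPPORTED) is n itself, so COND(n) reduced to n >= n and BOOST_PP_IF always emitted the grad case. A trivial check of the arithmetic:

    // n % 6 == n for n in [0, 5], so "n >= n % 6" always held.
    static_assert(0 >= 0 % 6, "COND(0) was always true");
    static_assert(5 >= 5 % 6, "COND(5) was always true");

Nothing is therefore lost by replacing the conditional repetition with a plain six-case switch.
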
diff --git a/paddle/fluid/operators/expand_op.h b/paddle/fluid/operators/expand_op.h
old mode 100755
new mode 100644
index e566d69096595ce5ea9e753b58a2bf3e923a9c10..809bad1d6c1eec62214324849237b489eeb51b74
--- a/paddle/fluid/operators/expand_op.h
+++ b/paddle/fluid/operators/expand_op.h
@@ -16,41 +16,12 @@ limitations under the License. */
 
 #include
 
-#include
-#include
-#include
-#include
-#include
-#include
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/eigen/eigen_function.h"
 
 #define MAX_RANK_SUPPORTED 6
-// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
-// Usage: BOOST_PP_REPEAT(count, macro, data).
-// This macro expands to the sequence:
-// macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
-// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
-// So the range of n is 0-5(which is count-1).
-// We want to generate case 1-6 instead of case 0-5.
-// So we need to change n to n + 1.
-#define EXPAND_TEMPLATE(z, n, data) \
-  case n + 1: {                     \
-    Expand<n + 1>(context);         \
-    break;                          \
-  }
-#define REP_EXPAND_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_TEMPLATE, ~)
-#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
-#define EXPAND_GRAD_CASE(n)                                            \
-  case n + 1: {                                                        \
-    ExpandBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
-    break;                                                             \
-  }
-#define EXPAND_GRAD_TEMPLATE(z, n, data) \
-  BOOST_PP_IF(COND(n), EXPAND_GRAD_CASE(n), )
-#define REP_EXPAND_GRAD_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_GRAD_TEMPLATE, ~)
 
 namespace paddle {
 namespace operators {
@@ -137,7 +108,26 @@ class ExpandKernel : public framework::OpKernel<T> {
         "The number of dimensions of the input 'x' for Op(expand) "
         "must be less than or equal to %d, but the value received is %d.",
         MAX_RANK_SUPPORTED, rank));
-    switch (rank) { REP_EXPAND_TEMPLATE(MAX_RANK_SUPPORTED) }
+    switch (rank) {
+      case 1:
+        Expand<1>(context);
+        break;
+      case 2:
+        Expand<2>(context);
+        break;
+      case 3:
+        Expand<3>(context);
+        break;
+      case 4:
+        Expand<4>(context);
+        break;
+      case 5:
+        Expand<5>(context);
+        break;
+      case 6:
+        Expand<6>(context);
+        break;
+    }
   }
 
 protected:
@@ -233,7 +223,24 @@ class ExpandGradKernel : public framework::OpKernel<T> {
                           "to %d, but the value received is %d.",
                           MAX_RANK_SUPPORTED, dims));
     switch (dims) {
-      REP_EXPAND_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        ExpandBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 2:
+        ExpandBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 3:
+        ExpandBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 4:
+        ExpandBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 5:
+        ExpandBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 6:
+        ExpandBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Only support tensor with rank being between 1 and 6. But "
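
An alternative that would avoid repeating the six-case switch in every operator is a single compile-time dispatcher. A hypothetical C++14 sketch — DispatchRank is not part of this patch or of Paddle, just an illustration of the design space:

    #include <cstddef>
    #include <stdexcept>
    #include <type_traits>
    #include <utility>

    // Hypothetical helper: maps a runtime rank in [1, N] onto a
    // compile-time constant and invokes f with it.
    template <typename F, std::size_t... Is>
    void DispatchRank(int rank, F&& f, std::index_sequence<Is...>) {
      bool matched = false;
      // One comparison per candidate rank; only the match runs f.
      int expand[] = {
          (rank == static_cast<int>(Is) + 1
               ? (f(std::integral_constant<int, static_cast<int>(Is) + 1>{}),
                  matched = true, 0)
               : 0)...};
      (void)expand;
      if (!matched) throw std::invalid_argument("unsupported rank");
    }

    // Usage sketch inside a kernel body:
    //   DispatchRank(rank,
    //                [&](auto r) { Expand<decltype(r)::value>(context); },
    //                std::make_index_sequence<6>{});

The patch's explicit switches are arguably the better trade-off here: they keep each file self-contained and trivially greppable.
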
diff --git a/paddle/fluid/operators/expand_v2_op.h b/paddle/fluid/operators/expand_v2_op.h
old mode 100755
new mode 100644
index 8a87a067c51f1147108eec26278ce823d57656b9..a720bd7b5518238c8135c5da6d30cb6c980212a4
--- a/paddle/fluid/operators/expand_v2_op.h
+++ b/paddle/fluid/operators/expand_v2_op.h
@@ -17,41 +17,12 @@ limitations under the License. */
 
 #include
 #include
 
-#include
-#include
-#include
-#include
-#include
-#include
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/eigen/eigen_function.h"
 
 #define MAX_RANK_SUPPORTED 6
-// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
-// Usage: BOOST_PP_REPEAT(count, macro, data).
-// This macro expands to the sequence:
-// macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
-// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
-// So the range of n is 0-5(which is count-1).
-// We want to generate case 1-6 instead of case 0-5.
-// So we need to change n to n + 1.
-#define EXPAND_TEMPLATE(z, n, data) \
-  case n + 1: {                     \
-    Expand<n + 1>(context);         \
-    break;                          \
-  }
-#define REP_EXPAND_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_TEMPLATE, ~)
-#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
-#define EXPAND_GRAD_CASE(n)                                            \
-  case n + 1: {                                                        \
-    ExpandBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
-    break;                                                             \
-  }
-#define EXPAND_GRAD_TEMPLATE(z, n, data) \
-  BOOST_PP_IF(COND(n), EXPAND_GRAD_CASE(n), )
-#define REP_EXPAND_GRAD_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_GRAD_TEMPLATE, ~)
 
 namespace paddle {
 namespace operators {
@@ -132,7 +103,26 @@ class ExpandV2Kernel : public framework::OpKernel<T> {
                           "less than or equal to %d.",
                           shape_size, MAX_RANK_SUPPORTED));
     rank = std::max(rank, static_cast<int>(shape_size));
-    switch (rank) { REP_EXPAND_TEMPLATE(MAX_RANK_SUPPORTED) }
+    switch (rank) {
+      case 1:
+        Expand<1>(context);
+        break;
+      case 2:
+        Expand<2>(context);
+        break;
+      case 3:
+        Expand<3>(context);
+        break;
+      case 4:
+        Expand<4>(context);
+        break;
+      case 5:
+        Expand<5>(context);
+        break;
+      case 6:
+        Expand<6>(context);
+        break;
+    }
   }
 
 protected:
@@ -271,7 +261,24 @@ class ExpandV2GradKernel : public framework::OpKernel<T> {
                           "to %d, but the value received is %d.",
                           MAX_RANK_SUPPORTED, dims));
     switch (dims) {
-      REP_EXPAND_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        ExpandBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 2:
+        ExpandBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 3:
+        ExpandBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 4:
+        ExpandBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 5:
+        ExpandBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 6:
+        ExpandBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Only support tensor with rank being between 1 and 6. But "
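
The switch-on-rank pattern survives the Boost.PP removal because the Eigen tensor expressions these kernels use fix the rank at compile time, so a runtime rank must be mapped onto a template argument somewhere. A hedged sketch of why — BroadcastToRank is a hypothetical free function, not patch code:

    #include <unsupported/Eigen/CXX11/Tensor>

    // Rank must be a compile-time constant so Eigen::DSizes<.., Rank> and
    // TensorMap<Tensor<.., Rank>> can be instantiated at all.
    template <int Rank>
    void BroadcastToRank(const float* src, float* dst,
                         const Eigen::DSizes<Eigen::DenseIndex, Rank>& src_dims,
                         const Eigen::DSizes<Eigen::DenseIndex, Rank>& bcast) {
      Eigen::TensorMap<Eigen::Tensor<const float, Rank>> x(src, src_dims);
      Eigen::DSizes<Eigen::DenseIndex, Rank> out_dims;
      for (int i = 0; i < Rank; ++i) out_dims[i] = src_dims[i] * bcast[i];
      Eigen::TensorMap<Eigen::Tensor<float, Rank>> y(dst, out_dims);
      y = x.broadcast(bcast);  // rank is checked at compile time
    }
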
diff --git a/paddle/fluid/operators/meshgrid_op.h b/paddle/fluid/operators/meshgrid_op.h
old mode 100755
new mode 100644
index 2aad894e11d4b43b5121d18bf431c0195586926e..e01469f26d74fa84afc6bfd4f5138e30f84730d2
--- a/paddle/fluid/operators/meshgrid_op.h
+++ b/paddle/fluid/operators/meshgrid_op.h
@@ -16,12 +16,6 @@
 
 #include
 
-#include
-#include
-#include
-#include
-#include
-
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
@@ -29,31 +23,6 @@
 #include "paddle/fluid/platform/errors.h"
 
 #define MAX_RANK_SUPPORTED 6
-// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
-// Usage: BOOST_PP_REPEAT(count, macro, data).
-// This macro expands to the sequence:
-// macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
-// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
-// So the range of n is 0-5(which is count-1).
-// We want to generate case 1-6 instead of case 0-5.
-// So we need to change n to n + 1.
-#define MESHGRID_TEMPLATE(z, n, data) \
-  case n + 1: {                       \
-    MeshgridForward<n + 1>(context);  \
-    break;                            \
-  }
-#define REP_MESHGRID_TEMPLATE(n) BOOST_PP_REPEAT(n, MESHGRID_TEMPLATE, ~)
-#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
-
-#define MESHGRID_GRAD_CASE(n)         \
-  case n + 1: {                       \
-    MeshgridBackward<n + 1>(context); \
-    break;                            \
-  }
-#define MESHGRID_GRAD_TEMPLATE(z, n, data) \
-  BOOST_PP_IF(COND(n), MESHGRID_GRAD_CASE(n), )
-#define REP_MESHGRID_GRAD_TEMPLATE(n) \
-  BOOST_PP_REPEAT(n, MESHGRID_GRAD_TEMPLATE, ~)
 
 namespace paddle {
 namespace operators {
@@ -65,7 +34,24 @@ class MeshgridKernel : public framework::OpKernel<T> {
     auto ins = context.MultiInput<framework::Tensor>("X");
     auto rank = ins.size();
     switch (rank) {
-      REP_MESHGRID_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        MeshgridForward<1>(context);
+        break;
+      case 2:
+        MeshgridForward<2>(context);
+        break;
+      case 3:
+        MeshgridForward<3>(context);
+        break;
+      case 4:
+        MeshgridForward<4>(context);
+        break;
+      case 5:
+        MeshgridForward<5>(context);
+        break;
+      case 6:
+        MeshgridForward<6>(context);
+        break;
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Excepted Tensor numbers between 1 and 6, but only received d% .",
@@ -141,7 +127,24 @@ class MeshgridGradKernel : public framework::OpKernel<T> {
         context.MultiInput<framework::Tensor>(framework::GradVarName("Out"));
     int n = out_grad.size();
     switch (n) {
-      REP_MESHGRID_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        MeshgridBackward<1>(context);
+        break;
+      case 2:
+        MeshgridBackward<2>(context);
+        break;
+      case 3:
+        MeshgridBackward<3>(context);
+        break;
+      case 4:
+        MeshgridBackward<4>(context);
+        break;
+      case 5:
+        MeshgridBackward<5>(context);
+        break;
+      case 6:
+        MeshgridBackward<6>(context);
+        break;
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Excepted Tensor numbers between 1 and 6, but only received d% .",
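
Note that meshgrid dispatches on the number of input tensors rather than on input rank: N one-dimensional inputs yield N outputs of rank N. A toy standalone illustration of that contract (made-up data, not patch code):

    #include <cstdio>

    int main() {
      int x[2] = {1, 2}, y[3] = {10, 20, 30};  // two 1-D inputs
      int X[2][3], Y[2][3];                    // so two rank-2 outputs
      for (int i = 0; i < 2; ++i)
        for (int j = 0; j < 3; ++j) {
          X[i][j] = x[i];  // X repeats x down the columns
          Y[i][j] = y[j];  // Y repeats y across the rows
        }
      std::printf("%d %d\n", X[1][2], Y[1][2]);  // prints "2 30"
      return 0;
    }
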
diff --git a/paddle/fluid/operators/tile_op.h b/paddle/fluid/operators/tile_op.h
old mode 100755
new mode 100644
index 1fb0fa6ce5176f31cd1c2c3314152f711fb55355..1e4a4dff27d2da900e57a123db4859eb1bdf4f95
--- a/paddle/fluid/operators/tile_op.h
+++ b/paddle/fluid/operators/tile_op.h
@@ -17,40 +17,12 @@ limitations under the License. */
 
 #include
 #include
 
-#include
-#include
-#include
-#include
-#include
-#include
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/eigen/eigen_function.h"
 
 #define MAX_RANK_SUPPORTED 6
-// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
-// Usage: BOOST_PP_REPEAT(count, macro, data).
-// This macro expands to the sequence:
-// macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
-// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
-// So the range of n is 0-5(which is count-1).
-// We want to generate case 1-6 instead of case 0-5.
-// So we need to change n to n + 1.
-#define TILE_TEMPLATE(z, n, data) \
-  case n + 1: {                   \
-    Tile<n + 1>(context);         \
-    break;                        \
-  }
-#define REP_TILE_TEMPLATE(n) BOOST_PP_REPEAT(n, TILE_TEMPLATE, ~)
-#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
-#define TILE_GRAD_CASE(n)                                            \
-  case n + 1: {                                                      \
-    TileBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
-    break;                                                           \
-  }
-#define TILE_GRAD_TEMPLATE(z, n, data) BOOST_PP_IF(COND(n), TILE_GRAD_CASE(n), )
-#define REP_TILE_GRAD_TEMPLATE(n) BOOST_PP_REPEAT(n, TILE_GRAD_TEMPLATE, ~)
 
 namespace paddle {
 namespace operators {
@@ -130,7 +102,26 @@ class TileKernel : public framework::OpKernel<T> {
             "must be less than or equal to %d, but the value received is %d.",
             MAX_RANK_SUPPORTED, repeat_times_size));
     rank = std::max(rank, repeat_times_size);
-    switch (rank) { REP_TILE_TEMPLATE(MAX_RANK_SUPPORTED) }
+    switch (rank) {
+      case 1:
+        Tile<1>(context);
+        break;
+      case 2:
+        Tile<2>(context);
+        break;
+      case 3:
+        Tile<3>(context);
+        break;
+      case 4:
+        Tile<4>(context);
+        break;
+      case 5:
+        Tile<5>(context);
+        break;
+      case 6:
+        Tile<6>(context);
+        break;
+    }
   }
 
 protected:
@@ -251,7 +242,24 @@ class TileGradKernel : public framework::OpKernel<T> {
                           "to %d, but the value received is %d.",
                           MAX_RANK_SUPPORTED, dims));
     switch (dims) {
-      REP_TILE_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        TileBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 2:
+        TileBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 3:
+        TileBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 4:
+        TileBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 5:
+        TileBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 6:
+        TileBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Only support tensor with rank being between 1 and 6. But "
diff --git a/paddle/fluid/platform/cudnn_workspace_helper.cc b/paddle/fluid/platform/cudnn_workspace_helper.cc
index c4e71c86f9e7501709f812c2341bf29337e71837..bb0e9a226d15001d8f19eff136cb29152e1906fb 100644
--- a/paddle/fluid/platform/cudnn_workspace_helper.cc
+++ b/paddle/fluid/platform/cudnn_workspace_helper.cc
@@ -15,13 +15,14 @@
 #include "paddle/fluid/platform/cudnn_workspace_helper.h"
 
 #include
-#include "boost/lexical_cast.hpp"
+#include <string>
+
 namespace paddle {
 namespace platform {
 
 static int GetDefaultConvWorkspaceSizeLimitMBImpl() {
   const char *env_str = std::getenv("FLAGS_conv_workspace_size_limit");
-  return env_str ? boost::lexical_cast<int>(std::string(env_str))
+  return env_str ? std::stoi(std::string(env_str))
                  : kDefaultConvWorkspaceSizeLimitMB;
 }
diff --git a/paddle/fluid/string/CMakeLists.txt b/paddle/fluid/string/CMakeLists.txt
index a465f5909a7c6ee83211b8e03f1c3e7d3103022c..9667e18bc6a1e34fee6e039a710dd1bd8b24481e 100644
--- a/paddle/fluid/string/CMakeLists.txt
+++ b/paddle/fluid/string/CMakeLists.txt
@@ -1,7 +1,8 @@
 cc_library(stringpiece SRCS piece.cc DEPS flags)
 cc_library(pretty_log SRCS pretty_log.cc DEPS flags)
-cc_library(string_helper SRCS string_helper.cc DEPS boost flags)
+cc_library(string_helper SRCS string_helper.cc DEPS flags)
 cc_test(stringpiece_test SRCS piece_test.cc DEPS stringpiece glog gflags)
 cc_test(stringprintf_test SRCS printf_test.cc DEPS glog gflags)
 cc_test(to_string_test SRCS to_string_test.cc)
 cc_test(split_test SRCS split_test.cc)
+cc_test(string_helper_test SRCS string_helper_test.cc DEPS string_helper)
diff --git a/paddle/fluid/string/string_helper.cc b/paddle/fluid/string/string_helper.cc
index 8731e8fca8a5c4d8b13cf5fb6b38c5cf710225aa..141ac2ba47c5b9cbd3f4c4ba2bddf4657349ce6f 100644
--- a/paddle/fluid/string/string_helper.cc
+++ b/paddle/fluid/string/string_helper.cc
@@ -88,6 +88,11 @@ inline int str_to_float(const char* str, float* v) {
   return index;
 }
 
+bool ends_with(std::string const& input, std::string const& test) {
+  if (test.size() > input.size()) return false;
+  return std::equal(test.rbegin(), test.rend(), input.rbegin());
+}
+
 // A helper class for reading lines from file.
 // A line buffer is maintained. It
 // doesn't need to know the maximum possible length of a line.
@@ -100,7 +105,7 @@ char* LineFileReader::getdelim(FILE* f, char delim) {
       _buffer[--ret] = 0;
     }
 
-    _length = (size_t)ret;
+    _length = static_cast<size_t>(ret);
     return _buffer;
   } else {
     _length = 0;
diff --git a/paddle/fluid/string/string_helper.h b/paddle/fluid/string/string_helper.h
index f7387e877af2cd3f00e1ff61370b6469bfbe7771..37b713766dd558fefc8402f29814654759593f8c 100644
--- a/paddle/fluid/string/string_helper.h
+++ b/paddle/fluid/string/string_helper.h
@@ -21,7 +21,6 @@
 #include
 #include
 
-#include "boost/lexical_cast.hpp"
 #include "glog/logging.h"
 
 namespace paddle {
@@ -38,6 +37,7 @@ void format_string_append(std::string& str, const char* fmt,  // NOLINT
   CHECK_GE(len, 0);
   size_t oldlen = str.length();
   str.resize(oldlen + len + 1);
+
   CHECK(snprintf(&str[oldlen], (size_t)len + 1, fmt, args...) ==  // NOLINT
         len);
   str.resize(oldlen + len);
@@ -69,6 +69,9 @@ std::string erase_spaces(const std::string& str);
 
 int str_to_float(const char* str, float* v);
 
+// checks whether the test string is a suffix of the input string.
+bool ends_with(std::string const& input, std::string const& test);
+
 // split string by delim
 template <class T>
 std::vector<T> split_string(const std::string& str, const std::string& delim) {
@@ -134,7 +137,9 @@ std::string join_strings(const Container& strs, char delim) {
       str += delim;
     }
 
-    str += boost::lexical_cast<std::string>(elem);
+    std::stringstream ss;
+    ss << elem;
+    str += ss.str();
     ++i;
   }
 
@@ -151,7 +156,9 @@ std::string join_strings(const Container& strs, const std::string& delim) {
      str += delim;
     }
 
-    str += boost::lexical_cast<std::string>(elem);
+    std::stringstream ss;
+    ss << elem;
+    str += ss.str();
     ++i;
   }
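
Since join_strings now streams elements through std::stringstream rather than boost::lexical_cast<std::string>, any element type with a stream operator still works. A small usage sketch (JoinInts is a hypothetical caller, not in the patch):

    #include <string>
    #include <vector>

    #include "paddle/fluid/string/string_helper.h"

    // The template only requires operator<<(std::ostream&, T).
    std::string JoinInts() {
      std::vector<int> v = {1, 2, 3};
      return paddle::string::join_strings(v, ',');  // "1,2,3"
    }
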
diff --git a/paddle/fluid/string/string_helper_test.cc b/paddle/fluid/string/string_helper_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..deeb4bdcc829179f10669d716d26284bac9f5e8f
--- /dev/null
+++ b/paddle/fluid/string/string_helper_test.cc
@@ -0,0 +1,58 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/string/string_helper.h"
+
+#include <vector>
+
+#include "gtest/gtest.h"
+
+TEST(StringHelper, EndsWith) {
+  std::string input("hello world");
+  std::string test1("world");
+  std::string test2("helloworld");
+  std::string test3("hello world hello world");
+
+  EXPECT_TRUE(paddle::string::ends_with(input, test1));
+  EXPECT_TRUE(paddle::string::ends_with(input, input));
+
+  EXPECT_FALSE(paddle::string::ends_with(input, test2));
+  EXPECT_FALSE(paddle::string::ends_with(input, test3));
+}
+
+TEST(StringHelper, FormatStringAppend) {
+  std::string str("hello");
+  char fmt[] = "hhh";
+
+  paddle::string::format_string_append(str, fmt);
+  EXPECT_EQ(str, "hellohhh");
+}
+
+TEST(StringHelper, JoinStrings) {
+  std::vector<std::string> v;
+  v.push_back("hello");
+  v.push_back("world");
+
+  std::string result = paddle::string::join_strings(v, ' ');
+  EXPECT_EQ(result, "hello world");
+
+  result = paddle::string::join_strings(v, '\n');
+  EXPECT_EQ(result, "hello\nworld");
+
+  result = paddle::string::join_strings(v, ',');
+  EXPECT_EQ(result, "hello,world");
+
+  result = paddle::string::join_strings(v, " new ");
+  EXPECT_EQ(result, "hello new world");
+}
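
The FormatStringAppend test above exercises only a literal format string with no conversions. A possible follow-up test for the variadic path (a suggestion, not part of the patch) would be:

    // Exercises format_string_append with an actual %d conversion.
    TEST(StringHelper, FormatStringAppendWithArgs) {
      std::string str("value:");
      paddle::string::format_string_append(str, " %d", 42);
      EXPECT_EQ(str, "value: 42");
    }
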