Unverified Commit bb7b4c0c authored by chentianyu03, committed by GitHub

remove boost::algorithm::ends_with, boost macros, and boost::lexical_cast APIs (#34310)

* replace boost::algorithm::ends_with with a self-defined ends_with function

* remove BOOST macro in certain operators

* remove boost::lexical_cast

* add test for string_helper

* add more test case for string_helper

* modify join_strings func and test case

* fix build_strategy_test failed bug

* remove string_helper_test from parallel_UT_rule.py
Parent 911c8593
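The same replacement pattern recurs throughout the diff: boost::lexical_cast<int> becomes std::stoi, boost::lexical_cast<uint64_t> becomes std::stoull, boost::lexical_cast<float> becomes std::stof, and a single catch of boost::bad_lexical_cast becomes two catches, std::invalid_argument and std::out_of_range. A minimal sketch of the pattern (ParseFloatOrZero is an illustrative name, not from the patch):

#include <stdexcept>
#include <string>

// Returns 0.0f on malformed or out-of-range input, mirroring how the
// sparse tables below reset unparseable values instead of aborting the load.
float ParseFloatOrZero(const std::string& s) {
  try {
    return std::stof(s);
  } catch (const std::invalid_argument&) {  // no numeric prefix at all
    return 0.0f;
  } catch (const std::out_of_range&) {      // parses, but overflows float
    return 0.0f;
  }
}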
@@ -21,7 +21,6 @@
 #include <vector>
 #include <ThreadPool.h>
-#include "boost/lexical_cast.hpp"
 #include "glog/logging.h"
 #include "paddle/fluid/distributed/common/utils.h"
 #include "paddle/fluid/framework/blocking_queue.h"
@@ -36,8 +35,6 @@ constexpr int Q_SIZE = 10000;
 constexpr int BUCKET = 10;
 constexpr char XEOF[] = "EOF";
-using boost::lexical_cast;
-
 inline double GetCurrentUS() {
   struct timeval time;
   gettimeofday(&time, NULL);
@@ -208,8 +205,10 @@ class ShardingMerge {
   for (int x = 0; x < embedding_dim; ++x) {
     float v = 0.0;
     try {
-      v = lexical_cast<float>(values_str[x]);
-    } catch (boost::bad_lexical_cast &e) {
+      v = std::stof(values_str[x]);
+    } catch (std::invalid_argument &e) {
+      VLOG(0) << " get unexpected line: " << line;
+    } catch (std::out_of_range &e) {
       VLOG(0) << " get unexpected line: " << line;
     }
     out->push_back(v);
...
@@ -17,8 +17,6 @@ limitations under the License. */
 #include <vector>
 #include "paddle/fluid/framework/io/fs.h"
-#include <boost/algorithm/string.hpp>
-#include <boost/lexical_cast.hpp>
 #include "paddle/fluid/distributed/index_dataset/index_wrapper.h"
 namespace paddle {
@@ -65,7 +63,7 @@ int TreeIndex::Load(const std::string filename) {
     if (item.key() == ".tree_meta") {
       meta_.ParseFromString(item.value());
     } else {
-      auto code = boost::lexical_cast<uint64_t>(item.key());
+      auto code = std::stoull(item.key());
       IndexNode node;
       node.ParseFromString(item.value());
       PADDLE_ENFORCE_NE(node.id(), 0,
...
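One behavioral difference to keep in mind with these conversions: boost::lexical_cast rejects any input that is not entirely numeric, while the std::sto* family skips leading whitespace and stops at the first non-numeric character, succeeding on a numeric prefix. A standalone illustration (not part of the patch):

#include <cassert>
#include <string>

int main() {
  // boost::lexical_cast<uint64_t>(" 42abc") would throw;
  // std::stoull accepts the leading "42" and ignores the rest.
  assert(std::stoull(" 42abc") == 42ULL);
  return 0;
}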
@@ -15,7 +15,6 @@
 #include "paddle/fluid/distributed/table/common_sparse_table.h"
 #include <sstream>
-#include "boost/lexical_cast.hpp"
 #include "glog/logging.h"
 #include "paddle/fluid/platform/enforce.h"
@@ -50,8 +49,11 @@ void CommonSparseTable::ProcessALine(const std::vector<std::string>& columns,
     float v = 0.0;
     try {
-      v = lexical_cast<float>(va);
-    } catch (boost::bad_lexical_cast& e) {
+      v = std::stof(va);
+    } catch (std::invalid_argument& e) {
+      VLOG(0) << "id: " << id << " get unexpected value: " << va
+              << " and be reset to: 0.0";
+    } catch (std::out_of_range& e) {
       VLOG(0) << "id: " << id << " get unexpected value: " << va
               << " and be reset to: 0.0";
     }
@@ -131,7 +133,7 @@ int64_t CommonSparseTable::LoadFromText(
   while (std::getline(file, line)) {
     auto values = paddle::string::split_string<std::string>(line, "\t");
-    auto id = lexical_cast<uint64_t>(values[0]);
+    auto id = std::stoull(values[0]);
     if (id % pserver_num != pserver_id) {
       VLOG(3) << "will not load " << values[0] << " from " << valuepath
@@ -150,10 +152,9 @@ int64_t CommonSparseTable::LoadFromText(
     VALUE* value_instant = block->GetValue(id);
     if (values.size() == 5) {
-      value_instant->count_ = lexical_cast<int>(values[1]);
-      value_instant->unseen_days_ = lexical_cast<int>(values[2]);
-      value_instant->is_entry_ =
-          static_cast<bool>(lexical_cast<int>(values[3]));
+      value_instant->count_ = std::stoi(values[1]);
+      value_instant->unseen_days_ = std::stoi(values[2]);
+      value_instant->is_entry_ = static_cast<bool>(std::stoi(values[3]));
     }
     std::vector<float*> block_values = block->Get(id, meta.names, meta.dims);
...
@@ -33,7 +33,6 @@
 #include "paddle/fluid/string/string_helper.h"
 #define PSERVER_SAVE_SUFFIX ".shard"
-using boost::lexical_cast;
 namespace paddle {
 namespace distributed {
...
@@ -310,7 +310,7 @@ int64_t SSDSparseTable::LoadFromText(
   while (std::getline(file, line)) {
     auto values = paddle::string::split_string<std::string>(line, "\t");
-    auto id = lexical_cast<uint64_t>(values[0]);
+    auto id = std::stoull(values[0]);
     if (id % pserver_num != pserver_id) {
       VLOG(3) << "will not load " << values[0] << " from " << valuepath
@@ -329,10 +329,9 @@ int64_t SSDSparseTable::LoadFromText(
     VALUE* value_instant = block->GetValue(id);
     if (values.size() == 5) {
-      value_instant->count_ = lexical_cast<int>(values[1]);
-      value_instant->unseen_days_ = lexical_cast<int>(values[2]);
-      value_instant->is_entry_ =
-          static_cast<bool>(lexical_cast<int>(values[3]));
+      value_instant->count_ = std::stoi(values[1]);
+      value_instant->unseen_days_ = std::stoi(values[2]);
+      value_instant->is_entry_ = static_cast<bool>(std::stoi(values[3]));
     }
     std::vector<float*> block_values = block->Get(id, meta.names, meta.dims);
...
@@ -141,7 +141,7 @@ if(NOT APPLE AND NOT WIN32 AND (WITH_GPU OR WITH_ROCM))
 endif()
 cc_library(build_strategy SRCS build_strategy.cc DEPS pass_builder ${IR_PASS_DEPS})
 cc_test(build_strategy_test SRCS build_strategy_test.cc
-    DEPS build_strategy op_registry op_proto_maker graph)
+    DEPS build_strategy op_registry op_proto_maker graph string_helper)
 if (WITH_MKLDNN)
   target_link_libraries(build_strategy mkldnn_placement_pass)
...
@@ -262,7 +262,7 @@ void FleetWrapper::HeterPushSparseVars(
     int64_t* ids = tensor->data<int64_t>();
     int slot = 0;
     if (dump_slot) {
-      slot = boost::lexical_cast<int>(sparse_key_names[i]);
+      slot = std::stoi(sparse_key_names[i]);
     }
     Variable* g_var = scope.FindVar(sparse_grad_names[i]);
     if (g_var == nullptr) {
@@ -915,12 +915,17 @@ void FleetWrapper::PushSparseVarsWithLabelAsync(
     int slot = 0;
     if (dump_slot) {
       try {
-        slot = boost::lexical_cast<int>(sparse_key_names[i]);
-      } catch (boost::bad_lexical_cast const& e) {
+        slot = std::stoi(sparse_key_names[i]);
+      } catch (std::invalid_argument const& e) {
         PADDLE_THROW(platform::errors::PreconditionNotMet(
             "sparse var's name: %s, doesn't support non-integer type name when "
             "dump_slot=True",
             sparse_key_names[i]));
+      } catch (std::out_of_range const& e) {
+        PADDLE_THROW(platform::errors::PreconditionNotMet(
+            "sparse var's name: %s, integer type name out of range when "
+            "dump_slot=True",
+            sparse_key_names[i]));
       }
     }
     Variable* g_var = scope.FindVar(sparse_grad_names[i]);
@@ -1121,7 +1126,7 @@ void FleetWrapper::PushSparseFromTensorWithLabelAsync(
       data[click_index] = static_cast<float>(fea_labels.at(input_idx));
     }
     if (dump_slot) {
-      int slot = boost::lexical_cast<int>(input_names[index]);
+      int slot = std::stoi(input_names[index]);
       data[0] = static_cast<float>(slot);
     }
     ++input_idx;
...
@@ -17,10 +17,9 @@
 #include <string>
 #include <vector>
-#include <boost/algorithm/string/predicate.hpp>
 #include "paddle/fluid/framework/ir/graph.h"
 #include "paddle/fluid/framework/ir/pass.h"
+#include "paddle/fluid/string/string_helper.h"
 namespace paddle {
 namespace framework {
@@ -109,7 +108,7 @@ class LockFreeOptimizePass : public Pass {
                            "Input argument node cannot be nullptr."));
     return node->NodeType() == Node::Type::kVariable &&
-           boost::algorithm::ends_with(node->Name(), name);
+           paddle::string::ends_with(node->Name(), name);
   }
   inline bool IsVarNameContains(ir::Node* node, const std::string& name) const {
...
@@ -13,42 +13,12 @@ limitations under the License. */
 #include <vector>
-#include <boost/preprocessor/arithmetic/div.hpp>
-#include <boost/preprocessor/arithmetic/mod.hpp>
-#include <boost/preprocessor/comparison/greater.hpp>
-#include <boost/preprocessor/comparison/greater_equal.hpp>
-#include <boost/preprocessor/control/if.hpp>
-#include <boost/preprocessor/repetition/repeat.hpp>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/eigen/eigen_function.h"
 #define MAX_RANK_SUPPORTED 6
-// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
-//    Usage: BOOST_PP_REPEAT(count, macro, data).
-//    This macro expands to the sequence:
-//    macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
-// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
-//    So the range of n is 0-5(which is count-1).
-//    We want to generate case 1-6 instead of case 0-5.
-//    So we need to change n to n + 1.
-#define EXPAND_AS_TEMPLATE(z, n, data) \
-  case n + 1: {                        \
-    ExpandAs<n + 1>(context);          \
-    break;                             \
-  }
-#define REP_EXPAND_AS_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_AS_TEMPLATE, ~)
-#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
-#define EXPAND_AS_GRAD_CASE(n)                                           \
-  case n + 1: {                                                          \
-    ExpandAsBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
-    break;                                                               \
-  }
-#define EXPAND_AS_GRAD_TEMPLATE(z, n, data) \
-  BOOST_PP_IF(COND(n), EXPAND_AS_GRAD_CASE(n), )
-#define REP_EXPAND_AS_GRAD_TEMPLATE(n) \
-  BOOST_PP_REPEAT(n, EXPAND_AS_GRAD_TEMPLATE, ~)
 namespace paddle {
 namespace operators {
@@ -67,7 +37,24 @@ class ExpandAsKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& context) const override {
     auto rank = context.Input<Tensor>("X")->dims().size();
     switch (rank) {
-      REP_EXPAND_AS_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        ExpandAs<1>(context);
+        break;
+      case 2:
+        ExpandAs<2>(context);
+        break;
+      case 3:
+        ExpandAs<3>(context);
+        break;
+      case 4:
+        ExpandAs<4>(context);
+        break;
+      case 5:
+        ExpandAs<5>(context);
+        break;
+      case 6:
+        ExpandAs<6>(context);
+        break;
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Only support tensor with rank being between 1 and 6. But received "
@@ -165,7 +152,24 @@ class ExpandAsGradKernel : public framework::OpKernel<T> {
                           "to %d, but the value received is %d.",
                           MAX_RANK_SUPPORTED, dims));
     switch (dims) {
-      REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        ExpandAsBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 2:
+        ExpandAsBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 3:
+        ExpandAsBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 4:
+        ExpandAsBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 5:
+        ExpandAsBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 6:
+        ExpandAsBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Only support tensor with rank being between 1 and 6. But "
...
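For reference, the REP_EXPAND_AS_TEMPLATE macro deleted above expanded to exactly the case labels that now appear inline. A standalone sketch of the equivalent dispatch (Foo and Dispatch are illustrative names, not Paddle code):

#include <iostream>

// Stand-in for the rank-templated kernel helper.
template <int Rank>
void Foo(int context) {
  std::cout << "rank " << Rank << ", ctx " << context << "\n";
}

// BOOST_PP_REPEAT(6, EXPAND_AS_TEMPLATE, ~) generated these six cases
// with n = 0..5 shifted to case n + 1; the patch writes them by hand,
// trading a little repetition for dropping boost/preprocessor entirely.
void Dispatch(int rank, int context) {
  switch (rank) {
    case 1: Foo<1>(context); break;
    case 2: Foo<2>(context); break;
    case 3: Foo<3>(context); break;
    case 4: Foo<4>(context); break;
    case 5: Foo<5>(context); break;
    case 6: Foo<6>(context); break;
    default: std::cout << "unsupported rank " << rank << "\n";
  }
}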
@@ -14,42 +14,12 @@ limitations under the License. */
 #include <algorithm>
 #include <vector>
-#include <boost/preprocessor/arithmetic/div.hpp>
-#include <boost/preprocessor/arithmetic/mod.hpp>
-#include <boost/preprocessor/comparison/greater.hpp>
-#include <boost/preprocessor/comparison/greater_equal.hpp>
-#include <boost/preprocessor/control/if.hpp>
-#include <boost/preprocessor/repetition/repeat.hpp>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/eigen/eigen_function.h"
 #define MAX_RANK_SUPPORTED 6
-// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
-//    Usage: BOOST_PP_REPEAT(count, macro, data).
-//    This macro expands to the sequence:
-//    macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
-// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
-//    So the range of n is 0-5(which is count-1).
-//    We want to generate case 1-6 instead of case 0-5.
-//    So we need to change n to n + 1.
-#define EXPAND_AS_TEMPLATE(z, n, data) \
-  case n + 1: {                        \
-    ExpandAs<n + 1>(context);          \
-    break;                             \
-  }
-#define REP_EXPAND_AS_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_AS_TEMPLATE, ~)
-#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
-#define EXPAND_AS_GRAD_CASE(n)                                           \
-  case n + 1: {                                                          \
-    ExpandAsBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
-    break;                                                               \
-  }
-#define EXPAND_AS_GRAD_TEMPLATE(z, n, data) \
-  BOOST_PP_IF(COND(n), EXPAND_AS_GRAD_CASE(n), )
-#define REP_EXPAND_AS_GRAD_TEMPLATE(n) \
-  BOOST_PP_REPEAT(n, EXPAND_AS_GRAD_TEMPLATE, ~)
 namespace paddle {
 namespace operators {
@@ -85,7 +55,26 @@ class ExpandAsV2Kernel : public framework::OpKernel<T> {
                           "expand_as_v2 op must be less than or equal to %d.",
                           target_rank, MAX_RANK_SUPPORTED));
-    switch (target_rank) { REP_EXPAND_AS_TEMPLATE(MAX_RANK_SUPPORTED) }
+    switch (target_rank) {
+      case 1:
+        ExpandAs<1>(context);
+        break;
+      case 2:
+        ExpandAs<2>(context);
+        break;
+      case 3:
+        ExpandAs<3>(context);
+        break;
+      case 4:
+        ExpandAs<4>(context);
+        break;
+      case 5:
+        ExpandAs<5>(context);
+        break;
+      case 6:
+        ExpandAs<6>(context);
+        break;
+    }
   }
 protected:
@@ -186,7 +175,24 @@ class ExpandAsV2GradKernel : public framework::OpKernel<T> {
                           "to %d, but the value received is %d.",
                           MAX_RANK_SUPPORTED, dims));
     switch (dims) {
-      REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        ExpandAsBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 2:
+        ExpandAsBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 3:
+        ExpandAsBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 4:
+        ExpandAsBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 5:
+        ExpandAsBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 6:
+        ExpandAsBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Only support tensor with rank being between 1 and 6. But "
...
@@ -16,41 +16,12 @@ limitations under the License. */
 #include <vector>
-#include <boost/preprocessor/arithmetic/div.hpp>
-#include <boost/preprocessor/arithmetic/mod.hpp>
-#include <boost/preprocessor/comparison/greater.hpp>
-#include <boost/preprocessor/comparison/greater_equal.hpp>
-#include <boost/preprocessor/control/if.hpp>
-#include <boost/preprocessor/repetition/repeat.hpp>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/eigen/eigen_function.h"
 #define MAX_RANK_SUPPORTED 6
-// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
-//    Usage: BOOST_PP_REPEAT(count, macro, data).
-//    This macro expands to the sequence:
-//    macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
-// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
-//    So the range of n is 0-5(which is count-1).
-//    We want to generate case 1-6 instead of case 0-5.
-//    So we need to change n to n + 1.
-#define EXPAND_TEMPLATE(z, n, data) \
-  case n + 1: {                     \
-    Expand<n + 1>(context);         \
-    break;                          \
-  }
-#define REP_EXPAND_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_TEMPLATE, ~)
-#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
-#define EXPAND_GRAD_CASE(n)                                            \
-  case n + 1: {                                                        \
-    ExpandBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
-    break;                                                             \
-  }
-#define EXPAND_GRAD_TEMPLATE(z, n, data) \
-  BOOST_PP_IF(COND(n), EXPAND_GRAD_CASE(n), )
-#define REP_EXPAND_GRAD_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_GRAD_TEMPLATE, ~)
 namespace paddle {
 namespace operators {
@@ -137,7 +108,26 @@ class ExpandKernel : public framework::OpKernel<T> {
         "The number of dimensions of the input 'x' for Op(expand) "
         "must be less than or equal to %d, but the value received is %d.",
         MAX_RANK_SUPPORTED, rank));
-    switch (rank) { REP_EXPAND_TEMPLATE(MAX_RANK_SUPPORTED) }
+    switch (rank) {
+      case 1:
+        Expand<1>(context);
+        break;
+      case 2:
+        Expand<2>(context);
+        break;
+      case 3:
+        Expand<3>(context);
+        break;
+      case 4:
+        Expand<4>(context);
+        break;
+      case 5:
+        Expand<5>(context);
+        break;
+      case 6:
+        Expand<6>(context);
+        break;
+    }
   }
 protected:
@@ -233,7 +223,24 @@ class ExpandGradKernel : public framework::OpKernel<T> {
                           "to %d, but the value received is %d.",
                           MAX_RANK_SUPPORTED, dims));
     switch (dims) {
-      REP_EXPAND_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        ExpandBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 2:
+        ExpandBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 3:
+        ExpandBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 4:
+        ExpandBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 5:
+        ExpandBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 6:
+        ExpandBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
      default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Only support tensor with rank being between 1 and 6. But "
...
@@ -17,41 +17,12 @@ limitations under the License. */
 #include <algorithm>
 #include <vector>
-#include <boost/preprocessor/arithmetic/div.hpp>
-#include <boost/preprocessor/arithmetic/mod.hpp>
-#include <boost/preprocessor/comparison/greater.hpp>
-#include <boost/preprocessor/comparison/greater_equal.hpp>
-#include <boost/preprocessor/control/if.hpp>
-#include <boost/preprocessor/repetition/repeat.hpp>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/eigen/eigen_function.h"
 #define MAX_RANK_SUPPORTED 6
-// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
-//    Usage: BOOST_PP_REPEAT(count, macro, data).
-//    This macro expands to the sequence:
-//    macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
-// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
-//    So the range of n is 0-5(which is count-1).
-//    We want to generate case 1-6 instead of case 0-5.
-//    So we need to change n to n + 1.
-#define EXPAND_TEMPLATE(z, n, data) \
-  case n + 1: {                     \
-    Expand<n + 1>(context);         \
-    break;                          \
-  }
-#define REP_EXPAND_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_TEMPLATE, ~)
-#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
-#define EXPAND_GRAD_CASE(n)                                            \
-  case n + 1: {                                                        \
-    ExpandBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
-    break;                                                             \
-  }
-#define EXPAND_GRAD_TEMPLATE(z, n, data) \
-  BOOST_PP_IF(COND(n), EXPAND_GRAD_CASE(n), )
-#define REP_EXPAND_GRAD_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_GRAD_TEMPLATE, ~)
 namespace paddle {
 namespace operators {
@@ -132,7 +103,26 @@ class ExpandV2Kernel : public framework::OpKernel<T> {
                           "less than or equal to %d.",
                           shape_size, MAX_RANK_SUPPORTED));
     rank = std::max(rank, static_cast<int>(shape_size));
-    switch (rank) { REP_EXPAND_TEMPLATE(MAX_RANK_SUPPORTED) }
+    switch (rank) {
+      case 1:
+        Expand<1>(context);
+        break;
+      case 2:
+        Expand<2>(context);
+        break;
+      case 3:
+        Expand<3>(context);
+        break;
+      case 4:
+        Expand<4>(context);
+        break;
+      case 5:
+        Expand<5>(context);
+        break;
+      case 6:
+        Expand<6>(context);
+        break;
+    }
   }
 protected:
@@ -271,7 +261,24 @@ class ExpandV2GradKernel : public framework::OpKernel<T> {
                           "to %d, but the value received is %d.",
                           MAX_RANK_SUPPORTED, dims));
     switch (dims) {
-      REP_EXPAND_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        ExpandBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 2:
+        ExpandBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 3:
+        ExpandBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 4:
+        ExpandBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 5:
+        ExpandBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 6:
+        ExpandBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Only support tensor with rank being between 1 and 6. But "
...
@@ -16,12 +16,6 @@
 #include <vector>
-#include <boost/preprocessor/arithmetic/mod.hpp>
-#include <boost/preprocessor/comparison/greater.hpp>
-#include <boost/preprocessor/comparison/greater_equal.hpp>
-#include <boost/preprocessor/control/if.hpp>
-#include <boost/preprocessor/repetition/repeat.hpp>
-
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
@@ -29,31 +23,6 @@
 #include "paddle/fluid/platform/errors.h"
 #define MAX_RANK_SUPPORTED 6
-// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
-//    Usage: BOOST_PP_REPEAT(count, macro, data).
-//    This macro expands to the sequence:
-//    macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
-// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
-//    So the range of n is 0-5(which is count-1).
-//    We want to generate case 1-6 instead of case 0-5.
-//    So we need to change n to n + 1.
-#define MESHGRID_TEMPLATE(z, n, data) \
-  case n + 1: {                       \
-    MeshgridForward<n + 1>(context);  \
-    break;                            \
-  }
-#define REP_MESHGRID_TEMPLATE(n) BOOST_PP_REPEAT(n, MESHGRID_TEMPLATE, ~)
-#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
-#define MESHGRID_GRAD_CASE(n)         \
-  case n + 1: {                       \
-    MeshgridBackward<n + 1>(context); \
-    break;                            \
-  }
-#define MESHGRID_GRAD_TEMPLATE(z, n, data) \
-  BOOST_PP_IF(COND(n), MESHGRID_GRAD_CASE(n), )
-#define REP_MESHGRID_GRAD_TEMPLATE(n) \
-  BOOST_PP_REPEAT(n, MESHGRID_GRAD_TEMPLATE, ~)
 namespace paddle {
 namespace operators {
@@ -65,7 +34,24 @@ class MeshgridKernel : public framework::OpKernel<T> {
     auto ins = context.MultiInput<framework::Tensor>("X");
     auto rank = ins.size();
     switch (rank) {
-      REP_MESHGRID_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        MeshgridForward<1>(context);
+        break;
+      case 2:
+        MeshgridForward<2>(context);
+        break;
+      case 3:
+        MeshgridForward<3>(context);
+        break;
+      case 4:
+        MeshgridForward<4>(context);
+        break;
+      case 5:
+        MeshgridForward<5>(context);
+        break;
+      case 6:
+        MeshgridForward<6>(context);
+        break;
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Excepted Tensor numbers between 1 and 6, but only received d% .",
@@ -141,7 +127,24 @@ class MeshgridGradKernel : public framework::OpKernel<T> {
         context.MultiInput<framework::Tensor>(framework::GradVarName("Out"));
     int n = out_grad.size();
     switch (n) {
-      REP_MESHGRID_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        MeshgridBackward<1>(context);
+        break;
+      case 2:
+        MeshgridBackward<2>(context);
+        break;
+      case 3:
+        MeshgridBackward<3>(context);
+        break;
+      case 4:
+        MeshgridBackward<4>(context);
+        break;
+      case 5:
+        MeshgridBackward<5>(context);
+        break;
+      case 6:
+        MeshgridBackward<6>(context);
+        break;
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Excepted Tensor numbers between 1 and 6, but only received d% .",
...
@@ -17,40 +17,12 @@ limitations under the License. */
 #include <algorithm>
 #include <vector>
-#include <boost/preprocessor/arithmetic/div.hpp>
-#include <boost/preprocessor/arithmetic/mod.hpp>
-#include <boost/preprocessor/comparison/greater.hpp>
-#include <boost/preprocessor/comparison/greater_equal.hpp>
-#include <boost/preprocessor/control/if.hpp>
-#include <boost/preprocessor/repetition/repeat.hpp>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/eigen/eigen_function.h"
 #define MAX_RANK_SUPPORTED 6
-// 1. BOOST_PP_REPEAT macro represents a fast horizontal repetition construct.
-//    Usage: BOOST_PP_REPEAT(count, macro, data).
-//    This macro expands to the sequence:
-//    macro(z, 0, data) macro(z, 1, data) ... macro(z, count - 1, data).
-// 2. As for our case, count = MAX_RANK_SUPPORTED(which is 6).
-//    So the range of n is 0-5(which is count-1).
-//    We want to generate case 1-6 instead of case 0-5.
-//    So we need to change n to n + 1.
-#define TILE_TEMPLATE(z, n, data) \
-  case n + 1: {                   \
-    Tile<n + 1>(context);         \
-    break;                        \
-  }
-#define REP_TILE_TEMPLATE(n) BOOST_PP_REPEAT(n, TILE_TEMPLATE, ~)
-#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
-#define TILE_GRAD_CASE(n)                                            \
-  case n + 1: {                                                      \
-    TileBackward<n + 1>(context, reshape_dims_vec, reduce_dims_vec); \
-    break;                                                           \
-  }
-#define TILE_GRAD_TEMPLATE(z, n, data) BOOST_PP_IF(COND(n), TILE_GRAD_CASE(n), )
-#define REP_TILE_GRAD_TEMPLATE(n) BOOST_PP_REPEAT(n, TILE_GRAD_TEMPLATE, ~)
 namespace paddle {
 namespace operators {
@@ -130,7 +102,26 @@ class TileKernel : public framework::OpKernel<T> {
         "must be less than or equal to %d, but the value received is %d.",
         MAX_RANK_SUPPORTED, repeat_times_size));
     rank = std::max(rank, repeat_times_size);
-    switch (rank) { REP_TILE_TEMPLATE(MAX_RANK_SUPPORTED) }
+    switch (rank) {
+      case 1:
+        Tile<1>(context);
+        break;
+      case 2:
+        Tile<2>(context);
+        break;
+      case 3:
+        Tile<3>(context);
+        break;
+      case 4:
+        Tile<4>(context);
+        break;
+      case 5:
+        Tile<5>(context);
+        break;
+      case 6:
+        Tile<6>(context);
+        break;
+    }
   }
 protected:
@@ -251,7 +242,24 @@ class TileGradKernel : public framework::OpKernel<T> {
                           "to %d, but the value received is %d.",
                           MAX_RANK_SUPPORTED, dims));
     switch (dims) {
-      REP_TILE_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
+      case 1:
+        TileBackward<1>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 2:
+        TileBackward<2>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 3:
+        TileBackward<3>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 4:
+        TileBackward<4>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 5:
+        TileBackward<5>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
+      case 6:
+        TileBackward<6>(context, reshape_dims_vec, reduce_dims_vec);
+        break;
       default:
         PADDLE_THROW(platform::errors::InvalidArgument(
             "Only support tensor with rank being between 1 and 6. But "
...
@@ -15,13 +15,14 @@
 #include "paddle/fluid/platform/cudnn_workspace_helper.h"
 #include <cstdlib>
-#include "boost/lexical_cast.hpp"
+#include <string>
 namespace paddle {
 namespace platform {
 static int GetDefaultConvWorkspaceSizeLimitMBImpl() {
   const char *env_str = std::getenv("FLAGS_conv_workspace_size_limit");
-  return env_str ? boost::lexical_cast<int>(std::string(env_str))
+  return env_str ? std::stoi(std::string(env_str))
                  : kDefaultConvWorkspaceSizeLimitMB;
 }
...
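Like the boost::lexical_cast call it replaces, std::stoi here will throw if FLAGS_conv_workspace_size_limit is set to a non-numeric value. A hedged sketch of a more defensive variant (GetEnvIntOr is a hypothetical helper, not part of the patch):

#include <cstdlib>
#include <stdexcept>
#include <string>

// Falls back to the default instead of propagating a parse failure
// out of an environment-variable read.
static int GetEnvIntOr(const char* name, int default_value) {
  const char* env_str = std::getenv(name);
  if (env_str == nullptr) return default_value;
  try {
    return std::stoi(std::string(env_str));
  } catch (const std::invalid_argument&) {
    return default_value;
  } catch (const std::out_of_range&) {
    return default_value;
  }
}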
 cc_library(stringpiece SRCS piece.cc DEPS flags)
 cc_library(pretty_log SRCS pretty_log.cc DEPS flags)
-cc_library(string_helper SRCS string_helper.cc DEPS boost flags)
+cc_library(string_helper SRCS string_helper.cc DEPS flags)
 cc_test(stringpiece_test SRCS piece_test.cc DEPS stringpiece glog gflags)
 cc_test(stringprintf_test SRCS printf_test.cc DEPS glog gflags)
 cc_test(to_string_test SRCS to_string_test.cc)
 cc_test(split_test SRCS split_test.cc)
+cc_test(string_helper_test SRCS string_helper_test.cc DEPS string_helper)
@@ -88,6 +88,11 @@ inline int str_to_float(const char* str, float* v) {
   return index;
 }
+bool ends_with(std::string const& input, std::string const& test) {
+  if (test.size() > input.size()) return false;
+  return std::equal(test.rbegin(), test.rend(), input.rbegin());
+}
+
 // A helper class for reading lines from file.
 // A line buffer is maintained. It
 // doesn't need to know the maximum possible length of a line.
@@ -100,7 +105,7 @@ char* LineFileReader::getdelim(FILE* f, char delim) {
       _buffer[--ret] = 0;
     }
-    _length = (size_t)ret;
+    _length = static_cast<size_t>(ret);
     return _buffer;
   } else {
     _length = 0;
...
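The new ends_with compares the candidate suffix against the tail of the input with reverse iterators, so no temporary substring is allocated; the size check guards std::equal against walking past the start of the input. A quick usage sketch (assuming it is built and linked against the string_helper target):

#include <iostream>
#include "paddle/fluid/string/string_helper.h"

int main() {
  std::cout << std::boolalpha
            << paddle::string::ends_with("conv2d_grad", "_grad") << "\n"  // true
            << paddle::string::ends_with("conv2d", "_grad") << "\n";      // false
  return 0;
}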
@@ -21,7 +21,6 @@
 #include <utility>
 #include <vector>
-#include "boost/lexical_cast.hpp"
 #include "glog/logging.h"
 namespace paddle {
@@ -38,6 +37,7 @@ void format_string_append(std::string& str, const char* fmt,  // NOLINT
   CHECK_GE(len, 0);
   size_t oldlen = str.length();
   str.resize(oldlen + len + 1);
+
   CHECK(snprintf(&str[oldlen], (size_t)len + 1, fmt, args...) ==  // NOLINT
         len);
   str.resize(oldlen + len);
@@ -69,6 +69,9 @@ std::string erase_spaces(const std::string& str);
 int str_to_float(const char* str, float* v);
+// checks whether the test string is a suffix of the input string.
+bool ends_with(std::string const& input, std::string const& test);
+
 // split string by delim
 template <class T = std::string>
 std::vector<T> split_string(const std::string& str, const std::string& delim) {
@@ -134,7 +137,9 @@ std::string join_strings(const Container& strs, char delim) {
       str += delim;
     }
-    str += boost::lexical_cast<std::string>(elem);
+    std::stringstream ss;
+    ss << elem;
+    str += ss.str();
     ++i;
   }
@@ -151,7 +156,9 @@ std::string join_strings(const Container& strs, const std::string& delim) {
       str += delim;
     }
-    str += boost::lexical_cast<std::string>(elem);
+    std::stringstream ss;
+    ss << elem;
+    str += ss.str();
     ++i;
   }
...
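Because join_strings now streams each element through a std::stringstream, it still works for any element type with an operator<<, just as boost::lexical_cast<std::string> did. A usage sketch under that assumption:

#include <iostream>
#include <vector>
#include "paddle/fluid/string/string_helper.h"

int main() {
  // Non-string elements are formatted via operator<< before appending.
  std::vector<int> ids = {3, 5, 8};
  std::cout << paddle::string::join_strings(ids, ',') << "\n";  // prints 3,5,8
  return 0;
}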
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/string/string_helper.h"
#include <string>
#include "gtest/gtest.h"
TEST(StringHelper, EndsWith) {
std::string input("hello world");
std::string test1("world");
std::string test2("helloworld");
std::string test3("hello world hello world");
EXPECT_TRUE(paddle::string::ends_with(input, test1));
EXPECT_TRUE(paddle::string::ends_with(input, input));
EXPECT_FALSE(paddle::string::ends_with(input, test2));
EXPECT_FALSE(paddle::string::ends_with(input, test3));
}
TEST(StringHelper, FormatStringAppend) {
std::string str("hello");
char fmt[] = "hhh";
paddle::string::format_string_append(str, fmt);
EXPECT_EQ(str, "hellohhh");
}
TEST(StringHelper, JoinStrings) {
std::vector<std::string> v;
v.push_back("hello");
v.push_back("world");
std::string result = paddle::string::join_strings(v, ' ');
EXPECT_EQ(result, "hello world");
result = paddle::string::join_strings(v, '\n');
EXPECT_EQ(result, "hello\nworld");
result = paddle::string::join_strings(v, ',');
EXPECT_EQ(result, "hello,world");
result = paddle::string::join_strings(v, " new ");
EXPECT_EQ(result, "hello new world");
}