Unverified commit fc385777, authored by W wangchaochaohu, committed by GitHub

fix the long compile time test=develop (#21064)

Parent 2f27b103
@@ -31,9 +31,7 @@ limitations under the License. */
       break;                                                    \
     }
 #define REP_EXPAND_AS_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_AS_TEMPLATE, ~)
-#define COND(n)                                               \
-  BOOST_PP_GREATER_EQUAL(BOOST_PP_DIV(n, MAX_RANK_SUPPORTED), \
-                         BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
+#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
 #define EXPAND_AS_GRAD_CASE(n)                                       \
   case n: {                                                          \
     ExpandAsBackward<n>(context, reshape_dims_vec, reduce_dims_vec); \
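For context on the dispatch this macro machinery generates: BOOST_PP_REPEAT(n, EXPAND_AS_GRAD_TEMPLATE, ~) invokes the case-generating macro once for each i in [0, n), and COND(i) gates which case labels survive. With the new definition, BOOST_PP_MOD(i, MAX_RANK_SUPPORTED) equals i for every i below MAX_RANK_SUPPORTED, so COND(i) is always true in that range and a case is emitted for every rank. The exact definition of EXPAND_AS_GRAD_TEMPLATE is outside this hunk, so the following hand expansion is only a sketch of what the switch body boils down to after the change, assuming MAX_RANK_SUPPORTED == 6:

    // Hand-expanded sketch, not literal preprocessor output:
    switch (dims) {
      case 0: ExpandAsBackward<0>(context, reshape_dims_vec, reduce_dims_vec); break;
      case 1: ExpandAsBackward<1>(context, reshape_dims_vec, reduce_dims_vec); break;
      // ... cases 2 through 4 follow the same pattern ...
      case 5: ExpandAsBackward<5>(context, reshape_dims_vec, reduce_dims_vec); break;
      default:
        PADDLE_THROW("Only support tensor with rank being between 1 and 6.");
    }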
@@ -116,23 +114,20 @@ class ExpandAsGradKernel : public framework::OpKernel<T> {
     std::vector<int> reshape_dims_vec;
     std::vector<int> reduce_dims_vec;
     for (size_t i = 0; i < bcast_dims.size(); ++i) {
-      if (bcast_dims[i] == 1) {
-        reshape_dims_vec.push_back(x_dims[i]);
-      } else {
-        if (x_dims[i] == 1) {
-          reduce_dims_vec.push_back(reshape_dims_vec.size());
-          reshape_dims_vec.push_back(bcast_dims[i]);
-        } else {
-          reduce_dims_vec.push_back(reshape_dims_vec.size());
-          reshape_dims_vec.push_back(bcast_dims[i]);
-          reshape_dims_vec.push_back(x_dims[i]);
-        }
-      }
+      reduce_dims_vec.push_back(reshape_dims_vec.size());
+      reshape_dims_vec.push_back(bcast_dims[i]);
+      reshape_dims_vec.push_back(x_dims[i]);
     }
-    int dims = reshape_dims_vec.size() * MAX_RANK_SUPPORTED +
-               reduce_dims_vec.size() - MAX_RANK_SUPPORTED - 1;
+    int dims = reduce_dims_vec.size();
+    bool just_copy = true;
+    for (size_t i = 0; i < bcast_dims.size(); i++) {
+      if (bcast_dims[i] != 1) {
+        just_copy = false;
+        break;
+      }
+    }
     // no need reduce, just copy
-    if (reduce_dims_vec.size() == 0) {
+    if (just_copy) {
       auto* in0 = context.Input<Tensor>(framework::GradVarName("Out"));
       auto* out0 = context.Output<Tensor>(framework::GradVarName("X"));
       out0->mutable_data<T>(context.GetPlace());
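The rewritten loop records, for every axis, one reduce index plus a (bcast, x) pair of reshape entries, so dims is simply the input rank. Below is a minimal standalone sketch of the new bookkeeping with hypothetical shapes; the variable names mirror the kernel:

    #include <cstdio>
    #include <vector>

    int main() {
      // Hypothetical example: x has shape {3, 1} and is tiled by factors
      // {1, 4}, so the expanded output has shape {3, 4}.
      std::vector<int> x_dims = {3, 1};
      std::vector<int> bcast_dims = {1, 4};

      std::vector<int> reshape_dims_vec, reduce_dims_vec;
      for (size_t i = 0; i < bcast_dims.size(); ++i) {
        reduce_dims_vec.push_back(reshape_dims_vec.size());  // -> {0, 2}
        reshape_dims_vec.push_back(bcast_dims[i]);
        reshape_dims_vec.push_back(x_dims[i]);
      }
      // reshape_dims_vec == {1, 3, 4, 1}: view dOut with each axis split
      // into (tile factor, original extent); reduce_dims_vec == {0, 2}
      // lists the tile axes to sum over.
      int dims = reduce_dims_vec.size();  // == rank == 2, the dispatch key

      bool just_copy = true;
      for (size_t i = 0; i < bcast_dims.size(); i++) {
        if (bcast_dims[i] != 1) {
          just_copy = false;
          break;
        }
      }
      std::printf("dims=%d just_copy=%d\n", dims, static_cast<int>(just_copy));
      // prints: dims=2 just_copy=0
      return 0;
    }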
@@ -140,7 +135,7 @@ class ExpandAsGradKernel : public framework::OpKernel<T> {
                 out0);
     } else {
       switch (dims) {
-        REP_EXPAND_AS_GRAD_TEMPLATE(72)
+        REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
         default:
           PADDLE_THROW("Only support tensor with rank being between 1 and 6.");
       }
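This hunk is the change behind the commit title. The old code packed reshape_dims_vec.size() and reduce_dims_vec.size() into a single dispatch integer, so REP_EXPAND_AS_GRAD_TEMPLATE(72) had to emit up to 72 case labels, each instantiating ExpandAsBackward<n>; the new code dispatches on rank alone, leaving only MAX_RANK_SUPPORTED instantiations. The bound of 72 is an inference from the old formula, assuming MAX_RANK_SUPPORTED == 6 and at most two reshape entries per axis:

    // Worked arithmetic for the old upper bound (not stated in the commit):
    //   dims = reshape_size * 6 + reduce_size - 6 - 1,
    //   with reshape_size <= 12 and reduce_size <= 6
    //   => max dims = 12 * 6 + 6 - 6 - 1 = 71, i.e. case labels 0..71.
    static_assert(12 * 6 + 6 - 6 - 1 == 71, "old scheme needs 72 cases");
    // After the change: dims == rank <= 6, i.e. only 6 cases.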
@@ -152,8 +147,8 @@ class ExpandAsGradKernel : public framework::OpKernel<T> {
   void ExpandAsBackward(const framework::ExecutionContext& context,
                         const std::vector<int>& reshape_dims_vec,
                         const std::vector<int>& reduce_dims_vec) const {
-    size_t reshape_size = Dims / MAX_RANK_SUPPORTED + 1;
-    size_t reduce_size = Dims % MAX_RANK_SUPPORTED + 1;
+    size_t reshape_size = reshape_dims_vec.size();
+    size_t reduce_size = reduce_dims_vec.size();
     PADDLE_ENFORCE_EQ(reshape_size, reshape_dims_vec.size(),
                       "Inconsistent size between template Dims and "
                       "reshape dimensions.");
@@ -164,11 +159,11 @@ class ExpandAsGradKernel : public framework::OpKernel<T> {
     auto* out0 = context.Output<Tensor>(framework::GradVarName("X"));
     out0->mutable_data<T>(context.GetPlace());
     auto x_grad = EigenVector<T>::Flatten(*out0);
-    Eigen::DSizes<int, Dims / MAX_RANK_SUPPORTED + 1> reshape_dims;
+    Eigen::DSizes<int, Dims * 2> reshape_dims;
     for (size_t i = 0; i < reshape_size; ++i) {
       reshape_dims[i] = reshape_dims_vec[i];
     }
-    Eigen::DSizes<int, Dims % MAX_RANK_SUPPORTED + 1> reduce_dims;
+    Eigen::DSizes<int, Dims> reduce_dims;
     for (size_t i = 0; i < reduce_size; ++i) {
       reduce_dims[i] = reduce_dims_vec[i];
     }
......
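The Dims * 2 and Dims extents match the new bookkeeping exactly: ExpandAsBackward<Dims> now receives 2 * Dims reshape entries and Dims reduce indices instead of decoding both sizes from a packed template parameter. The gradient body itself is elided from this diff; the snippet below is a self-contained sketch of the standard reshape-then-sum reduction these DSizes feed into, with hypothetical values. Paddle's Eigen tensors are row-major, so RowMajor is spelled out here:

    #include <iostream>
    #include <unsupported/Eigen/CXX11/Tensor>

    int main() {
      // x has shape {2}; expanding it with bcast factor 3 tiles it into an
      // output of shape {6}. dout below is the flattened output gradient.
      Eigen::Tensor<float, 1, Eigen::RowMajor> dout(6);
      dout.setValues({1, 2, 3, 4, 5, 6});

      // From the builder loop: reshape_dims_vec == {3, 2}, reduce_dims_vec == {0}.
      Eigen::DSizes<Eigen::Index, 2> reshape_dims(3, 2);  // (tile, original)
      Eigen::DSizes<Eigen::Index, 1> reduce_dims(0);      // sum over tile axis

      // dx = dout viewed as (3, 2), summed over the tile axis.
      Eigen::Tensor<float, 1, Eigen::RowMajor> dx =
          dout.reshape(reshape_dims).sum(reduce_dims);
      std::cout << dx(0) << " " << dx(1) << std::endl;  // prints: 9 12
      return 0;
    }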