From 3662fb71a79c43ea537c8f358ad9b4c9407a7930 Mon Sep 17 00:00:00 2001
From: Zeng Jinle <32832641+sneaxiy@users.noreply.github.com>
Date: Tue, 3 Dec 2019 20:33:31 +0800
Subject: [PATCH] remove eval() calls in Eigen, test=develop (#21498)

---
 paddle/fluid/operators/batch_norm_op.cc             | 2 +-
 paddle/fluid/operators/bilinear_tensor_product_op.h | 8 +++-----
 paddle/fluid/operators/clip_by_norm_op.h            | 2 +-
 paddle/fluid/operators/instance_norm_op.cc          | 4 ++--
 paddle/fluid/operators/kldiv_loss_op.h              | 2 +-
 paddle/fluid/operators/reduce_ops/reduce_sum_op.h   | 2 +-
 6 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc
index 5e2d02332f2..a2447cb9681 100644
--- a/paddle/fluid/operators/batch_norm_op.cc
+++ b/paddle/fluid/operators/batch_norm_op.cc
@@ -522,7 +522,7 @@ class BatchNormGradKernel<platform::CPUDeviceContext, T>
         EigenVectorArrayMap<T> inv_var_tmp(running_inv_var_data, C);
         ConstEigenVectorArrayMap<T> var_arr(running_variance->data<T>(), C);
 
-        inv_var_tmp = (var_arr + epsilon).sqrt().inverse().eval();
+        inv_var_tmp = (var_arr + epsilon).sqrt().inverse();
         inv_var_data = running_inv_var_data;
       }
 
diff --git a/paddle/fluid/operators/bilinear_tensor_product_op.h b/paddle/fluid/operators/bilinear_tensor_product_op.h
index 5017c3a457a..8e2f25dfcf5 100644
--- a/paddle/fluid/operators/bilinear_tensor_product_op.h
+++ b/paddle/fluid/operators/bilinear_tensor_product_op.h
@@ -70,7 +70,7 @@ class BilinearTensorProductKernel : public framework::OpKernel<T> {
     if (bias) {
       auto bias_vec = EigenMatrix<T>::From(*bias);
       Eigen::DSizes<int, 2> bcast(batch_size, 1);
-      output_mat.device(place) = bias_vec.broadcast(bcast).eval() + output_mat;
+      output_mat.device(place) = bias_vec.broadcast(bcast) + output_mat;
     }
   }
 };
@@ -143,8 +143,7 @@ class BilinearTensorProductGradKernel : public framework::OpKernel<T> {
     if (d_x) {
       y_scale_mat.device(place) =
           output_vec.reshape(Eigen::DSizes<int, 2>(batch_size, 1))
-              .broadcast(bcast_for_x)
-              .eval() *
+              .broadcast(bcast_for_x) *
           y_mat;
       blas.GEMM(CblasNoTrans, CblasTrans, batch_size, x_dim, y_dim, 1,
                 y_scale.data<T>(), weight_i.data<T>(), 1, d_x->data<T>());
@@ -153,8 +152,7 @@ class BilinearTensorProductGradKernel : public framework::OpKernel<T> {
     if (d_y || d_weight) {
       auto output_vec_y =
           output_vec.reshape(Eigen::DSizes<int, 2>(batch_size, 1))
-              .broadcast(bcast_for_y)
-              .eval();
+              .broadcast(bcast_for_y);
       x_scale_mat.device(place) = output_vec_y * x_mat;
       if (d_y) {
         blas.GEMM(CblasNoTrans, CblasNoTrans, batch_size, y_dim, x_dim, 1,
diff --git a/paddle/fluid/operators/clip_by_norm_op.h b/paddle/fluid/operators/clip_by_norm_op.h
index 90265259c95..a040a3e64a1 100644
--- a/paddle/fluid/operators/clip_by_norm_op.h
+++ b/paddle/fluid/operators/clip_by_norm_op.h
@@ -75,7 +75,7 @@ class ClipByNormKernel : public framework::OpKernel<T> {
     auto& place =
         *context.template device_context<DeviceContext>().eigen_device();
 
-    auto temp = (x_norm <= max_norm).template cast<T>().eval();
+    auto temp = (x_norm <= max_norm).template cast<T>();
     auto scaling = temp + (static_cast<T>(1) - temp) * max_norm / x_norm;
     Eigen::array<int, 1> one_dim{{1}};
     Eigen::DSizes<int, 1> m_dsize(input->numel());
diff --git a/paddle/fluid/operators/instance_norm_op.cc b/paddle/fluid/operators/instance_norm_op.cc
index a67044849c7..10c7a04eeeb 100644
--- a/paddle/fluid/operators/instance_norm_op.cc
+++ b/paddle/fluid/operators/instance_norm_op.cc
@@ -491,7 +491,7 @@ class InstanceNormDoubleGradKernel<platform::CPUDeviceContext, T>
                sample_size * inv_var_tile_data * inv_var_tile_data *
                (ddx_arr.colwise().sum() / sample_size - ddx_arr);
 
-      dx_arr = scale_tile_data * dx_arr.eval();
+      dx_arr = scale_tile_data * dx_arr;
     }
     if (ddScale) {
       ConstEigenVectorArrayMap<T> ddscale_arr(ddScale->data<T>(), C);
@@ -532,7 +532,7 @@ class InstanceNormDoubleGradKernel<platform::CPUDeviceContext, T>
               x_sub_mean_mul_invstd_arr *
                   (dy_arr * x_sub_mean_mul_invstd_arr).colwise().sum() /
                       sample_size);
-      first_grad_arr = first_grad_arr.eval() * ddx_arr;
+      first_grad_arr = first_grad_arr * ddx_arr;
       for (int nc = 0; nc < NxC; ++nc) {
         int c = nc % C;
         dscale_arr(c) += first_grad_arr.colwise().sum()(nc);
diff --git a/paddle/fluid/operators/kldiv_loss_op.h b/paddle/fluid/operators/kldiv_loss_op.h
index 625e16e298d..369fdb4872b 100644
--- a/paddle/fluid/operators/kldiv_loss_op.h
+++ b/paddle/fluid/operators/kldiv_loss_op.h
@@ -71,7 +71,7 @@ class KLDivLossKernel : public framework::OpKernel<T> {
     if ("none" == reduction) {
       loss_t.device(place) = output;
     } else if ("batchmean" == reduction) {
-      auto output_sum = output.sum().eval();
+      auto output_sum = output.sum();
       loss_t.device(place) = output_sum / output_sum.constant(n);
     } else if ("mean" == reduction) {
       loss_t.device(place) = output.mean();
diff --git a/paddle/fluid/operators/reduce_ops/reduce_sum_op.h b/paddle/fluid/operators/reduce_ops/reduce_sum_op.h
index 7343d01e29d..ceaba30b01f 100644
--- a/paddle/fluid/operators/reduce_ops/reduce_sum_op.h
+++ b/paddle/fluid/operators/reduce_ops/reduce_sum_op.h
@@ -90,7 +90,7 @@ struct SumGradFunctor {
             typename DY, typename Dim>
   void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy,
                   const Dim& dim, int size) {
-    dx->device(place) = dy->eval().broadcast(dim);
+    dx->device(place) = dy->broadcast(dim);
   }
 };
 
--
GitLab
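
Background for reviewers (illustrative sketch, not part of the patch):
.eval() forces Eigen to materialize a sub-expression into a temporary
before the enclosing expression is evaluated. The expressions changed
here either combine coefficient-wise or read only from tensors distinct
from their destinations, so removing the temporaries does not change
results; it only lets Eigen fuse each expression into a single pass with
no intermediate allocation. A minimal standalone example mirroring the
batch_norm change (the names with_eval/without_eval are invented for
illustration):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      Eigen::ArrayXf var_arr = Eigen::ArrayXf::LinSpaced(4, 1.f, 4.f);
      const float epsilon = 1e-5f;

      // With .eval(): Eigen first materializes the result into a
      // temporary array, then copies it into the destination.
      Eigen::ArrayXf with_eval = (var_arr + epsilon).sqrt().inverse().eval();

      // Without .eval(): the lazy expression is evaluated directly
      // into the destination in one fused pass, with no temporary.
      Eigen::ArrayXf without_eval = (var_arr + epsilon).sqrt().inverse();

      // Both forms produce identical results.
      std::cout << (with_eval - without_eval).abs().maxCoeff() << "\n";
      return 0;
    }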