diff --git a/paddle/operators/smooth_l1_loss_op.h b/paddle/operators/smooth_l1_loss_op.h
index ae91b9c893f8c9e3ce55e5a44a54d6ab013e01da..3e4740385842d6a9762f2043183dc89a13ee9832 100644
--- a/paddle/operators/smooth_l1_loss_op.h
+++ b/paddle/operators/smooth_l1_loss_op.h
@@ -87,10 +87,13 @@ class SmoothL1LossKernel : public framework::OpKernel {
       auto outside_weight = EigenVector<T>::Flatten(*in3);
       errors.device(place) = errors * outside_weight;
     }
-    auto loss = EigenMatrix<T>::From(*out1, {in0->dims()[0], 1});
+    auto loss = EigenVector<T>::Flatten(*out1);
     // first dimension of 'X' is the number of samples
-    auto errors_mat_view = EigenMatrix<T>::From(paddle_errors, in0->dims());
-    loss.device(place) = errors_mat_view.sum(Eigen::array<int, 1>({1}));
+    auto mat_dims =
+        framework::make_ddim({static_cast<int>(in0->dims()[0]),
+                              static_cast<int>(in_counts / in0->dims()[0])});
+    auto errors_mat_view = EigenMatrix<T>::From(paddle_errors, mat_dims);
+    loss.device(place) = errors_mat_view.sum(Eigen::array<int, 1>({{1}}));
   }
 };
 
@@ -162,9 +165,9 @@ class SmoothL1LossGradKernel : public framework::OpKernel {
     // compute gradients
     auto out_grad = EigenMatrix<T>::From(*og);
    auto diff_mat_view = EigenMatrix<T>::From(paddle_diff, mat_dims);
-    auto gradients =
-        out_grad.broadcast(Eigen::array<int, 2>({1, static_cast<int>(cols)})) *
-        weights * diff_mat_view;
+    auto gradients = out_grad.broadcast(
+                         Eigen::array<int, 2>({{1, static_cast<int>(cols)}})) *
+                     weights * diff_mat_view;
 
     if (out0) {
       out0->mutable_data<T>(context.GetPlace());
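
For context: the old code reshaped the flat error buffer straight to `in0->dims()`, which only fits a matrix view when 'X' is 2-D; the new code instead derives an (n_samples x elements_per_sample) shape from the total element count (`in_counts`), presumably so the per-sample reduction also holds when 'X' has more dimensions. Below is a minimal standalone sketch of that reshape-and-row-sum idiom, using Eigen's unsupported Tensor module directly rather than Paddle's `EigenMatrix`/`EigenVector` wrappers; all sizes, values, and names (`n_samples`, `n_features`) are illustrative and not taken from the patch.

```cpp
// Sketch (not part of the patch) of the reduction the new code performs:
// view a flat per-element error buffer as an (n_samples x n_features)
// matrix and sum over dimension 1, yielding one scalar loss per sample.
#include <unsupported/Eigen/CXX11/Tensor>

#include <iostream>

int main() {
  const Eigen::DenseIndex n_samples = 2;
  const Eigen::DenseIndex n_features = 3;

  // Flat buffer of per-element smooth-L1 errors, standing in for
  // `paddle_errors` in the kernel.
  Eigen::Tensor<float, 1, Eigen::RowMajor> errors(n_samples * n_features);
  errors.setValues({0.5f, 1.0f, 1.5f, 2.0f, 2.5f, 3.0f});

  // Reshape to a 2-D view whose first dimension is the number of samples,
  // mirroring `EigenMatrix<T>::From(paddle_errors, mat_dims)`.
  Eigen::array<Eigen::DenseIndex, 2> mat_dims{{n_samples, n_features}};
  auto errors_mat_view = errors.reshape(mat_dims);

  // Reduce over dimension 1 (the per-sample elements), mirroring
  // `errors_mat_view.sum(Eigen::array<int, 1>({{1}}))`.
  Eigen::Tensor<float, 1, Eigen::RowMajor> loss =
      errors_mat_view.sum(Eigen::array<int, 1>({{1}}));

  std::cout << "per-sample loss: " << loss(0) << " " << loss(1) << std::endl;
  // Prints: per-sample loss: 3 7.5
  return 0;
}
```

The `({1})` to `({{1}})` change in the patch is likely a companion fix: `Eigen::array` is an alias for `std::array` in C++11 builds, where the single-brace form relies on brace elision and draws `-Wmissing-braces` warnings from some compilers.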