From e722f68318416930553d1a8c7bc40e7fb47b80e1 Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Mon, 3 Sep 2018 15:38:53 +0800
Subject: [PATCH] fix windows compile (#13147)

---
 paddle/fluid/operators/activation_op.h      |  4 ++--
 paddle/fluid/operators/attention_lstm_op.cc |  1 -
 paddle/fluid/operators/gru_unit_op.h        | 16 ++++++++--------
 paddle/fluid/operators/label_smooth_op.h    |  3 ++-
 4 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h
index 91241519265..2e31d1c9c70 100644
--- a/paddle/fluid/operators/activation_op.h
+++ b/paddle/fluid/operators/activation_op.h
@@ -865,8 +865,8 @@ struct SwishGradFunctor : public BaseActivationFunctor<T> {
   void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
     auto temp1 = static_cast<T>(1) /
                  (static_cast<T>(1) + (static_cast<T>(-beta) * x).exp());
-    auto temp2 = temp1 * (static_cast<T>(1) - (beta * out));
-    dx.device(d) = dout * ((beta * out) + temp2);
+    auto temp2 = temp1 * (static_cast<T>(1) - (static_cast<T>(beta) * out));
+    dx.device(d) = dout * ((static_cast<T>(beta) * out) + temp2);
   }
 };
 
diff --git a/paddle/fluid/operators/attention_lstm_op.cc b/paddle/fluid/operators/attention_lstm_op.cc
index a02128c5a54..39b0c856996 100644
--- a/paddle/fluid/operators/attention_lstm_op.cc
+++ b/paddle/fluid/operators/attention_lstm_op.cc
@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/attention_lstm_op.h"
-#include <sys/time.h>
 #include <string>
 #include "paddle/fluid/operators/math/blas.h"
 #include "paddle/fluid/operators/math/cpu_vec.h"
diff --git a/paddle/fluid/operators/gru_unit_op.h b/paddle/fluid/operators/gru_unit_op.h
index 2d9faed648a..f18d09d33e9 100644
--- a/paddle/fluid/operators/gru_unit_op.h
+++ b/paddle/fluid/operators/gru_unit_op.h
@@ -92,12 +92,12 @@ class GRUUnitKernel : public framework::OpKernel<T> {
                 gate_data, frame_size * 3);
 
     // calculate activited gate
-    Eigen::array<int, 2> extents({{batch_size, frame_size}});
-    Eigen::array<int, 2> u_offsets({{0, 0}});
+    Eigen::array<int, 2> extents = {batch_size, frame_size};
+    Eigen::array<int, 2> u_offsets = {0, 0};
     ActCompute(context.Attr<int>("gate_activation"), place,
                g.slice(u_offsets, extents), g.slice(u_offsets, extents));
     auto u = g.slice(u_offsets, extents);  // update gate
-    Eigen::array<int, 2> r_offsets({{0, frame_size}});
+    Eigen::array<int, 2> r_offsets = {0, frame_size};
     ActCompute(context.Attr<int>("gate_activation"), place,
                g.slice(r_offsets, extents), g.slice(r_offsets, extents));
     auto r = g.slice(r_offsets, extents);  // reset gate
@@ -107,7 +107,7 @@ class GRUUnitKernel : public framework::OpKernel<T> {
                 weight_data + frame_size * frame_size * 2, frame_size, 1,
                 gate_data + frame_size * 2, frame_size * 3);
 
-    Eigen::array<int, 2> c_offsets({{0, frame_size * 2}});
+    Eigen::array<int, 2> c_offsets = {0, frame_size * 2};
     ActCompute(context.Attr<int>("activation"), place,
                g.slice(c_offsets, extents), g.slice(c_offsets, extents));
     auto c = g.slice(c_offsets, extents);  // output candidate
@@ -171,12 +171,12 @@ class GRUUnitGradKernel : public framework::OpKernel<T> {
     int batch_size = input->dims()[0];
     int frame_size = hidden_prev->dims()[1];
 
-    Eigen::array<int, 2> extents({{batch_size, frame_size}});
-    Eigen::array<int, 2> u_offsets({{0, 0}});
+    Eigen::array<int, 2> extents = {batch_size, frame_size};
+    Eigen::array<int, 2> u_offsets = {0, 0};
     auto u = g.slice(u_offsets, extents);  // update gate
-    Eigen::array<int, 2> r_offsets({{0, frame_size}});
+    Eigen::array<int, 2> r_offsets = {0, frame_size};
     auto r = g.slice(r_offsets, extents);  // reset gate
-    Eigen::array<int, 2> c_offsets({{0, frame_size * 2}});
+    Eigen::array<int, 2> c_offsets = {0, frame_size * 2};
     auto c = g.slice(c_offsets, extents);  // output candidate
 
     // backward for unactivated update gate
diff --git a/paddle/fluid/operators/label_smooth_op.h b/paddle/fluid/operators/label_smooth_op.h
index f56fd95e965..f3da17de011 100644
--- a/paddle/fluid/operators/label_smooth_op.h
+++ b/paddle/fluid/operators/label_smooth_op.h
@@ -38,7 +38,8 @@ class LabelSmoothKernel : public framework::OpKernel<T> {
       auto dist = framework::EigenVector<T>::Flatten(*dist_t);
       out.device(dev) =
           static_cast<T>(1 - epsilon) * in +
-          epsilon * dist.broadcast(Eigen::DSizes<int, 1>(in_t->numel()));
+          static_cast<T>(epsilon) *
+              dist.broadcast(Eigen::DSizes<int, 1>(in_t->numel()));
     } else {
       out.device(dev) = static_cast<T>(1 - epsilon) * in +
                         static_cast<T>(epsilon / label_dim);
--
GitLab