From c474e7ddf53100fcfaa9830017aac4f70c1857fd Mon Sep 17 00:00:00 2001
From: Kevin
Date: Wed, 17 Apr 2019 14:17:28 +0800
Subject: [PATCH] fix overflow by int32 mul test=develop (#16794)

* fix overflow by int32 mul test=develop

* fix reference nullptr

* fix codestyle test=develop

* modify to point in ContextProjectFunctor test=develop

* modify to point in ContextProjectFunctor test=develop

* modify . to -> test=develop
---
 paddle/fluid/operators/math/context_project.h          | 7 ++++---
 paddle/fluid/operators/sequence_ops/sequence_conv_op.h | 8 ++++----
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/paddle/fluid/operators/math/context_project.h b/paddle/fluid/operators/math/context_project.h
index bc0df3f35..d6a4793a8 100644
--- a/paddle/fluid/operators/math/context_project.h
+++ b/paddle/fluid/operators/math/context_project.h
@@ -87,7 +87,7 @@ template <typename DeviceContext, typename T>
 class ContextProjectFunctor {
  public:
   void operator()(const DeviceContext& context, const LoDTensor& in,
-                  const Tensor& padding_data, bool padding_trainable,
+                  const Tensor* padding_data, bool padding_trainable,
                   const int context_start, const int context_length,
                   const int context_stride, const int up_pad,
                   const int down_pad, Tensor* col) {
@@ -132,6 +132,7 @@ class ContextProjectFunctor {
       }
     }
     if (padding_trainable) {
+      PADDLE_ENFORCE_NOT_NULL(padding_data);
       for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
         Tensor out_t = col->Slice(static_cast<int>(lod_level_0[i]),
                                   static_cast<int>(lod_level_0[i + 1]));
@@ -150,7 +151,7 @@ class ContextProjectFunctor {
               k + context_length < up_pad ? context_length : up_pad - k;
           Tensor out_t_sub = out_t.Slice(k * context_length,
                                          k * context_length + padding_size);
-          Tensor w_sub = padding_data.Slice(k, k + padding_size);
+          Tensor w_sub = padding_data->Slice(k, k + padding_size);
           framework::TensorCopy(w_sub, context.GetPlace(), context,
                                 &out_t_sub);
         }
@@ -180,7 +181,7 @@ class ContextProjectFunctor {
             Tensor out_t_sub = out_t.Slice(
                 (down_pad_begin_row + t) * context_length - padding_size,
                 (down_pad_begin_row + t) * context_length);
-            Tensor w_sub = padding_data.Slice(
+            Tensor w_sub = padding_data->Slice(
                 up_pad + padding_idx, up_pad + padding_idx + padding_size);
             framework::TensorCopy(w_sub, context.GetPlace(), context,
                                   &out_t_sub);
diff --git a/paddle/fluid/operators/sequence_ops/sequence_conv_op.h b/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
index ee70281d5..3a2c9e3f7 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
@@ -49,7 +49,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     int up_pad = std::max(0, -context_start);
     int down_pad = std::max(0, context_start + context_length - 1);
 
-    int sequence_width = static_cast<int>(in->dims()[1]);
+    auto sequence_width = static_cast<int64_t>(in->dims()[1]);
 
     framework::DDim col_shape = {in->dims()[0],
                                  context_length * sequence_width};
@@ -62,7 +62,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     set_zero(dev_ctx, &col, static_cast<T>(0));
     math::ContextProjectFunctor<DeviceContext, T> seq_project_functor;
 
-    seq_project_functor(dev_ctx, *in, *padding_data, padding_trainable,
+    seq_project_functor(dev_ctx, *in, padding_data, padding_trainable,
                         context_start, context_length, context_stride, up_pad,
                         down_pad, &col);
 
@@ -93,7 +93,7 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
     int up_pad = std::max(0, -context_start);
     int down_pad = std::max(0, context_start + context_length - 1);
 
-    int sequence_width = static_cast<int>(in->dims()[1]);
+    auto sequence_width = static_cast<int64_t>(in->dims()[1]);
 
     math::SetConstant<DeviceContext, T> set_zero;
     auto& dev_ctx = context.template device_context<DeviceContext>();
@@ -144,7 +144,7 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
         padding_data = context.Input<Tensor>("PaddingData");
       }
 
-      seq_project_functor(dev_ctx, *in, *padding_data, padding_trainable,
+      seq_project_functor(dev_ctx, *in, padding_data, padding_trainable,
                           context_start, context_length, context_stride,
                           up_pad, down_pad, &col);
 
-- 
GitLab
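
Note on the overflow being fixed: widening sequence_width from int to int64_t makes products
such as context_length * sequence_width (used for col_shape and the column-buffer offsets)
evaluate in 64-bit arithmetic. Below is a minimal standalone sketch, not part of the patch,
illustrating that failure mode; the values are hypothetical and chosen only so the 32-bit
product exceeds INT_MAX.

#include <cstdint>
#include <iostream>

int main() {
  int context_length = 5;
  int sequence_width = 500000000;  // hypothetical large feature width

  // int * int is evaluated in 32-bit arithmetic; 5 * 500000000 = 2.5e9
  // exceeds INT_MAX, so this multiplication would overflow (undefined
  // behaviour for signed int), which is why it is left commented out:
  // int bad = context_length * sequence_width;

  // Widening one operand to int64_t, as the patch does for sequence_width,
  // promotes the whole multiplication to 64-bit and keeps the exact value.
  int64_t good = static_cast<int64_t>(sequence_width) * context_length;

  std::cout << good << std::endl;  // prints 2500000000
  return 0;
}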