diff --git a/paddle/fluid/operators/math/context_project.h b/paddle/fluid/operators/math/context_project.h
index bc0df3f3551c7a100d5d285cab585bb81c07fc5e..d6a4793a8ac3e10fb334d47a51b17c985360d2d8 100644
--- a/paddle/fluid/operators/math/context_project.h
+++ b/paddle/fluid/operators/math/context_project.h
@@ -87,7 +87,7 @@ template <typename DeviceContext, typename T>
 class ContextProjectFunctor {
  public:
   void operator()(const DeviceContext& context, const LoDTensor& in,
-                  const Tensor& padding_data, bool padding_trainable,
+                  const Tensor* padding_data, bool padding_trainable,
                   const int context_start, const int context_length,
                   const int context_stride, const int up_pad,
                   const int down_pad, Tensor* col) {
@@ -132,6 +132,7 @@ class ContextProjectFunctor {
       }
     }
     if (padding_trainable) {
+      PADDLE_ENFORCE_NOT_NULL(padding_data);
       for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
         Tensor out_t = col->Slice(static_cast<int>(lod_level_0[i]),
                                   static_cast<int>(lod_level_0[i + 1]));
@@ -150,7 +151,7 @@ class ContextProjectFunctor {
               k + context_length < up_pad ? context_length : up_pad - k;
           Tensor out_t_sub = out_t.Slice(k * context_length,
                                          k * context_length + padding_size);
-          Tensor w_sub = padding_data.Slice(k, k + padding_size);
+          Tensor w_sub = padding_data->Slice(k, k + padding_size);
           framework::TensorCopy(w_sub, context.GetPlace(), context,
                                 &out_t_sub);
         }
@@ -180,7 +181,7 @@ class ContextProjectFunctor {
             Tensor out_t_sub = out_t.Slice(
                 (down_pad_begin_row + t) * context_length - padding_size,
                 (down_pad_begin_row + t) * context_length);
-            Tensor w_sub = padding_data.Slice(
+            Tensor w_sub = padding_data->Slice(
                 up_pad + padding_idx, up_pad + padding_idx + padding_size);
             framework::TensorCopy(w_sub, context.GetPlace(), context,
                                   &out_t_sub);
diff --git a/paddle/fluid/operators/sequence_ops/sequence_conv_op.h b/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
index ee70281d51673b94a1451f636e607fad3404863b..3a2c9e3f734c8c302e3e4b1c6718b3a236fe897a 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
@@ -49,7 +49,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
 
     int up_pad = std::max(0, -context_start);
     int down_pad = std::max(0, context_start + context_length - 1);
-    int sequence_width = static_cast<int>(in->dims()[1]);
+    auto sequence_width = static_cast<int>(in->dims()[1]);
 
     framework::DDim col_shape = {in->dims()[0],
                                  context_length * sequence_width};
@@ -62,7 +62,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     set_zero(dev_ctx, &col, static_cast<T>(0));
     math::ContextProjectFunctor<DeviceContext, T> seq_project_functor;
 
-    seq_project_functor(dev_ctx, *in, *padding_data, padding_trainable,
+    seq_project_functor(dev_ctx, *in, padding_data, padding_trainable,
                         context_start, context_length, context_stride, up_pad,
                         down_pad, &col);
 
@@ -93,7 +93,7 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
 
     int up_pad = std::max(0, -context_start);
     int down_pad = std::max(0, context_start + context_length - 1);
-    int sequence_width = static_cast<int>(in->dims()[1]);
+    auto sequence_width = static_cast<int>(in->dims()[1]);
 
     math::SetConstant<DeviceContext, T> set_zero;
     auto& dev_ctx = context.template device_context<DeviceContext>();
@@ -144,7 +144,7 @@
       padding_data = context.Input<Tensor>("PaddingData");
     }
 
-    seq_project_functor(dev_ctx, *in, *padding_data, padding_trainable,
+    seq_project_functor(dev_ctx, *in, padding_data, padding_trainable,
                         context_start, context_length, context_stride, up_pad,
                         down_pad, &col);
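
Why this change matters: the kernels only look up the "PaddingData" input when padding_trainable is true, so before this patch the call site dereferenced a potentially null padding_data to bind it to a const Tensor& parameter, which is undefined behavior. Passing the tensor as const Tensor* makes the optional argument explicit at the call site, and PADDLE_ENFORCE_NOT_NULL fails fast on the only path that actually reads it. The sketch below is a minimal standalone illustration of the same pattern; Tensor, EnforceNotNull, and ContextProject here are hypothetical stand-ins, not Paddle's real API.

#include <stdexcept>
#include <vector>

// Hypothetical stand-in for paddle::framework::Tensor.
struct Tensor {
  std::vector<float> data;
};

// Mirrors PADDLE_ENFORCE_NOT_NULL: fail fast instead of dereferencing null.
void EnforceNotNull(const Tensor* t) {
  if (t == nullptr) {
    throw std::invalid_argument("padding_data must not be null");
  }
}

// Taking `const Tensor*` instead of `const Tensor&` lets callers pass
// nullptr when padding is not trainable; the callee checks the pointer
// only on the path that actually uses it.
void ContextProject(const Tensor* padding_data, bool padding_trainable) {
  if (padding_trainable) {
    EnforceNotNull(padding_data);
    // ... copy slices of *padding_data into the output, as the functor does ...
  }
  // The non-trainable path never touches padding_data.
}

int main() {
  Tensor padding{{1.0f, 2.0f, 3.0f}};
  ContextProject(&padding, true);  // trainable padding: a tensor is required
  ContextProject(nullptr, false);  // no trainable padding: nullptr is now legal
  return 0;
}

A pointer parameter is the conventional C++ way to express "may be absent" without pulling in an optional wrapper; the reference version hid that contract and pushed the (unchecked) dereference onto every caller.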