diff --git a/paddle/operators/math/context_project.h b/paddle/operators/math/context_project.h
index e37f3a5bf2bd59e46f66aa3a8284e05d79dbc790..b7466d206e700ff88a974ef1f8eea9c160d31497 100644
--- a/paddle/operators/math/context_project.h
+++ b/paddle/operators/math/context_project.h
@@ -34,18 +34,15 @@ using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
  * \param in            Input data.
  * \param Shape         The shape of Input data,
- *                      [minibatch, number_of_input_features].
- * \param type          A float LoDTensor.
+ *                      [minibatch, input_hidden_size].
  *
  * \param padding_data  Padding data.
  * \param Shape         The shape of Padding data,
- *                      [up_pad + down_pad, number_of_input_features].
- * \param type          A float Tensor.
+ *                      [up_pad + down_pad, input_hidden_size].
  *
  * \param col           Col data.
  * \param Shape         The shape of Col data,
- *                      [minibatch, context_length * number_of_input_features].
- * \param type          A float Tensor.
+ *                      [minibatch, context_length * input_hidden_size].
  *
  * For a mini-batch of 2 variable lengths sentences, containing 3, and 1
  * time-steps:
diff --git a/paddle/operators/sequence_conv_op.cc b/paddle/operators/sequence_conv_op.cc
index 139000c561870c3bc49e01cdcb6cf4b787e64577..a73ceb41574fa0e8538a42238c454efe962131e0 100644
--- a/paddle/operators/sequence_conv_op.cc
+++ b/paddle/operators/sequence_conv_op.cc
@@ -30,9 +30,9 @@ class SequenceConvOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of SequenceConvOp should not be null.");
 
-    int context_length = ctx->Attrs().Get<int>("context_length");
-    bool padding_trainable = ctx->Attrs().Get<bool>("padding_trainable");
-    int context_start = ctx->Attrs().Get<int>("context_start");
+    int context_length = ctx->Attrs().Get<int>("contextLength");
+    bool padding_trainable = ctx->Attrs().Get<bool>("paddingTrainable");
+    int context_start = ctx->Attrs().Get<int>("contextStart");
 
     auto in_dims = ctx->GetInputDim("X");
     auto filter_dims = ctx->GetInputDim("Filter");
@@ -54,7 +54,7 @@ class SequenceConvOp : public framework::OperatorWithKernel {
 
       if (context_start == 0 && context_length == 1) {
         PADDLE_THROW(
-            "If context_start is 0 and context_length is 1, padding_trainable "
+            "If context_start is 0 and context_length is 1, paddingTrainable "
            "should be false.");
      }
      PADDLE_ENFORCE(padding_dim.size() == 2,
@@ -81,7 +81,7 @@ class SequenceConvGradOp : public framework::OperatorWithKernel {
                    "Gradient of output(Out) should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("X"), "The input(X) should not be null.");
 
-    if (ctx->Attrs().Get<bool>("padding_trainable") &&
+    if (ctx->Attrs().Get<bool>("paddingTrainable") &&
        ctx->HasOutput(framework::GradVarName("PaddingData"))) {
      ctx->SetOutputDim(framework::GradVarName("PaddingData"),
                        ctx->GetInputDim("PaddingData"));
@@ -128,25 +128,25 @@ class SequenceConvOpMaker : public framework::OpProtoAndCheckerMaker {
              "this LoDTensor is a matrix with shape (T, D), where, T is the "
              "total time steps in this mini-batch, D is the output feature size.");
 
-    AddAttr<bool>("padding_trainable",
+    AddAttr<bool>("paddingTrainable",
                  "(bool, default false) the padding data of SequenceConvOp "
                  "is trainable or not.")
        .SetDefault(false);
-    AddAttr<int>("context_length",
-                 "(int, default 3) the context_length of SequenceConvOp is the "
+    AddAttr<int>("contextLength",
+                 "(int, default 3) the contextLength of SequenceConvOp is the "
                 "height of the convolution kernel.")
        .SetDefault(3)
        .GreaterThan(0);
-    AddAttr<int>("context_start",
-                 "(int, default 0) the context_start of SequenceConvOp "
+    AddAttr<int>("contextStart",
+                 "(int, default 0) the contextStart of SequenceConvOp "
                 "represents the beginning of the convolution of the number of "
                 "rows of sequence, which can be negative.")
        .SetDefault(0);
-    AddAttr<int>("context_stride",
-                 "(int, default 1) the context_stride of SequenceConvOp "
+    AddAttr<int>("contextStride",
+                 "(int, default 1) the contextStride of SequenceConvOp "
                 "represents the step length of convolution. "
                 "Currently, SequenceConvOp only supports"
-                "context_stride=1.")
+                "contextStride=1.")
        .SetDefault(1)
        .GreaterThan(0);
diff --git a/paddle/operators/sequence_conv_op.h b/paddle/operators/sequence_conv_op.h
index cd8a8d4cea39161029602530cc75532b5f977d01..c502601b3800e3ed45680cd51b2c9af8e0761a16 100644
--- a/paddle/operators/sequence_conv_op.h
+++ b/paddle/operators/sequence_conv_op.h
@@ -35,10 +35,10 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     out->mutable_data<T>(context.GetPlace());
     context.ShareLoD("X", "Out");
 
-    int context_start = context.Attr<int>("context_start");
-    int context_length = context.Attr<int>("context_length");
-    int context_stride = context.Attr<int>("context_stride");
-    bool padding_trainable = context.Attr<bool>("padding_trainable");
+    int context_start = context.Attr<int>("contextStart");
+    int context_length = context.Attr<int>("contextLength");
+    int context_stride = context.Attr<int>("contextStride");
+    bool padding_trainable = context.Attr<bool>("paddingTrainable");
 
     // InferShape by in_lod
     PADDLE_ENFORCE_EQ(in->lod().size(), 1UL,
@@ -89,10 +89,10 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
     auto* in = context.Input<LoDTensor>("X");
     auto* filter = context.Input<Tensor>("Filter");
 
-    int context_start = context.Attr<int>("context_start");
-    int context_length = context.Attr<int>("context_length");
-    int context_stride = context.Attr<int>("context_stride");
-    bool padding_trainable = context.Attr<bool>("padding_trainable");
+    int context_start = context.Attr<int>("contextStart");
+    int context_length = context.Attr<int>("contextLength");
+    int context_stride = context.Attr<int>("contextStride");
+    bool padding_trainable = context.Attr<bool>("paddingTrainable");
 
     PADDLE_ENFORCE_EQ(in->lod().size(), 1UL,
                       "Only support one level sequence now.");
diff --git a/python/paddle/v2/framework/tests/test_seq_conv.py b/python/paddle/v2/framework/tests/test_seq_conv.py
index f0337c20a9e87fab971f9d9e2a113346feb20957..14edc5f953022ca05f5620c28bd7276d961dd4d0 100644
--- a/python/paddle/v2/framework/tests/test_seq_conv.py
+++ b/python/paddle/v2/framework/tests/test_seq_conv.py
@@ -45,10 +45,10 @@ class TestSeqProject(OpTest):
         self.inputs_val_no_f = ['PaddingData', 'X']
 
         self.attrs = {
-            'context_start': self.context_start,
-            'context_length': self.context_length,
-            'padding_trainable': self.padding_trainable,
-            'context_stride': self.context_stride
+            'contextStart': self.context_start,
+            'contextLength': self.context_length,
+            'paddingTrainable': self.padding_trainable,
+            'contextStride': self.context_stride
         }
         out = np.zeros(
             (self.input_size[0], self.output_represention)).astype('float32')
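This patch only renames the SequenceConvOp attribute keys from snake_case to camelCase; the attribute types, defaults, and semantics are unchanged. Below is a minimal, hypothetical Python sketch (not part of this patch) that maps the old keys to the new ones for callers updating their attribute dicts; the helper name and the example values are illustrative, with the values taken from the defaults declared in sequence_conv_op.cc.

```python
# Hypothetical migration helper (not part of this patch): maps the old
# snake_case SequenceConvOp attribute names to the new camelCase names
# introduced by this change.
_SEQ_CONV_ATTR_RENAMES = {
    'context_start': 'contextStart',
    'context_length': 'contextLength',
    'context_stride': 'contextStride',
    'padding_trainable': 'paddingTrainable',
}


def rename_seq_conv_attrs(attrs):
    """Return a copy of `attrs` with old sequence_conv keys renamed."""
    return {_SEQ_CONV_ATTR_RENAMES.get(k, k): v for k, v in attrs.items()}


if __name__ == '__main__':
    # Example values mirror the attribute defaults in sequence_conv_op.cc.
    old_attrs = {
        'context_start': 0,
        'context_length': 3,
        'padding_trainable': False,
        'context_stride': 1,
    }
    # After this patch the operator expects the camelCase keys,
    # as used in test_seq_conv.py above.
    print(rename_seq_conv_attrs(old_attrs))
    # {'contextStart': 0, 'contextLength': 3, 'paddingTrainable': False, 'contextStride': 1}
```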