From 75d0c7901530248afbcad7fc79c12241c1e4f00d Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 16 Oct 2017 15:01:39 -0700 Subject: [PATCH] Change Name convention of operator attributes (#4807) * Change dataType to data_type Follow PEP8 * Change name_convention to fit PEP8 --- paddle/framework/backward.cc | 2 +- paddle/operators/cross_entropy_op.cc | 22 ++++++++-------- paddle/operators/cross_entropy_op.cu | 4 +-- paddle/operators/cross_entropy_op.h | 4 +-- paddle/operators/fill_constant_op.cc | 4 +-- paddle/operators/name_convention.md | 2 +- paddle/operators/pool_op.cc | 26 +++++++++---------- paddle/operators/pool_op.h | 8 +++--- paddle/operators/pool_with_index_op.cc | 18 ++++++------- paddle/operators/pool_with_index_op.h | 4 +-- .../softmax_with_cross_entropy_op.cc | 14 +++++----- .../softmax_with_cross_entropy_op.cu | 4 +-- .../operators/softmax_with_cross_entropy_op.h | 4 +-- .../framework/tests/test_cross_entropy_op.py | 4 +-- .../v2/framework/tests/test_pool2d_op.py | 4 +-- .../v2/framework/tests/test_pool3d_op.py | 4 +-- .../v2/framework/tests/test_pool_max_op.py | 2 +- .../test_softmax_with_cross_entropy_op.py | 2 +- 18 files changed, 66 insertions(+), 66 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index e3d7dacd7f0..c78e0560717 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -433,7 +433,7 @@ ParamGradInfoMap AppendBackward( new OpDescBind("fill_constant", {}, {{"Out", {fill_one_op_out}}}, {{"shape", std::vector{1}}, {"value", static_cast(1.0)}, - {"dataType", framework::DataType::FP32}})); + {"data_type", framework::DataType::FP32}})); all_ops.push_back(std::move(fill_one_op)); size_t forward_op_num = all_ops.size(); size_t forward_block_num = program_desc.Size(); diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc index 708e80e96a0..923ae5be6c6 100644 --- a/paddle/operators/cross_entropy_op.cc +++ b/paddle/operators/cross_entropy_op.cc @@ -34,13 +34,13 @@ class CrossEntropyOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0], "The 1st dimension of Input(X) and Input(Label) should " "be equal."); - if (ctx->Attrs().Get("softLabel")) { + if (ctx->Attrs().Get("soft_label")) { PADDLE_ENFORCE_EQ(x_dims[1], label_dims[1], - "If Attr(softLabel) == true, the 2nd dimension of " + "If Attr(soft_label) == true, the 2nd dimension of " "Input(X) and Input(Label) should be equal."); } else { PADDLE_ENFORCE_EQ(label_dims[1], 1, - "If Attr(softLabel) == false, the 2nd dimension of " + "If Attr(soft_label) == false, the 2nd dimension of " "Input(Label) should be 1."); } @@ -82,13 +82,13 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel { "be equal."); PADDLE_ENFORCE_EQ(dy_dims[1], 1, "The 2nd dimension of Input(Y@Grad) should be 1."); - if (ctx->Attrs().Get("softLabel")) { + if (ctx->Attrs().Get("soft_label")) { PADDLE_ENFORCE_EQ(x_dims[1], label_dims[1], - "When Attr(softLabel) == true, the 2nd dimension of " + "When Attr(soft_label) == true, the 2nd dimension of " "Input(X) and Input(Label) should be equal."); } else { PADDLE_ENFORCE_EQ(label_dims[1], 1, - "When Attr(softLabel) == false, the 2nd dimension of " + "When Attr(soft_label) == false, the 2nd dimension of " "Input(Label) should be 1."); } ctx->SetOutputDim(framework::GradVarName("X"), x_dims); @@ -115,15 +115,15 @@ class CrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker { "Label", "(Tensor, default Tensor), the ground truth which is " "a 2-D tensor. 
" - "When softLabel is set to false, `Label` is a Tensor with shape " + "When soft_label is set to false, `Label` is a Tensor with shape " "[N x 1]. " - "When softLabel is set to true, `Label` is a Tensor " + "When soft_label is set to true, `Label` is a Tensor " "with shape [N x K]."); AddOutput("Y", "(Tensor, default Tensor), a 2-D tensor " "with shape [N x 1]. The cross entropy loss."); AddAttr( - "softLabel", + "soft_label", "(bool, default false), a flag to indicate whether to interpretate " "the given labels as soft labels.") .SetDefault(false); @@ -133,12 +133,12 @@ CrossEntropy Operator. It supports both standard cross-entropy and soft-label cross-entropy loss computation. 1) One-hot cross-entropy: - softLabel = false, Label[i, 0] indicates the class index for sample i: + soft_label = false, Label[i, 0] indicates the class index for sample i: Y[i] = -log(X[i, Label[i]]) 2) Soft-label cross-entropy: - softLabel = true, Label[i, j] indicates the soft label of class j + soft_label = true, Label[i, j] indicates the soft label of class j for sample i: Y[i] = \sum_j{-Label[i, j] * log(X[i, j])} diff --git a/paddle/operators/cross_entropy_op.cu b/paddle/operators/cross_entropy_op.cu index 07b0388b607..c492dddb09a 100644 --- a/paddle/operators/cross_entropy_op.cu +++ b/paddle/operators/cross_entropy_op.cu @@ -56,7 +56,7 @@ class CrossEntropyOpCUDAKernel : public framework::OpKernel { y->mutable_data(ctx.GetPlace()); math::CrossEntropyFunctor()( - ctx.device_context(), y, x, label, ctx.Attr("softLabel")); + ctx.device_context(), y, x, label, ctx.Attr("soft_label")); } }; @@ -83,7 +83,7 @@ class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel { int block = 512; int grid = (batch_size * class_num + block - 1) / block; - if (ctx.Attr("softLabel")) { + if (ctx.Attr("soft_label")) { auto* label_data = label->data(); SoftCrossEntropyGradientKernel<<< grid, block, 0, reinterpret_cast( diff --git a/paddle/operators/cross_entropy_op.h b/paddle/operators/cross_entropy_op.h index 19c276d23fc..42f282103b5 100644 --- a/paddle/operators/cross_entropy_op.h +++ b/paddle/operators/cross_entropy_op.h @@ -38,7 +38,7 @@ class CrossEntropyOpKernel : public framework::OpKernel { y->mutable_data(ctx.GetPlace()); math::CrossEntropyFunctor()( - ctx.device_context(), y, x, labels, ctx.Attr("softLabel")); + ctx.device_context(), y, x, labels, ctx.Attr("soft_label")); } }; @@ -55,7 +55,7 @@ class CrossEntropyGradientOpKernel : public framework::OpKernel { T* dx_data = dx->mutable_data(ctx.GetPlace()); int class_num = x->dims()[1]; - if (ctx.Attr("softLabel")) { + if (ctx.Attr("soft_label")) { auto x_mat = EigenMatrix::From(*x); auto dy_mat = EigenMatrix::From(*dy); auto lbl_mat = EigenMatrix::From(*label); diff --git a/paddle/operators/fill_constant_op.cc b/paddle/operators/fill_constant_op.cc index 65d03d5fa44..a56832e2024 100644 --- a/paddle/operators/fill_constant_op.cc +++ b/paddle/operators/fill_constant_op.cc @@ -35,7 +35,7 @@ class FillConstantOp : public framework::OperatorWithKernel { framework::DataType IndicateDataType( const framework::ExecutionContext &ctx) const override { - return static_cast(ctx.Attr("dataType")); + return static_cast(ctx.Attr("data_type")); } }; @@ -44,7 +44,7 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker { FillConstantOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { - AddAttr("dataType", + AddAttr("data_type", "(int, default 5 (FP32)) " "Output data type") 
.SetDefault(framework::DataType::FP32); diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index 379385dc5d9..5a216907950 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -11,7 +11,7 @@ When defining an operator in Paddle, a corresponding [OpProtoMaker](https://gith - If an operator's Input/Output are tensors in math, not match to any meaningful words, input name should starts from `X`. e.g. `X`, `Y`, and output name should starts from `Out`. e.g. `Out`. This rule intends making operators which have few inputs/outputs unified. - Attribute. - - Attribute name follows the **camelCase**. e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Also, attribute name prefers to meaningful English words. + - Attribute name follows the **snake_case**. e.g. `x`, `y`, `axis`, `rowwise_matrix`. Also, attribute name prefers to meaningful English words. - Comments. - Input/Output/Attr comment follow the format of **(type,default value) usage**, corresponding to which type it can be and how it will be used in the operator. e.g. Attribute in Accumulator`"gamma" `,`(float, default 1.0) Accumulation multiplier`. diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index c6d9aae1332..a326839c0f9 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -29,7 +29,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const { auto in_x_dims = ctx->GetInputDim("X"); - std::string pooling_type = ctx->Attrs().Get("poolingType"); + std::string pooling_type = ctx->Attrs().Get("pooling_type"); std::vector ksize = ctx->Attrs().Get>("ksize"); std::vector strides = ctx->Attrs().Get>("strides"); std::vector paddings = ctx->Attrs().Get>("paddings"); @@ -37,7 +37,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const { PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5, "Pooling intput should be 4-D or 5-D tensor."); - if (ctx->Attrs().Get("globalPooling")) { + if (ctx->Attrs().Get("global_pooling")) { ksize.resize(static_cast(in_x_dims.size()) - 2); for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = static_cast(in_x_dims[i + 2]); @@ -80,23 +80,23 @@ Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto, "the number of channels, H and W is the height and " "width of feature."); - AddAttr("poolingType", - "PoolingType of pooling operator." + AddAttr("pooling_type", + "Pooling_type of pooling operator." "Str constant equal to 'max' or 'avg'.") .InEnum({"max", "avg"}); AddAttr>( "ksize", "The pooling window size(height, width) of pooling operator." - "If globalPooling = true, ksize is ignored and need not be " + "If global_pooling = true, ksize is ignored and need not be " "specified."); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddAttr( - "globalPooling", - "Whether to use the globalPooling." + "global_pooling", + "Whether to use the global_pooling." "Bool constant equal to false or true." "Default false." - "If globalPooling = true, ksize is ignored and need not be specified.") + "If global_pooling = true, ksize is ignored and need not be specified.") .SetDefault(false); AddAttr>("strides", "The strides(height, width) of pooling window." @@ -146,7 +146,7 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto, "the number of channels, D, H and W is the depth, height and " "width of feature."); - AddAttr("poolingType", + AddAttr("pooling_type", "PoolingType of pooling operator." 
"Str constant equal to 'max' or 'avg'.") .InEnum({"max", "avg"}); @@ -154,15 +154,15 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto, AddAttr>( "ksize", "The pooling window size(depth, height, width) of pooling operator." - "If globalPooling = true, ksize is ignored and need not be " + "If global_pooling = true, ksize is ignored and need not be " "specified."); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddAttr( - "globalPooling", - "Whether to use the globalPooling." + "global_pooling", + "Whether to use the global_pooling." "Bool constant equal to false or true." "Default false." - "If globalPooling = true, ksize is ignored and need not be specified.") + "If global_pooling = true, ksize is ignored and need not be specified.") .SetDefault(false); AddAttr>("strides", "Strides(depth, height, width) of pooling operator." diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h index e5016d573dd..f6169e05b3c 100644 --- a/paddle/operators/pool_op.h +++ b/paddle/operators/pool_op.h @@ -59,11 +59,11 @@ class PoolKernel : public framework::OpKernel { const Tensor* in_x = context.Input("X"); Tensor* out = context.Output("Out"); - std::string pooling_type = context.Attr("poolingType"); + std::string pooling_type = context.Attr("pooling_type"); std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); - if (context.Attr("globalPooling")) { + if (context.Attr("global_pooling")) { for (size_t i = 0; i < ksize.size(); ++i) { ksize[i] = static_cast(in_x->dims()[i + 2]); } @@ -119,12 +119,12 @@ class PoolGradKernel : public framework::OpKernel { context.Input(framework::GradVarName("Out")); Tensor* in_x_grad = context.Output(framework::GradVarName("X")); - std::string pooling_type = context.Attr("poolingType"); + std::string pooling_type = context.Attr("pooling_type"); std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); - if (context.Attr("globalPooling")) { + if (context.Attr("global_pooling")) { for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = static_cast(in_x->dims()[i + 2]); } diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc index 005ee886934..8eee20c2a9b 100644 --- a/paddle/operators/pool_with_index_op.cc +++ b/paddle/operators/pool_with_index_op.cc @@ -45,7 +45,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5, "Pooling intput should be 4-D or 5-D tensor."); - if (ctx->Attrs().Get("globalPooling")) { + if (ctx->Attrs().Get("global_pooling")) { ksize.resize(static_cast(in_x_dims.size()) - 2); for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = static_cast(in_x_dims[i + 2]); @@ -108,15 +108,15 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr>( "ksize", "The pooling window size(height, width) of pooling operator." - "If globalPooling = true, ksize is ignored and need not be " + "If global_pooling = true, ksize is ignored and need not be " "specified."); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddAttr( - "globalPooling", - "Whether to use the globalPooling." + "global_pooling", + "Whether to use the global_pooling." "Bool constant equal to false or true." "Default false." 
- "If globalPooling = true, ksize is ignored and need not be specified.") + "If global_pooling = true, ksize is ignored and need not be specified.") .SetDefault(false); AddAttr>("strides", "The strides(height, width) of pooling window." @@ -179,15 +179,15 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr>( "ksize", "The pooling window size(depth, height, width) of pooling operator." - "If globalPooling = true, ksize is ignored and need not be " + "If global_pooling = true, ksize is ignored and need not be " "specified."); // TODO(Chengduo): Add checker. (Currently, // TypedAttrChecker don't support vector type.) AddAttr( - "globalPooling", - "Whether to use the globalPooling." + "global_pooling", + "Whether to use the global_pooling." "Bool constant equal to false or true." "Default false." - "If globalPooling = true, ksize is ignored and need not be specified.") + "If global_pooling = true, ksize is ignored and need not be specified.") .SetDefault(false); AddAttr>( "strides", diff --git a/paddle/operators/pool_with_index_op.h b/paddle/operators/pool_with_index_op.h index 01b961ca829..455c453efcd 100644 --- a/paddle/operators/pool_with_index_op.h +++ b/paddle/operators/pool_with_index_op.h @@ -35,7 +35,7 @@ class MaxPoolWithIndexKernel : public framework::OpKernel { std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); - if (context.Attr("globalPooling")) { + if (context.Attr("global_pooling")) { for (size_t i = 0; i < ksize.size(); ++i) { ksize[i] = static_cast(in_x->dims()[i + 2]); } @@ -70,7 +70,7 @@ class MaxPoolWithIndexGradKernel : public framework::OpKernel { std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); - if (context.Attr("globalPooling")) { + if (context.Attr("global_pooling")) { for (size_t i = 0; i < ksize.size(); ++i) { ksize[i] = static_cast(in_x_grad->dims()[i + 2]); } diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc index 5431a1657c3..9121609f10e 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/operators/softmax_with_cross_entropy_op.cc @@ -46,7 +46,7 @@ class SoftmaxWithCrossEntropyOpMaker "(Tensor, default: Tensor), A 2-D tensor. 
The cross " "entropy loss with shape [N x 1]."); AddAttr( - "softLabel", + "soft_label", "(bool, default: false), A flag to indicate whether to interpretate " "the given labels as soft labels.") .SetDefault(false); @@ -100,13 +100,13 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(labels_dims.size(), 2UL, "The labels should be a 2-D tensor."); - if (ctx->Attrs().Get("softLabel")) { + if (ctx->Attrs().Get("soft_label")) { PADDLE_ENFORCE_EQ(logits_dims[1], labels_dims[1], - "If Attr(softLabel) == true, the 2nd dimension of " + "If Attr(soft_label) == true, the 2nd dimension of " "Input(X) and Input(Label) should be equal."); } else { PADDLE_ENFORCE_EQ(labels_dims[1], 1UL, - "If Attr(softLabel) == false, the 2nd dimension of " + "If Attr(soft_label) == false, the 2nd dimension of " "Input(Label) should be 1."); } @@ -142,13 +142,13 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(labels_dims.size(), 2UL, "The labels should be a 2-D tensor."); - if (ctx->Attrs().Get("softLabel")) { + if (ctx->Attrs().Get("soft_label")) { PADDLE_ENFORCE_EQ(softmax_dims[1], labels_dims[1], - "When Attr(softLabel) == true, the 2nd dimension of " + "When Attr(soft_label) == true, the 2nd dimension of " "Input(X) and Input(Label) should be equal."); } else { PADDLE_ENFORCE_EQ(labels_dims[1], 1UL, - "When Attr(softLabel) == false, the 2nd dimension of " + "When Attr(soft_label) == false, the 2nd dimension of " "Input(Label) should be 1."); } diff --git a/paddle/operators/softmax_with_cross_entropy_op.cu b/paddle/operators/softmax_with_cross_entropy_op.cu index 2bc53ecf871..d03a1a76585 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/operators/softmax_with_cross_entropy_op.cu @@ -70,7 +70,7 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel { logits, softmax); math::CrossEntropyFunctor()( context.device_context(), loss, softmax, labels, - context.Attr("softLabel")); + context.Attr("soft_label")); } }; @@ -93,7 +93,7 @@ class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel { int block = 512; int grid = (batch_size * class_num + block - 1) / block; - if (context.Attr("softLabel")) { + if (context.Attr("soft_label")) { const T* label_data = labels->data(); SoftCrossEntropyGradientKernel<<< grid, block, 0, reinterpret_cast( diff --git a/paddle/operators/softmax_with_cross_entropy_op.h b/paddle/operators/softmax_with_cross_entropy_op.h index cffd422f182..66d7bc1569e 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.h +++ b/paddle/operators/softmax_with_cross_entropy_op.h @@ -44,7 +44,7 @@ class SoftmaxWithCrossEntropyKernel : public framework::OpKernel { logits, softmax); math::CrossEntropyFunctor()( context.device_context(), loss, softmax, labels, - context.Attr("softLabel")); + context.Attr("soft_label")); } }; @@ -60,7 +60,7 @@ class SoftmaxWithCrossEntropyGradKernel : public framework::OpKernel { logit_grad->ShareDataWith(*context.Input("Softmax")); const int class_num = logit_grad->dims()[1]; - if (context.Attr("softLabel")) { + if (context.Attr("soft_label")) { auto out_grad_mat = EigenMatrix::From(*out_grad); auto logit_grad_mat = EigenMatrix::From(*logit_grad); auto lbl_mat = EigenMatrix::From(*labels); diff --git a/python/paddle/v2/framework/tests/test_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_cross_entropy_op.py index 4ea14da7fd3..919b6c3f674 100644 --- a/python/paddle/v2/framework/tests/test_cross_entropy_op.py +++ 
b/python/paddle/v2/framework/tests/test_cross_entropy_op.py @@ -49,7 +49,7 @@ class TestCrossEntropyOp2(OpTest): self.inputs = {"X": X, "Label": label} self.outputs = {"Y": cross_entropy} - self.attrs = {"softLabel": True} + self.attrs = {"soft_label": True} def test_check_output(self): self.check_output() @@ -82,7 +82,7 @@ class TestCrossEntropyOp3(OpTest): self.inputs = {"X": X, "Label": label.astype(np.float32)} self.outputs = {"Y": cross_entropy} - self.attrs = {"softLabel": True} + self.attrs = {"soft_label": True} def test_check_output(self): self.check_output() diff --git a/python/paddle/v2/framework/tests/test_pool2d_op.py b/python/paddle/v2/framework/tests/test_pool2d_op.py index 2941fda81b2..3fcd8941d4f 100644 --- a/python/paddle/v2/framework/tests/test_pool2d_op.py +++ b/python/paddle/v2/framework/tests/test_pool2d_op.py @@ -56,8 +56,8 @@ class TestPool2d_Op(OpTest): 'strides': self.strides, 'paddings': self.paddings, 'ksize': self.ksize, - 'poolingType': self.pool_type, - 'globalPooling': self.global_pool, + 'pooling_type': self.pool_type, + 'global_pooling': self.global_pool, } self.outputs = {'Out': output} diff --git a/python/paddle/v2/framework/tests/test_pool3d_op.py b/python/paddle/v2/framework/tests/test_pool3d_op.py index 8792b492e3d..f4e938041fa 100644 --- a/python/paddle/v2/framework/tests/test_pool3d_op.py +++ b/python/paddle/v2/framework/tests/test_pool3d_op.py @@ -64,8 +64,8 @@ class TestPool3d_Op(OpTest): 'strides': self.strides, 'paddings': self.paddings, 'ksize': self.ksize, - 'poolingType': self.pool_type, - 'globalPooling': self.global_pool, + 'pooling_type': self.pool_type, + 'global_pooling': self.global_pool, } self.outputs = {'Out': output} diff --git a/python/paddle/v2/framework/tests/test_pool_max_op.py b/python/paddle/v2/framework/tests/test_pool_max_op.py index f0f8aa6089c..b78f9bba05c 100644 --- a/python/paddle/v2/framework/tests/test_pool_max_op.py +++ b/python/paddle/v2/framework/tests/test_pool_max_op.py @@ -86,7 +86,7 @@ class TestMaxPoolWithIndex_Op(OpTest): 'strides': self.strides, 'paddings': self.paddings, 'ksize': self.ksize, - 'globalPooling': self.global_pool, + 'global_pooling': self.global_pool, } self.inputs = {'X': input} diff --git a/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py index 377d07fb592..05ba954c0b8 100644 --- a/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py +++ b/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py @@ -57,7 +57,7 @@ class TestSoftmaxWithCrossEntropyOp2(OpTest): self.inputs = {"Logits": logits, "Label": labels} self.outputs = {"Softmax": softmax, "Loss": cross_entropy} - self.attrs = {"softLabel": True} + self.attrs = {"soft_label": True} def test_check_output(self): self.check_output() -- GitLab
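
Every rename in this commit follows the same camelCase to snake_case pattern: dataType → data_type, softLabel → soft_label, poolingType → pooling_type, globalPooling → global_pooling. The short Python sketch below summarizes that mapping for code that still passes the old attribute names; the helper names (ATTR_RENAMES, to_snake_case, migrate_attrs) and the regex-based converter are illustrative assumptions and not part of the PaddlePaddle API, but the key/value pairs are taken directly from the diff hunks above.

```python
import re

# Attribute renames made by this patch (camelCase -> snake_case).
# The mapping is copied from the diff hunks above; the helpers are hypothetical.
ATTR_RENAMES = {
    "dataType": "data_type",
    "softLabel": "soft_label",
    "poolingType": "pooling_type",
    "globalPooling": "global_pooling",
}


def to_snake_case(name):
    """Convert a camelCase attribute name to snake_case, e.g. 'softLabel' -> 'soft_label'."""
    return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()


def migrate_attrs(attrs):
    """Return a copy of an operator attrs dict with any old camelCase keys renamed."""
    return {ATTR_RENAMES.get(key, key): value for key, value in attrs.items()}


if __name__ == "__main__":
    print(migrate_attrs({"softLabel": True}))   # {'soft_label': True}
    print(to_snake_case("globalPooling"))       # global_pooling
```

In the updated tests this simply means writing, for example, self.attrs = {"soft_label": True} instead of {"softLabel": True}, as shown in test_cross_entropy_op.py above.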