diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc
index e3d7dacd7f0ad61a606e8b3e8f6a84b98deac729..c78e05607179387e1c015f6fd24669c538587759 100644
--- a/paddle/framework/backward.cc
+++ b/paddle/framework/backward.cc
@@ -433,7 +433,7 @@ ParamGradInfoMap AppendBackward(
       new OpDescBind("fill_constant", {}, {{"Out", {fill_one_op_out}}},
                      {{"shape", std::vector<int>{1}},
                       {"value", static_cast<float>(1.0)},
-                      {"dataType", framework::DataType::FP32}}));
+                      {"data_type", framework::DataType::FP32}}));
   all_ops.push_back(std::move(fill_one_op));
   size_t forward_op_num = all_ops.size();
   size_t forward_block_num = program_desc.Size();
diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc
index 708e80e96a0f007be1594d8b4781d1296d6b398d..923ae5be6c608bd3bccf278f310ec8e219fc0724 100644
--- a/paddle/operators/cross_entropy_op.cc
+++ b/paddle/operators/cross_entropy_op.cc
@@ -34,13 +34,13 @@ class CrossEntropyOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0],
                       "The 1st dimension of Input(X) and Input(Label) should "
                       "be equal.");
-    if (ctx->Attrs().Get<bool>("softLabel")) {
+    if (ctx->Attrs().Get<bool>("soft_label")) {
       PADDLE_ENFORCE_EQ(x_dims[1], label_dims[1],
-                        "If Attr(softLabel) == true, the 2nd dimension of "
+                        "If Attr(soft_label) == true, the 2nd dimension of "
                         "Input(X) and Input(Label) should be equal.");
     } else {
       PADDLE_ENFORCE_EQ(label_dims[1], 1,
-                        "If Attr(softLabel) == false, the 2nd dimension of "
+                        "If Attr(soft_label) == false, the 2nd dimension of "
                         "Input(Label) should be 1.");
     }
 
@@ -82,13 +82,13 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel {
                       "be equal.");
     PADDLE_ENFORCE_EQ(dy_dims[1], 1,
                       "The 2nd dimension of Input(Y@Grad) should be 1.");
-    if (ctx->Attrs().Get<bool>("softLabel")) {
+    if (ctx->Attrs().Get<bool>("soft_label")) {
       PADDLE_ENFORCE_EQ(x_dims[1], label_dims[1],
-                        "When Attr(softLabel) == true, the 2nd dimension of "
+                        "When Attr(soft_label) == true, the 2nd dimension of "
                         "Input(X) and Input(Label) should be equal.");
     } else {
       PADDLE_ENFORCE_EQ(label_dims[1], 1,
-                        "When Attr(softLabel) == false, the 2nd dimension of "
+                        "When Attr(soft_label) == false, the 2nd dimension of "
                         "Input(Label) should be 1.");
     }
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
@@ -115,15 +115,15 @@ class CrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker {
              "Label",
              "(Tensor, default Tensor<int>), the ground truth which is "
              "a 2-D tensor. "
-             "When softLabel is set to false, `Label` is a Tensor with shape "
+             "When soft_label is set to false, `Label` is a Tensor with shape "
              "[N x 1]. "
-             "When softLabel is set to true, `Label` is a Tensor "
+             "When soft_label is set to true, `Label` is a Tensor "
              "with shape [N x K].");
     AddOutput("Y",
               "(Tensor, default Tensor<float>), a 2-D tensor "
               "with shape [N x 1]. The cross entropy loss.");
     AddAttr<bool>(
-        "softLabel",
+        "soft_label",
         "(bool, default false), a flag to indicate whether to interpretate "
         "the given labels as soft labels.")
         .SetDefault(false);
@@ -133,12 +133,12 @@ CrossEntropy Operator.
 
 It supports both standard cross-entropy and soft-label cross-entropy loss computation.
 1) One-hot cross-entropy:
-    softLabel = false, Label[i, 0] indicates the class index for sample i:
+    soft_label = false, Label[i, 0] indicates the class index for sample i:
 
                 Y[i] = -log(X[i, Label[i]])
 
 2) Soft-label cross-entropy:
-    softLabel = true, Label[i, j] indicates the soft label of class j
+    soft_label = true, Label[i, j] indicates the soft label of class j
       for sample i:
 
                 Y[i] = \sum_j{-Label[i, j] * log(X[i, j])}
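As a reference for reviewers, here is a minimal NumPy sketch of the two loss modes described in the DOC block above. This is illustrative pseudocode for the math only, not the CPU/GPU kernels; the helper name `cross_entropy` and the example shapes are our assumptions:

```python
import numpy as np

def cross_entropy(X, label, soft_label=False):
    # X: [N x D] rows of probabilities; label: [N x 1] class ids when
    # soft_label=False, or [N x D] distributions when soft_label=True.
    N = X.shape[0]
    if soft_label:
        # Y[i] = \sum_j -Label[i, j] * log(X[i, j])
        return (-label * np.log(X)).sum(axis=1, keepdims=True)
    # Y[i] = -log(X[i, Label[i]])
    return -np.log(X[np.arange(N), label.flatten()]).reshape(N, 1)

X = np.random.uniform(0.1, 1.0, (4, 3)).astype("float32")
X /= X.sum(axis=1, keepdims=True)  # normalize rows to valid distributions
print(cross_entropy(X, np.random.randint(0, 3, (4, 1))))
```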
diff --git a/paddle/operators/cross_entropy_op.cu b/paddle/operators/cross_entropy_op.cu
index 07b0388b607e862cd7cd64fcb6e3d00ee277b178..c492dddb09a41e3731a211b4fa083e57ad780f42 100644
--- a/paddle/operators/cross_entropy_op.cu
+++ b/paddle/operators/cross_entropy_op.cu
@@ -56,7 +56,7 @@ class CrossEntropyOpCUDAKernel : public framework::OpKernel<T> {
     y->mutable_data<T>(ctx.GetPlace());
 
     math::CrossEntropyFunctor<platform::GPUPlace, T>()(
-        ctx.device_context(), y, x, label, ctx.Attr<bool>("softLabel"));
+        ctx.device_context(), y, x, label, ctx.Attr<bool>("soft_label"));
   }
 };
 
@@ -83,7 +83,7 @@ class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel<T> {
     int block = 512;
     int grid = (batch_size * class_num + block - 1) / block;
 
-    if (ctx.Attr<bool>("softLabel")) {
+    if (ctx.Attr<bool>("soft_label")) {
       auto* label_data = label->data<T>();
       SoftCrossEntropyGradientKernel<T><<<
           grid, block, 0, reinterpret_cast<const platform::CUDADeviceContext&>(
diff --git a/paddle/operators/cross_entropy_op.h b/paddle/operators/cross_entropy_op.h
index 19c276d23fc0f7f89b45c6d52c1c8e40d7f7d51d..42f282103b5609e3c987fc4a83113f86532f74d6 100644
--- a/paddle/operators/cross_entropy_op.h
+++ b/paddle/operators/cross_entropy_op.h
@@ -38,7 +38,7 @@ class CrossEntropyOpKernel : public framework::OpKernel<T> {
     y->mutable_data<T>(ctx.GetPlace());
 
     math::CrossEntropyFunctor<platform::CPUPlace, T>()(
-        ctx.device_context(), y, x, labels, ctx.Attr<bool>("softLabel"));
+        ctx.device_context(), y, x, labels, ctx.Attr<bool>("soft_label"));
   }
 };
 
@@ -55,7 +55,7 @@ class CrossEntropyGradientOpKernel : public framework::OpKernel<T> {
     T* dx_data = dx->mutable_data<T>(ctx.GetPlace());
     int class_num = x->dims()[1];
 
-    if (ctx.Attr<bool>("softLabel")) {
+    if (ctx.Attr<bool>("soft_label")) {
       auto x_mat = EigenMatrix<T>::From(*x);
       auto dy_mat = EigenMatrix<T>::From(*dy);
       auto lbl_mat = EigenMatrix<T>::From(*label);
diff --git a/paddle/operators/fill_constant_op.cc b/paddle/operators/fill_constant_op.cc
index 65d03d5fa4426ef4229c1155753c67e59ce98857..a56832e202404a479e12bbbbe29fbf611cb478d4 100644
--- a/paddle/operators/fill_constant_op.cc
+++ b/paddle/operators/fill_constant_op.cc
@@ -35,7 +35,7 @@ class FillConstantOp : public framework::OperatorWithKernel {
 
   framework::DataType IndicateDataType(
       const framework::ExecutionContext &ctx) const override {
-    return static_cast<framework::DataType>(ctx.Attr<int>("dataType"));
+    return static_cast<framework::DataType>(ctx.Attr<int>("data_type"));
   }
 };
 
@@ -44,7 +44,7 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
   FillConstantOpMaker(framework::OpProto *proto,
                       framework::OpAttrChecker *op_checker)
       : framework::OpProtoAndCheckerMaker(proto, op_checker) {
-    AddAttr<int>("dataType",
+    AddAttr<int>("data_type",
                  "(int, default 5 (FP32)) "
                  "Output data type")
        .SetDefault(framework::DataType::FP32);
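For clarity on `data_type`: it stays a plain int attribute that is cast to `framework::DataType`. A small sketch of how a caller might spell the attribute after this rename; only FP32 = 5 is confirmed by the "(int, default 5 (FP32))" comment above, so the neighboring enum values here are assumptions for illustration:

```python
from enum import IntEnum

class DataType(IntEnum):
    FP16 = 4  # assumed value
    FP32 = 5  # confirmed by the "(int, default 5 (FP32))" comment
    FP64 = 6  # assumed value

# An op description now carries the snake_case key:
attrs = {"shape": [1], "value": 1.0, "data_type": int(DataType.FP32)}
print(attrs)
```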
diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md
index 379385dc5d914101c7b5c9494f9383b6cf6a9b79..5a216907950100070ba57176c382eb659effb293 100644
--- a/paddle/operators/name_convention.md
+++ b/paddle/operators/name_convention.md
@@ -11,7 +11,7 @@ When defining an operator in Paddle, a corresponding [OpProtoMaker](https://gith
 - If an operator's Input/Output are tensors in math, not match to any meaningful words, input name should starts from `X`. e.g. `X`, `Y`, and output name should starts from `Out`. e.g. `Out`. This rule intends making operators which have few inputs/outputs unified.
 
 - Attribute.
-  - Attribute name follows the **camelCase**. e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Also, attribute name prefers to meaningful English words.
+  - Attribute name follows the **snake_case**. e.g. `x`, `y`, `axis`, `rowwise_matrix`. Also, attribute name prefers to meaningful English words.
 
 - Comments.
   - Input/Output/Attr comment follow the format of **(type,default value) usage**, corresponding to which type it can be and how it will be used in the operator. e.g. Attribute in Accumulator`"gamma" `,`(float, default 1.0) Accumulation multiplier`.
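The convention change is purely mechanical, so old camelCase attribute names can be migrated programmatically; a minimal sketch (the helper `camel_to_snake` is ours, not part of the codebase):

```python
import re

def camel_to_snake(name):
    # Insert '_' before an upper-case letter that follows a lower-case
    # letter or digit, then lower-case everything.
    return re.sub(r"(?<=[a-z0-9])([A-Z])", r"_\1", name).lower()

for old in ["softLabel", "dataType", "poolingType", "globalPooling"]:
    print(old, "->", camel_to_snake(old))
# softLabel -> soft_label, dataType -> data_type,
# poolingType -> pooling_type, globalPooling -> global_pooling
```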
diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc
index c6d9aae13322ebcc9ebbe394d9b9831bd67fe632..a326839c0f9ad14b8fd2aac596f21c7dd2539cd7 100644
--- a/paddle/operators/pool_op.cc
+++ b/paddle/operators/pool_op.cc
@@ -29,7 +29,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const {
 
   auto in_x_dims = ctx->GetInputDim("X");
 
-  std::string pooling_type = ctx->Attrs().Get<std::string>("poolingType");
+  std::string pooling_type = ctx->Attrs().Get<std::string>("pooling_type");
   std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
   std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
   std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
@@ -37,7 +37,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const {
   PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5,
                  "Pooling intput should be 4-D or 5-D tensor.");
 
-  if (ctx->Attrs().Get<bool>("globalPooling")) {
+  if (ctx->Attrs().Get<bool>("global_pooling")) {
     ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
     for (size_t i = 0; i < ksize.size(); ++i)
       ksize[i] = static_cast<int>(in_x_dims[i + 2]);
@@ -80,23 +80,23 @@ Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto,
            "the number of channels, H and W is the height and "
            "width of feature.");
 
-  AddAttr<std::string>("poolingType",
-                       "PoolingType of pooling operator."
+  AddAttr<std::string>("pooling_type",
+                       "Pooling_type of pooling operator."
                        "Str constant equal to 'max' or 'avg'.")
       .InEnum({"max", "avg"});
 
   AddAttr<std::vector<int>>(
       "ksize",
       "The pooling window size(height, width) of pooling operator."
-      "If globalPooling = true, ksize is ignored and need not be "
+      "If global_pooling = true, ksize is ignored and need not be "
      "specified.");  // TODO(Chengduo): Add checker. (Currently,
                      // TypedAttrChecker don't support vector type.)
   AddAttr<bool>(
-      "globalPooling",
-      "Whether to use the globalPooling."
+      "global_pooling",
+      "Whether to use the global_pooling."
       "Bool constant equal to false or true."
       "Default false."
-      "If globalPooling = true, ksize is ignored and need not be specified.")
+      "If global_pooling = true, ksize is ignored and need not be specified.")
      .SetDefault(false);
   AddAttr<std::vector<int>>("strides",
                             "The strides(height, width) of pooling window."
@@ -146,7 +146,7 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto,
            "the number of channels, D, H and W is the depth, height and "
            "width of feature.");
 
-  AddAttr<std::string>("poolingType",
+  AddAttr<std::string>("pooling_type",
                        "PoolingType of pooling operator."
                        "Str constant equal to 'max' or 'avg'.")
       .InEnum({"max", "avg"});
@@ -154,15 +154,15 @@ Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto,
   AddAttr<std::vector<int>>(
       "ksize",
       "The pooling window size(depth, height, width) of pooling operator."
-      "If globalPooling = true, ksize is ignored and need not be "
+      "If global_pooling = true, ksize is ignored and need not be "
      "specified.");  // TODO(Chengduo): Add checker. (Currently,
                      // TypedAttrChecker don't support vector type.)
   AddAttr<bool>(
-      "globalPooling",
-      "Whether to use the globalPooling."
+      "global_pooling",
+      "Whether to use the global_pooling."
       "Bool constant equal to false or true."
       "Default false."
-      "If globalPooling = true, ksize is ignored and need not be specified.")
+      "If global_pooling = true, ksize is ignored and need not be specified.")
      .SetDefault(false);
   AddAttr<std::vector<int>>("strides",
                             "Strides(depth, height, width) of pooling operator."
diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h
index e5016d573dde0a9c8a90cddf14f68706b69fade5..f6169e05b3cdea621d26873185b2544f7d0fc188 100644
--- a/paddle/operators/pool_op.h
+++ b/paddle/operators/pool_op.h
@@ -59,11 +59,11 @@ class PoolKernel : public framework::OpKernel<T> {
     const Tensor* in_x = context.Input<Tensor>("X");
     Tensor* out = context.Output<Tensor>("Out");
 
-    std::string pooling_type = context.Attr<std::string>("poolingType");
+    std::string pooling_type = context.Attr<std::string>("pooling_type");
    std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
-    if (context.Attr<bool>("globalPooling")) {
+    if (context.Attr<bool>("global_pooling")) {
       for (size_t i = 0; i < ksize.size(); ++i) {
         ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
       }
@@ -119,12 +119,12 @@ class PoolGradKernel : public framework::OpKernel<T> {
         context.Input<Tensor>(framework::GradVarName("Out"));
     Tensor* in_x_grad = context.Output<Tensor>(framework::GradVarName("X"));
 
-    std::string pooling_type = context.Attr<std::string>("poolingType");
+    std::string pooling_type = context.Attr<std::string>("pooling_type");
     std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
 
-    if (context.Attr<bool>("globalPooling")) {
+    if (context.Attr<bool>("global_pooling")) {
       for (size_t i = 0; i < ksize.size(); ++i)
         ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
     }
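The `global_pooling` semantics the kernels above implement — overwrite `ksize` with the input's spatial dimensions when the flag is set — can be summarized in NumPy. A sketch under the assumption of zero padding; `avg_pool2d` is our illustrative helper, not the production kernel:

```python
import numpy as np

def avg_pool2d(x, ksize, strides, global_pooling=False):
    # x: [N, C, H, W]; paddings omitted for brevity.
    N, C, H, W = x.shape
    if global_pooling:
        ksize, strides = [H, W], [1, 1]  # ksize is ignored, as documented
    out_h = (H - ksize[0]) // strides[0] + 1
    out_w = (W - ksize[1]) // strides[1] + 1
    out = np.empty((N, C, out_h, out_w), dtype=x.dtype)
    for i in range(out_h):
        for j in range(out_w):
            hs, ws = i * strides[0], j * strides[1]
            out[:, :, i, j] = x[:, :, hs:hs + ksize[0],
                                ws:ws + ksize[1]].mean(axis=(2, 3))
    return out

x = np.random.randn(2, 3, 8, 8).astype("float32")
print(avg_pool2d(x, [2, 2], [2, 2]).shape)                       # (2, 3, 4, 4)
print(avg_pool2d(x, [2, 2], [2, 2], global_pooling=True).shape)  # (2, 3, 1, 1)
```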
diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc
index 005ee886934b193064cc739638398b3535db9274..8eee20c2a9b6ce734a695ff36774bf51d0ad88be 100644
--- a/paddle/operators/pool_with_index_op.cc
+++ b/paddle/operators/pool_with_index_op.cc
@@ -45,7 +45,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5,
                    "Pooling intput should be 4-D or 5-D tensor.");
 
-    if (ctx->Attrs().Get<bool>("globalPooling")) {
+    if (ctx->Attrs().Get<bool>("global_pooling")) {
       ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
       for (size_t i = 0; i < ksize.size(); ++i)
         ksize[i] = static_cast<int>(in_x_dims[i + 2]);
@@ -108,15 +108,15 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<std::vector<int>>(
         "ksize",
         "The pooling window size(height, width) of pooling operator."
-        "If globalPooling = true, ksize is ignored and need not be "
+        "If global_pooling = true, ksize is ignored and need not be "
        "specified.");  // TODO(Chengduo): Add checker. (Currently,
                        // TypedAttrChecker don't support vector type.)
     AddAttr<bool>(
-        "globalPooling",
-        "Whether to use the globalPooling."
+        "global_pooling",
+        "Whether to use the global_pooling."
         "Bool constant equal to false or true."
         "Default false."
-        "If globalPooling = true, ksize is ignored and need not be specified.")
+        "If global_pooling = true, ksize is ignored and need not be specified.")
        .SetDefault(false);
     AddAttr<std::vector<int>>("strides",
                               "The strides(height, width) of pooling window."
@@ -179,15 +179,15 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<std::vector<int>>(
         "ksize",
         "The pooling window size(depth, height, width) of pooling operator."
-        "If globalPooling = true, ksize is ignored and need not be "
+        "If global_pooling = true, ksize is ignored and need not be "
        "specified.");  // TODO(Chengduo): Add checker. (Currently,
                        // TypedAttrChecker don't support vector type.)
     AddAttr<bool>(
-        "globalPooling",
-        "Whether to use the globalPooling."
+        "global_pooling",
+        "Whether to use the global_pooling."
         "Bool constant equal to false or true."
         "Default false."
-        "If globalPooling = true, ksize is ignored and need not be specified.")
+        "If global_pooling = true, ksize is ignored and need not be specified.")
        .SetDefault(false);
     AddAttr<std::vector<int>>(
         "strides",
diff --git a/paddle/operators/pool_with_index_op.h b/paddle/operators/pool_with_index_op.h
index 01b961ca8295f723bea7335e43ec5ab100dfc65c..455c453efcd15bf0150bbd3de83d50729f338b4b 100644
--- a/paddle/operators/pool_with_index_op.h
+++ b/paddle/operators/pool_with_index_op.h
@@ -35,7 +35,7 @@ class MaxPoolWithIndexKernel : public framework::OpKernel<T> {
     std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
-    if (context.Attr<bool>("globalPooling")) {
+    if (context.Attr<bool>("global_pooling")) {
       for (size_t i = 0; i < ksize.size(); ++i) {
         ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
       }
@@ -70,7 +70,7 @@ class MaxPoolWithIndexGradKernel : public framework::OpKernel<T> {
     std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
-    if (context.Attr<bool>("globalPooling")) {
+    if (context.Attr<bool>("global_pooling")) {
       for (size_t i = 0; i < ksize.size(); ++i) {
         ksize[i] = static_cast<int>(in_x_grad->dims()[i + 2]);
       }
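max_pool_with_index additionally emits the argmax positions used by unpooling. A NumPy sketch of the forward pass; the flattened-H*W layout of the mask is our assumption about the Mask output, and padding is again omitted:

```python
import numpy as np

def max_pool2d_with_index(x, ksize, strides, global_pooling=False):
    # x: [N, C, H, W] -> pooled values plus argmax offsets per window.
    N, C, H, W = x.shape
    if global_pooling:
        ksize, strides = [H, W], [1, 1]
    out_h = (H - ksize[0]) // strides[0] + 1
    out_w = (W - ksize[1]) // strides[1] + 1
    out = np.empty((N, C, out_h, out_w), dtype=x.dtype)
    mask = np.empty((N, C, out_h, out_w), dtype=np.int64)
    for i in range(out_h):
        for j in range(out_w):
            hs, ws = i * strides[0], j * strides[1]
            win = x[:, :, hs:hs + ksize[0], ws:ws + ksize[1]].reshape(N, C, -1)
            arg = win.argmax(axis=2)
            out[:, :, i, j] = win.max(axis=2)
            # translate the in-window argmax to an offset in the H*W plane
            mask[:, :, i, j] = (hs + arg // ksize[1]) * W + (ws + arg % ksize[1])
    return out, mask
```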
diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc
index 5431a1657c300d9c60100616ca7ea2e1196824ec..9121609f10e6aba6bcb999f2a9cc436412e212b5 100644
--- a/paddle/operators/softmax_with_cross_entropy_op.cc
+++ b/paddle/operators/softmax_with_cross_entropy_op.cc
@@ -46,7 +46,7 @@ class SoftmaxWithCrossEntropyOpMaker
               "(Tensor, default: Tensor<float>), A 2-D tensor. The cross "
               "entropy loss with shape [N x 1].");
     AddAttr<bool>(
-        "softLabel",
+        "soft_label",
        "(bool, default: false), A flag to indicate whether to interpretate "
         "the given labels as soft labels.")
        .SetDefault(false);
@@ -100,13 +100,13 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(labels_dims.size(), 2UL,
                       "The labels should be a 2-D tensor.");
 
-    if (ctx->Attrs().Get<bool>("softLabel")) {
+    if (ctx->Attrs().Get<bool>("soft_label")) {
       PADDLE_ENFORCE_EQ(logits_dims[1], labels_dims[1],
-                        "If Attr(softLabel) == true, the 2nd dimension of "
+                        "If Attr(soft_label) == true, the 2nd dimension of "
                         "Input(X) and Input(Label) should be equal.");
     } else {
       PADDLE_ENFORCE_EQ(labels_dims[1], 1UL,
-                        "If Attr(softLabel) == false, the 2nd dimension of "
+                        "If Attr(soft_label) == false, the 2nd dimension of "
                         "Input(Label) should be 1.");
     }
 
@@ -142,13 +142,13 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(labels_dims.size(), 2UL,
                       "The labels should be a 2-D tensor.");
 
-    if (ctx->Attrs().Get<bool>("softLabel")) {
+    if (ctx->Attrs().Get<bool>("soft_label")) {
       PADDLE_ENFORCE_EQ(softmax_dims[1], labels_dims[1],
-                        "When Attr(softLabel) == true, the 2nd dimension of "
+                        "When Attr(soft_label) == true, the 2nd dimension of "
                         "Input(X) and Input(Label) should be equal.");
     } else {
       PADDLE_ENFORCE_EQ(labels_dims[1], 1UL,
-                        "When Attr(softLabel) == false, the 2nd dimension of "
+                        "When Attr(soft_label) == false, the 2nd dimension of "
                         "Input(Label) should be 1.");
     }
 
diff --git a/paddle/operators/softmax_with_cross_entropy_op.cu b/paddle/operators/softmax_with_cross_entropy_op.cu
index 2bc53ecf871eb1800a920ba85e8eac31d7037efe..d03a1a76585bc79633d089b776ca07ba908085ba 100644
--- a/paddle/operators/softmax_with_cross_entropy_op.cu
+++ b/paddle/operators/softmax_with_cross_entropy_op.cu
@@ -70,7 +70,7 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> {
                                                logits, softmax);
     math::CrossEntropyFunctor<platform::GPUPlace, T>()(
         context.device_context(), loss, softmax, labels,
-        context.Attr<bool>("softLabel"));
+        context.Attr<bool>("soft_label"));
   }
 };
 
@@ -93,7 +93,7 @@ class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> {
     int block = 512;
     int grid = (batch_size * class_num + block - 1) / block;
 
-    if (context.Attr<bool>("softLabel")) {
+    if (context.Attr<bool>("soft_label")) {
       const T* label_data = labels->data<T>();
       SoftCrossEntropyGradientKernel<T><<<
           grid, block, 0, reinterpret_cast<const platform::CUDADeviceContext&>(
diff --git a/paddle/operators/softmax_with_cross_entropy_op.h b/paddle/operators/softmax_with_cross_entropy_op.h
index cffd422f1827b646a8abcd881fdcb5455e6a663a..66d7bc1569e124096f30b6cd91fe22189506e4a5 100644
--- a/paddle/operators/softmax_with_cross_entropy_op.h
+++ b/paddle/operators/softmax_with_cross_entropy_op.h
@@ -44,7 +44,7 @@ class SoftmaxWithCrossEntropyKernel : public framework::OpKernel<T> {
                                                logits, softmax);
     math::CrossEntropyFunctor<platform::CPUPlace, T>()(
         context.device_context(), loss, softmax, labels,
-        context.Attr<bool>("softLabel"));
+        context.Attr<bool>("soft_label"));
   }
 };
 
@@ -60,7 +60,7 @@ class SoftmaxWithCrossEntropyGradKernel : public framework::OpKernel<T> {
     logit_grad->ShareDataWith(*context.Input<Tensor>("Softmax"));
     const int class_num = logit_grad->dims()[1];
 
-    if (context.Attr<bool>("softLabel")) {
+    if (context.Attr<bool>("soft_label")) {
       auto out_grad_mat = EigenMatrix<T>::From(*out_grad);
       auto logit_grad_mat = EigenMatrix<T>::From(*logit_grad);
       auto lbl_mat = EigenMatrix<T>::From(*labels);
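The fused op exists because the gradient of softmax followed by cross entropy collapses to `softmax - label`, which is why the grad kernels above start from the saved `Softmax` output. A NumPy sketch of both modes (an illustrative helper of ours, not the kernel; the soft-label branch assumes label rows sum to 1):

```python
import numpy as np

def softmax_with_cross_entropy(logits, labels, soft_label=False):
    shifted = logits - logits.max(axis=1, keepdims=True)  # numerical stability
    softmax = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    N = logits.shape[0]
    if soft_label:
        loss = (-labels * np.log(softmax)).sum(axis=1, keepdims=True)
        grad = softmax - labels            # d(loss)/d(logits)
    else:
        idx = labels.flatten()
        onehot = np.zeros_like(softmax)
        onehot[np.arange(N), idx] = 1.0
        loss = -np.log(softmax[np.arange(N), idx]).reshape(N, 1)
        grad = softmax - onehot
    return loss, grad
```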
diff --git a/python/paddle/v2/framework/tests/test_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_cross_entropy_op.py
index 4ea14da7fd3d84870965d62514d6a79b4926a6ec..919b6c3f6745a9c6115e7af857c1a30354305f89 100644
--- a/python/paddle/v2/framework/tests/test_cross_entropy_op.py
+++ b/python/paddle/v2/framework/tests/test_cross_entropy_op.py
@@ -49,7 +49,7 @@ class TestCrossEntropyOp2(OpTest):
 
         self.inputs = {"X": X, "Label": label}
         self.outputs = {"Y": cross_entropy}
-        self.attrs = {"softLabel": True}
+        self.attrs = {"soft_label": True}
 
     def test_check_output(self):
         self.check_output()
@@ -82,7 +82,7 @@ class TestCrossEntropyOp3(OpTest):
 
         self.inputs = {"X": X, "Label": label.astype(np.float32)}
         self.outputs = {"Y": cross_entropy}
-        self.attrs = {"softLabel": True}
+        self.attrs = {"soft_label": True}
 
     def test_check_output(self):
         self.check_output()
diff --git a/python/paddle/v2/framework/tests/test_pool2d_op.py b/python/paddle/v2/framework/tests/test_pool2d_op.py
index 2941fda81b23998072810d8c6f6597a6f3db7e30..3fcd8941d4f8a8638db0009b368734c234e702f6 100644
--- a/python/paddle/v2/framework/tests/test_pool2d_op.py
+++ b/python/paddle/v2/framework/tests/test_pool2d_op.py
@@ -56,8 +56,8 @@ class TestPool2d_Op(OpTest):
             'strides': self.strides,
             'paddings': self.paddings,
             'ksize': self.ksize,
-            'poolingType': self.pool_type,
-            'globalPooling': self.global_pool,
+            'pooling_type': self.pool_type,
+            'global_pooling': self.global_pool,
         }
 
         self.outputs = {'Out': output}
diff --git a/python/paddle/v2/framework/tests/test_pool3d_op.py b/python/paddle/v2/framework/tests/test_pool3d_op.py
index 8792b492e3da6541f71185be82b8bfc4f52d821d..f4e938041fa0ae9d0760023afdbf2f3052b244ea 100644
--- a/python/paddle/v2/framework/tests/test_pool3d_op.py
+++ b/python/paddle/v2/framework/tests/test_pool3d_op.py
@@ -64,8 +64,8 @@ class TestPool3d_Op(OpTest):
             'strides': self.strides,
             'paddings': self.paddings,
             'ksize': self.ksize,
-            'poolingType': self.pool_type,
-            'globalPooling': self.global_pool,
+            'pooling_type': self.pool_type,
+            'global_pooling': self.global_pool,
         }
 
         self.outputs = {'Out': output}
diff --git a/python/paddle/v2/framework/tests/test_pool_max_op.py b/python/paddle/v2/framework/tests/test_pool_max_op.py
index f0f8aa6089c74d31702a6a5d37362099205d96b2..b78f9bba05c5af38806f6cabb0e53379f8aa0526 100644
--- a/python/paddle/v2/framework/tests/test_pool_max_op.py
+++ b/python/paddle/v2/framework/tests/test_pool_max_op.py
@@ -86,7 +86,7 @@ class TestMaxPoolWithIndex_Op(OpTest):
             'strides': self.strides,
             'paddings': self.paddings,
             'ksize': self.ksize,
-            'globalPooling': self.global_pool,
+            'global_pooling': self.global_pool,
         }
 
         self.inputs = {'X': input}
diff --git a/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py
index 377d07fb5927a108e9bd39ab227da4f40a9cd447..05ba954c0b8655b92b12f9cc686ef048c4d84bbc 100644
--- a/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py
+++ b/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py
@@ -57,7 +57,7 @@ class TestSoftmaxWithCrossEntropyOp2(OpTest):
 
         self.inputs = {"Logits": logits, "Label": labels}
         self.outputs = {"Softmax": softmax, "Loss": cross_entropy}
-        self.attrs = {"softLabel": True}
+        self.attrs = {"soft_label": True}
 
     def test_check_output(self):
         self.check_output()