diff --git a/doc/design/register_grad_op.md b/doc/design/register_grad_op.md
new file mode 100644
index 0000000000000000000000000000000000000000..12b04fb2713d8bb862c08ddb538ddf860bb2983c
--- /dev/null
+++ b/doc/design/register_grad_op.md
@@ -0,0 +1,89 @@
+# Design Doc: Gradient Operators Registration
+
+
+## The Problem Posed
+
+In the current operator registration mechanism, the programmer registers, for each operator, a *gradient operator creator* function that takes a C++ operator instance and returns the corresponding gradient operator instance.
+
+However, since we decided to separate the *compilation* and *execution* of DL models, we need to reshape this creator so that it takes a protobuf `OpDesc` message and returns the corresponding message(s).
+
+Moreover, the new registration mechanism needs to support the fact that an operator's gradient computation might be a composition of operators.
+
+## Current Implementation
+
+`OpInfo` objects are stored in an associative map whose key is the operator type. The `grad_op_type_` field indicates the type of the associated gradient operator. An operator creates its gradient operator by looking up the gradient operator's `OpInfo::creator_`. The pseudocode is
+
+```cpp
+struct OpInfo {
+  std::function<OperatorBase*(...)> creator_;
+  std::string grad_op_type_;
+  ...
+};
+
+map<string, OpInfo> OpInfoMap;
+
+OperatorBase* CreateGradientOperator(const OperatorBase& op) {
+  return OpInfoMap.at(op.Type()).creator_(...);
+}
+```
+
+## Proposed Solution
+
+The mapping from an operator to its gradient operators is a function. The interface of that function is:
+
+```cpp
+// (OpDesc) --> vector<OpDesc>
+using GradOpDescMaker = std::function<vector<OpDesc>(const OpDesc&)>;
+```
+
+The function takes the `OpDesc` of the forward operator and returns one or many gradient operator descriptions.
+
+The `GradOpDescMaker` will be registered in `OpInfo`, replacing the `grad_op_type_` field. The `OpInfo` should be
+
+```cpp
+struct OpInfo {
+  GradOpDescMaker grad_op_maker_;
+  ...
+};
+```
+
+The `grad_op_maker_` is `nullptr` if the operator does not have any associated gradient operators.
+
+We should change the registration macros at the same time. In the current solution, there is no difference between forward and backward operators, so `REGISTER_OP` just registers one operator. If the new `REGISTER_OPERATOR` macro also takes an `OpProtoAndCheckerMaker` and a `GradOpDescMaker`, we can simply list them in the same macro; this can be implemented with a macro that uses `__VA_ARGS__`.
+
+The user interface should be
+
+```cpp
+vector<OpDesc> MinusOpGradMaker(OpDesc) {...}
+REGISTER_OPERATOR(minus, MinusOp, MinusOpProtoAndCheckerMaker, MinusOpGradMaker);
+// Developers can still manually implement the gradient operator.
+REGISTER_OPERATOR(minus_grad, MinusGradOp);
+```
+
+The interface of the current `REGISTER_OP` macro need not change: internally, `REGISTER_OP` invokes `REGISTER_OPERATOR` twice and generates a `GradOpDescMaker` for the forward operator.
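+
+One possible shape of these two macros is sketched below. This is only a sketch: `OperatorRegistrar` is the registrar class introduced by this change, while `DefaultGradOpDescMaker` is a hypothetical generated maker that emits a single `OpDesc` whose type is the forward type plus the `_grad` suffix, mirroring the old `grad_op_type_` convention.
+
+```cpp
+// Forward every class listed after the operator type to OperatorRegistrar
+// via __VA_ARGS__; the registrar dispatches each class to the matching
+// OpInfo field (creator_, proto_/checker_, or grad_op_maker_) by base class.
+#define REGISTER_OPERATOR(op_type, ...)                      \
+  static ::paddle::framework::OperatorRegistrar<__VA_ARGS__> \
+      __op_registrar_##op_type##__(#op_type)
+
+// Keep the old five-argument interface: register the forward operator
+// together with a generated default maker, then register the backward
+// operator itself.
+#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, \
+                    grad_op_class)                                   \
+  REGISTER_OPERATOR(op_type, op_class, op_maker_class,              \
+                    ::paddle::framework::DefaultGradOpDescMaker);    \
+  REGISTER_OPERATOR(grad_op_type, grad_op_class)
+```
+
+For example, the existing registration below keeps compiling unchanged: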
+
+```cpp
+REGISTER_OP(minus, MinusOp, MinusOpProtoAndCheckerMaker, minus_grad, MinusGradOp);
+```
diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt
index 8a5d8532bb32db917b893f7f59039e08d85c8c34..9140854a96ccac583adce4c89f7b134360360c6d 100644
--- a/paddle/framework/CMakeLists.txt
+++ b/paddle/framework/CMakeLists.txt
@@ -22,14 +22,14 @@ cc_library(attribute SRCS attribute.cc DEPS framework_proto)
 cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc DEPS attribute)
 cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute)
 cc_test(op_proto_maker_test SRCS op_proto_maker_test.cc DEPS op_proto_maker)
-cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto)
+cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto proto_desc)
 cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope)
 cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry)
 cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator proto_desc)
 cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info)
 cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry)
-cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry add_op)
+cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry sum_op)
 
 py_proto_compile(framework_py_proto SRCS framework.proto)
 # Generate an empty __init__.py to make framework_py_proto as a valid python module.
diff --git a/paddle/framework/details/op_registry.h b/paddle/framework/details/op_registry.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2516ccc1eec21682276c2fddf049984453404d4
--- /dev/null
+++ b/paddle/framework/details/op_registry.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#pragma once
+
+#include "paddle/framework/op_info.h"
+#include "paddle/framework/op_proto_maker.h"
+#include "paddle/framework/operator.h"
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+enum OpInfoFillType {
+  kOperator = 0,
+  kOpProtoAndCheckerMaker = 1,
+  kGradOpDescMaker = 2
+};
+
+template <typename T>
+struct OpInfoFillTypeID {
+  static constexpr OpInfoFillType ID() {
+    return std::is_base_of<OperatorBase, T>::value
+               ? kOperator
+               : (std::is_base_of<OpProtoAndCheckerMaker, T>::value
+                      ? kOpProtoAndCheckerMaker
+                      : (std::is_base_of<GradOpDescMakerBase, T>::value
+                             ? kGradOpDescMaker
+                             : static_cast<OpInfoFillType>(-1)));
+  }
+};
+
+template <typename T, OpInfoFillType = OpInfoFillTypeID<T>::ID()>
+struct OpInfoFiller;
+
+template <size_t I, bool at_end, typename... ARGS>
+class OperatorRegistrarRecursive;
+
+template <size_t I, typename... ARGS>
+class OperatorRegistrarRecursive<I, false, ARGS...> {
+ public:
+  using T = typename std::tuple_element<I, std::tuple<ARGS...>>::type;
+  OperatorRegistrarRecursive(const char* op_type, OpInfo* info) {
+    OpInfoFiller<T> fill;
+    fill(op_type, info);
+    constexpr auto size = sizeof...(ARGS);
+    OperatorRegistrarRecursive<I + 1, I + 1 == size, ARGS...> reg(op_type,
+                                                                  info);
+    (void)(reg);
+  }
+};
+
+template <size_t I, typename... ARGS>
+class OperatorRegistrarRecursive<I, true, ARGS...> {
+ public:
+  OperatorRegistrarRecursive(const char* op_type, OpInfo* info) {}
+};
+
+template <typename T>
+struct OpInfoFiller<T, kOperator> {
+  void operator()(const char* op_type, OpInfo* info) const {
+    info->creator_ = [](const std::string& type, const VariableNameMap& inputs,
+                        const VariableNameMap& outputs,
+                        const AttributeMap& attrs) {
+      return new T(type, inputs, outputs, attrs);
+    };
+  }
+};
+
+template <typename T>
+struct OpInfoFiller<T, kOpProtoAndCheckerMaker> {
+  void operator()(const char* op_type, OpInfo* info) const {
+    info->proto_ = new OpProto;
+    info->checker_ = new OpAttrChecker();
+    auto maker = T(info->proto_, info->checker_);
+    maker.Validate();
+    info->proto_->set_type(op_type);
+    PADDLE_ENFORCE(
+        info->proto_->IsInitialized(),
+        "Fail to initialize %s's OpProto, because %s is not initialized",
+        op_type, info->proto_->InitializationErrorString());
+  }
+};
+
+template <typename T>
+struct OpInfoFiller<T, kGradOpDescMaker> {
+  void operator()(const char* op_type, OpInfo* info) const {
+    info->grad_op_maker_ = new T();
+  }
+};
+}  // namespace details
+
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc
index d09892f81bea34415d454b017258fd2a0d4575db..55c5fa420e2441260999ba15cf86484f9fd6e890 100644
--- a/paddle/framework/grad_op_builder_test.cc
+++ b/paddle/framework/grad_op_builder_test.cc
@@ -3,7 +3,7 @@
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/operator.h"
 
-USE_OP(add);
+USE_OP(sum);
 
 namespace paddle {
 namespace framework {
@@ -41,17 +41,24 @@ namespace f = paddle::framework;
 
 TEST(GradOpBuilder, AddTwo) {
   std::shared_ptr<f::OperatorBase> add_op(f::OpRegistry::CreateOp(
-      "add", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {}));
+      "sum", {{"X", {"x", "y"}}}, {{"Out", {"out"}}}, {}));
   std::shared_ptr<f::OperatorBase> grad_add_op =
       f::OpRegistry::CreateGradOp(*add_op);
-  EXPECT_EQ(grad_add_op->Inputs().size(), 4UL);
-  EXPECT_EQ(grad_add_op->Outputs().size(), 2UL);
-  EXPECT_EQ(grad_add_op->Input("X"), "x");
-  EXPECT_EQ(grad_add_op->Input("Y"), "y");
-  EXPECT_EQ(grad_add_op->Input("Out"), "out");
+
+  EXPECT_EQ(grad_add_op->Inputs().size(), 1UL);
+  EXPECT_EQ(grad_add_op->Outputs().size(), 1UL);
   EXPECT_EQ(grad_add_op->Input(f::GradVarName("Out")), f::GradVarName("out"));
-  EXPECT_EQ(grad_add_op->Output(f::GradVarName("X")), f::GradVarName("x"));
-  EXPECT_EQ(grad_add_op->Output(f::GradVarName("Y")), f::GradVarName("y"));
+  auto &outputs = grad_add_op->Outputs(f::GradVarName("X"));
+  EXPECT_EQ(2UL, outputs.size());
+  auto in_output = [&outputs](const std::string &name) {
+    for (auto &output_name : outputs) {
+      if (output_name == name) return true;
+    }
+    return false;
+  };
+
+  EXPECT_TRUE(in_output(f::GradVarName("x")));
+  EXPECT_TRUE(in_output(f::GradVarName("y")));
 }
 
 REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad, f::NOP);
diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h
index b98d8f23a14cf6fbe787953ad16b5c9ab99222ad..6d1ee4dece14affa78ccf4ff1d9c7f09992f127f 100644
--- a/paddle/framework/op_info.h
+++ b/paddle/framework/op_info.h
@@ -17,8 +17,8 @@
 #include
 #include
 #include
-
 #include "paddle/framework/attribute.h"
+#include "paddle/framework/op_desc.h"
 
 namespace paddle {
 namespace framework {
@@ -29,11 +29,18 @@ using OpCreator = std::function<OperatorBase*(const std::string&, const VariableNameMap&, const VariableNameMap&, const AttributeMap&)>;
 
+class GradOpDescMakerBase {
+ public:
+  virtual ~GradOpDescMakerBase() = default;
+  virtual std::vector<OpDescBind> operator()(const OpDescBind&) const = 0;
+};
+
 struct OpInfo {
   OpCreator creator_;
   std::string grad_op_type_;
-  OpProto* proto_;
-  OpAttrChecker* checker_;
+  GradOpDescMakerBase* grad_op_maker_{nullptr};
+  OpProto* proto_{nullptr};
+  OpAttrChecker* checker_{nullptr};
 
   bool HasOpProtoAndChecker() const {
     return proto_ != nullptr && checker_ != nullptr;
diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h
index 4db38badaea8ae22d9ad47951f4941f3bdeb401a..4ee2c7d27561c3855059c42e1604f353c65bfa41 100644
--- a/paddle/framework/op_registry.h
+++ b/paddle/framework/op_registry.h
@@ -21,49 +21,42 @@ limitations under the License. */
 #include
 #include
 #include "paddle/framework/attribute.h"
+#include "paddle/framework/details/op_registry.h"
 #include "paddle/framework/framework.pb.h"
 #include "paddle/framework/grad_op_builder.h"
-#include "paddle/framework/op_info.h"
-#include "paddle/framework/op_proto_maker.h"
 #include "paddle/framework/operator.h"
 #include "paddle/framework/scope.h"
 
 namespace paddle {
 namespace framework {
 
+template <typename... ARGS>
+struct OperatorRegistrar {
+  explicit OperatorRegistrar(const char* op_type) : op_type(op_type) {
+    PADDLE_ENFORCE(!OpInfoMap::Instance().Has(op_type),
+                   "'%s' is registered more than once.", op_type);
+    static_assert(sizeof...(ARGS) != 0,
+                  "OperatorRegistrar should be invoked at least by OpClass");
+    details::OperatorRegistrarRecursive<0, false, ARGS...>(op_type, &info);
+  }
+
+  ~OperatorRegistrar() { OpInfoMap::Instance().Insert(op_type, info); }
+
+  const char* op_type;
+
+  OpInfo info;
+};
+
 class OpRegistry {
  public:
  template <typename OpType, typename ProtoMakerType, typename GradOpType>
  static void RegisterOp(const std::string& op_type,
                         const std::string& grad_op_type) {
-    PADDLE_ENFORCE(!OpInfoMap::Instance().Has(op_type),
-                   "'%s' is registered more than once.", op_type);
-    OpInfo op_info;
-    op_info.creator_ = [](
-        const std::string& type, const VariableNameMap& inputs,
-        const VariableNameMap& outputs, const AttributeMap& attrs) {
-      return new OpType(type, inputs, outputs, attrs);
-    };
-    op_info.grad_op_type_ = grad_op_type;
-    if (std::type_index(typeid(ProtoMakerType)) !=
-        std::type_index(typeid(NOPMaker))) {
-      op_info.proto_ = new OpProto;
-      op_info.checker_ = new OpAttrChecker;
-      auto maker = ProtoMakerType(op_info.proto_, op_info.checker_);
-      maker.Validate();
-      op_info.proto_->set_type(op_type);
-      PADDLE_ENFORCE(
-          op_info.proto_->IsInitialized(),
-          "Fail to initialize %s's OpProto, because %s is not initialized",
-          op_type, op_info.proto_->InitializationErrorString());
-    } else {
-      op_info.proto_ = nullptr;
-      op_info.checker_ = nullptr;
-    }
-    OpInfoMap::Instance().Insert(op_type, op_info);
+    OperatorRegistrar<OpType, ProtoMakerType> reg(op_type.c_str());
+    reg.info.grad_op_type_ = grad_op_type;
     // register gradient op
     if (!grad_op_type.empty()) {
-      RegisterOp<GradOpType, NOPMaker, NOP>(grad_op_type, "");
+      OperatorRegistrar<GradOpType> grad_reg(grad_op_type.c_str());
     }
   }
diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc
index b6fc0409d5cb22b13352df41b8e911c79bc4825a..f89f40b444f893d898595db50e245824a19284f6 100644
--- a/paddle/framework/op_registry_test.cc
+++ b/paddle/framework/op_registry_test.cc
@@ -173,3 +173,14 @@ TEST(OpRegistry, CustomChecker) {
   int test_attr = op->Attr<int>("test_attr");
   ASSERT_EQ(test_attr, 4);
 }
+
+class CosineOpComplete : public paddle::framework::CosineOp {
+ public:
+  DEFINE_OP_CONSTRUCTOR(CosineOpComplete, paddle::framework::CosineOp);
+  DEFINE_OP_CLONE_METHOD(CosineOpComplete);
+};
+
+TEST(OperatorRegistrar, Test) {
+  using namespace paddle::framework;
+  OperatorRegistrar<CosineOpComplete, CosineOpProtoAndCheckerMaker> reg("cos");
+}
\ No newline at end of file
diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc
deleted file mode 100644
index 3914d1323083ede6a7ea07e7b4ef76b9e4afd26d..0000000000000000000000000000000000000000
--- a/paddle/operators/add_op.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/operators/add_op.h"
-
-namespace paddle {
-namespace operators {
-
-class AddOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of AddOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of AddOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of AddOp should not be null.");
-
-    auto x_dims = ctx->GetInputDim("X");
-    auto y_dims = ctx->GetInputDim("Y");
-    PADDLE_ENFORCE_EQ(x_dims, y_dims,
-                      "Two input of Add Op's dimension must be same.");
-    ctx->SetOutputDim("Out", x_dims);
-  }
-};
-
-class AddOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  AddOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "The first input of add op");
-    AddInput("Y", "The second input of add op");
-    AddOutput("Out", "The output of add op");
-    AddComment(R"DOC(
-Two Element Add Operator.
-
-The equation is: Out = X + Y
-)DOC");
-  }
-};
-
-class AddOpGrad : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {}
-};
-
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-REGISTER_OP(add, ops::AddOp, ops::AddOpMaker, add_grad, ops::AddOpGrad);
-
-REGISTER_OP_CPU_KERNEL(add, ops::AddKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/add_op.cu b/paddle/operators/add_op.cu
deleted file mode 100644
index d9c6d20a6c320b59e57ed25da3dd8b093833f8c7..0000000000000000000000000000000000000000
--- a/paddle/operators/add_op.cu
+++ /dev/null
@@ -1,18 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License. */
-
-#include "paddle/operators/add_op.h"
-
-namespace ops = paddle::operators;
-REGISTER_OP_GPU_KERNEL(add, ops::AddKernel<paddle::platform::GPUPlace, float>);
diff --git a/paddle/operators/add_op.h b/paddle/operators/add_op.h
deleted file mode 100644
index 75163032a1ff11a1f18cfd0a4ff7289ff0cb66bf..0000000000000000000000000000000000000000
--- a/paddle/operators/add_op.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-#include "paddle/framework/eigen.h"
-#include "paddle/framework/op_registry.h"
-
-namespace paddle {
-namespace operators {
-
-using Tensor = framework::Tensor;
-template <typename T, int MajorType = Eigen::RowMajor,
-          typename IndexType = Eigen::DenseIndex>
-using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
-
-template <typename Place, typename T>
-class AddKernel : public framework::OpKernel {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    auto* input0 = context.Input<Tensor>("X");
-    auto* input1 = context.Input<Tensor>("Y");
-    auto* output = context.Output<Tensor>("Out");
-
-    output->mutable_data<T>(context.GetPlace());
-
-    auto X = EigenVector<T>::Flatten(*input0);
-    auto Y = EigenVector<T>::Flatten(*input1);
-    auto Z = EigenVector<T>::Flatten(*output);
-
-    auto place = context.GetEigenDevice<Place>();
-
-    Z.device(place) = X + Y;
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc
index 8f62a9f4db8d39edc11949df513aebf4fa257d45..5d76313aeb96c0c8204f64aee1057f753ec85d6b 100644
--- a/paddle/operators/sum_op.cc
+++ b/paddle/operators/sum_op.cc
@@ -43,8 +43,10 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   SumOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "the input tensors of sum operator.").AsDuplicable();
-    AddOutput("Out", "the output tensor of sum operator.");
+    AddInput("X", "the input tensors of sum operator.")
+        .AsDuplicable()
+        .NotInGradient();
+    AddOutput("Out", "the output tensor of sum operator.").NotInGradient();
     AddComment(R"DOC(
 Sum the input tensors.
diff --git a/python/paddle/v2/framework/tests/test_add_op.py b/python/paddle/v2/framework/tests/test_add_op.py
deleted file mode 100644
index 3ca34d9b9fc2b7b54cc25ca0e0d1a08a71e37c52..0000000000000000000000000000000000000000
--- a/python/paddle/v2/framework/tests/test_add_op.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import unittest
-import numpy as np
-from op_test import OpTest
-
-
-class TestAddOp(OpTest):
-    def setUp(self):
-        self.op_type = "add"
-        self.inputs = {
-            'X': np.random.random((102, 105)).astype("float32"),
-            'Y': np.random.random((102, 105)).astype("float32")
-        }
-        self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}
-
-    def test_check_output(self):
-        self.check_output()
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_cond_op.py b/python/paddle/v2/framework/tests/test_cond_op.py
index 3698ce9c8ed5c021826af622a53ee742e9b22552..76323b5e10c59822b4de82a70ebd57b3e57c8392 100644
--- a/python/paddle/v2/framework/tests/test_cond_op.py
+++ b/python/paddle/v2/framework/tests/test_cond_op.py
@@ -15,7 +15,7 @@ class PySimpleCond(object):
         for i in range(1, 10, 2):
             array[i] = 0
         self.cond = np.array(array)
-        self.x = np.ones(shape=(10, 1))
+        self.x = np.ones(shape=(10, 1)).astype("float32")
 
     def forward(self):
         self.index_t = np.where(self.cond == 1)
diff --git a/python/paddle/v2/framework/tests/test_gradient_checker.py b/python/paddle/v2/framework/tests/test_gradient_checker.py
deleted file mode 100644
index 85117bf9600975ea5d61dfb5b34335792bf6d8b2..0000000000000000000000000000000000000000
--- a/python/paddle/v2/framework/tests/test_gradient_checker.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import unittest
-import numpy as np
-import paddle.v2.framework.core as core
-from op_test import get_numeric_gradient
-from op_test import create_op
-
-
-class GetNumericGradientTest(unittest.TestCase):
-    def test_add_op(self):
-        x = np.random.random((10, 1)).astype("float32")
-        y = np.random.random((10, 1)).astype("float32")
-        z = x + y
-        scope = core.Scope()
-        add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict())
-        arr = get_numeric_gradient(scope, add_op, {'X': x,
-                                                   'Y': y}, 'X', ['Out'])
-        self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4)
-
-    def test_softmax_op(self):
-        def stable_softmax(x):
-            """Compute the softmax of vector x in a numerically stable way."""
-            shiftx = x - np.max(x)
-            exps = np.exp(shiftx)
-            return exps / np.sum(exps)
-
-        def label_softmax_grad(Y, dY):
-            dX = Y * 0.0
-            for i in range(Y.shape[0]):
-                d = np.dot(Y[i, :], dY[i, :])
-                dX[i, :] = Y[i, :] * (dY[i, :] - d)
-            return dX
-
-        X = np.random.random((2, 2)).astype("float32")
-        Y = np.apply_along_axis(stable_softmax, 1, X)
-        dY = np.ones(Y.shape)
-        dX = label_softmax_grad(Y, dY)
-
-        scope = core.Scope()
-        softmax_op = create_op(scope, "softmax", {"X": X}, {"Y": Y}, dict())
-
-        arr = get_numeric_gradient(scope, softmax_op, {"X": X}, "X", "Y")
-        np.testing.assert_almost_equal(arr, dX, decimal=1e-2)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_net.py b/python/paddle/v2/framework/tests/test_net.py
index 50cfb855f2b01d8fd32342855d46716da7e07856..8503257feb8e1a5802f3f889f72c559a2aaa583a 100644
--- a/python/paddle/v2/framework/tests/test_net.py
+++ b/python/paddle/v2/framework/tests/test_net.py
@@ -15,7 +15,7 @@ def fc(X, W, Y):
 class TestNet(unittest.TestCase):
     def test_net_all(self):
         net = core.Net.create()
-        op1 = Operator("add", X="X", Y="Y", Out="Out")
+        op1 = Operator("sum", X=["X", "Y"], Out="Out")
         net.append_op(op1)
 
         net2 = core.Net.create()
@@ -26,7 +26,7 @@ class TestNet(unittest.TestCase):
         expected = '''
Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}.
-    Op(add), inputs:{X[X], Y[Y]}, outputs:{Out[Out]}.
+    Op(sum), inputs:{X[X, Y]}, outputs:{Out[Out]}.
     Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
         Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
             Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}.
diff --git a/python/paddle/v2/framework/tests/test_operator.py b/python/paddle/v2/framework/tests/test_operator.py
index 040556322d79cbb594eb9af585a5b9920d7ab625..98f6b2f5ee639120557cb85b3ada6d2931f7d0d2 100644
--- a/python/paddle/v2/framework/tests/test_operator.py
+++ b/python/paddle/v2/framework/tests/test_operator.py
@@ -193,10 +193,10 @@ class TestOpDescCreationMethod(unittest.TestCase):
 
 class TestOpCreations(unittest.TestCase):
     def test_all(self):
-        add_op = op.Operator("add", X="a", Y="b", Out="z")
+        add_op = op.Operator("sum", X=["a", "b"], Out="z")
         self.assertIsNotNone(add_op)
         # Invoke C++ DebugString()
-        self.assertEqual('Op(add), inputs:{X[a], Y[b]}, outputs:{Out[z]}.',
+        self.assertEqual('Op(sum), inputs:{X[a, b]}, outputs:{Out[z]}.',
                          str(add_op))