diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt
index feadc9be4ba074ea300707c259f1a59cecaa5d6a..9140854a96ccac583adce4c89f7b134360360c6d 100644
--- a/paddle/framework/CMakeLists.txt
+++ b/paddle/framework/CMakeLists.txt
@@ -29,7 +29,7 @@ cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry)
 cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator proto_desc)
 cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info)
 cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry)
-cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry add_op)
+cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry sum_op)
 py_proto_compile(framework_py_proto SRCS framework.proto)
 # Generate an empty __init__.py to make framework_py_proto as a valid python module.
diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc
index d09892f81bea34415d454b017258fd2a0d4575db..55c5fa420e2441260999ba15cf86484f9fd6e890 100644
--- a/paddle/framework/grad_op_builder_test.cc
+++ b/paddle/framework/grad_op_builder_test.cc
@@ -3,7 +3,7 @@
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/operator.h"
 
-USE_OP(add);
+USE_OP(sum);
 
 namespace paddle {
 namespace framework {
@@ -41,17 +41,24 @@ namespace f = paddle::framework;
 
 TEST(GradOpBuilder, AddTwo) {
   std::shared_ptr<f::OperatorBase> add_op(f::OpRegistry::CreateOp(
-      "add", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {}));
+      "sum", {{"X", {"x", "y"}}}, {{"Out", {"out"}}}, {}));
   std::shared_ptr<f::OperatorBase> grad_add_op =
       f::OpRegistry::CreateGradOp(*add_op);
-  EXPECT_EQ(grad_add_op->Inputs().size(), 4UL);
-  EXPECT_EQ(grad_add_op->Outputs().size(), 2UL);
-  EXPECT_EQ(grad_add_op->Input("X"), "x");
-  EXPECT_EQ(grad_add_op->Input("Y"), "y");
-  EXPECT_EQ(grad_add_op->Input("Out"), "out");
+
+  EXPECT_EQ(grad_add_op->Inputs().size(), 1UL);
+  EXPECT_EQ(grad_add_op->Outputs().size(), 1UL);
   EXPECT_EQ(grad_add_op->Input(f::GradVarName("Out")), f::GradVarName("out"));
-  EXPECT_EQ(grad_add_op->Output(f::GradVarName("X")), f::GradVarName("x"));
-  EXPECT_EQ(grad_add_op->Output(f::GradVarName("Y")), f::GradVarName("y"));
+  auto &outputs = grad_add_op->Outputs(f::GradVarName("X"));
+  EXPECT_EQ(2UL, outputs.size());
+  auto in_output = [&outputs](const std::string &name) {
+    for (auto &output_name : outputs) {
+      if (output_name == name) return true;
+    }
+    return false;
+  };
+
+  EXPECT_TRUE(in_output(f::GradVarName("x")));
+  EXPECT_TRUE(in_output(f::GradVarName("y")));
 }
 
 REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad, f::NOP);
diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc
deleted file mode 100644
index 3914d1323083ede6a7ea07e7b4ef76b9e4afd26d..0000000000000000000000000000000000000000
--- a/paddle/operators/add_op.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/operators/add_op.h"
-
-namespace paddle {
-namespace operators {
-
-class AddOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of AddOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of AddOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of AddOp should not be null.");
-
-    auto x_dims = ctx->GetInputDim("X");
-    auto y_dims = ctx->GetInputDim("Y");
-    PADDLE_ENFORCE_EQ(x_dims, y_dims,
-                      "Two input of Add Op's dimension must be same.");
-    ctx->SetOutputDim("Out", x_dims);
-  }
-};
-
-class AddOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  AddOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "The first input of add op");
-    AddInput("Y", "The second input of add op");
-    AddOutput("Out", "The output of add op");
-    AddComment(R"DOC(
-Two Element Add Operator.
-
-The equation is: Out = X + Y
-)DOC");
-  }
-};
-
-class AddOpGrad : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {}
-};
-
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-REGISTER_OP(add, ops::AddOp, ops::AddOpMaker, add_grad, ops::AddOpGrad);
-
-REGISTER_OP_CPU_KERNEL(add, ops::AddKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/add_op.cu b/paddle/operators/add_op.cu
deleted file mode 100644
index d9c6d20a6c320b59e57ed25da3dd8b093833f8c7..0000000000000000000000000000000000000000
--- a/paddle/operators/add_op.cu
+++ /dev/null
@@ -1,18 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License. */
-
-#include "paddle/operators/add_op.h"
-
-namespace ops = paddle::operators;
-REGISTER_OP_GPU_KERNEL(add, ops::AddKernel<paddle::platform::GPUPlace, float>);
diff --git a/paddle/operators/add_op.h b/paddle/operators/add_op.h
deleted file mode 100644
index 75163032a1ff11a1f18cfd0a4ff7289ff0cb66bf..0000000000000000000000000000000000000000
--- a/paddle/operators/add_op.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-#include "paddle/framework/eigen.h"
-#include "paddle/framework/op_registry.h"
-
-namespace paddle {
-namespace operators {
-
-using Tensor = framework::Tensor;
-template <typename T, int MajorType = Eigen::RowMajor,
-          typename IndexType = Eigen::DenseIndex>
-using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
-
-template <typename Place, typename T>
-class AddKernel : public framework::OpKernel {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    auto* input0 = context.Input<Tensor>("X");
-    auto* input1 = context.Input<Tensor>("Y");
-    auto* output = context.Output<Tensor>("Out");
-
-    output->mutable_data<T>(context.GetPlace());
-
-    auto X = EigenVector<T>::Flatten(*input0);
-    auto Y = EigenVector<T>::Flatten(*input1);
-    auto Z = EigenVector<T>::Flatten(*output);
-
-    auto place = context.GetEigenDevice<Place>();
-
-    Z.device(place) = X + Y;
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc
index 8f62a9f4db8d39edc11949df513aebf4fa257d45..5d76313aeb96c0c8204f64aee1057f753ec85d6b 100644
--- a/paddle/operators/sum_op.cc
+++ b/paddle/operators/sum_op.cc
@@ -43,8 +43,10 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   SumOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "the input tensors of sum operator.").AsDuplicable();
-    AddOutput("Out", "the output tensor of sum operator.");
+    AddInput("X", "the input tensors of sum operator.")
+        .AsDuplicable()
+        .NotInGradient();
+    AddOutput("Out", "the output tensor of sum operator.").NotInGradient();
     AddComment(R"DOC(
 Sum the input tensors.
diff --git a/python/paddle/v2/framework/tests/test_add_op.py b/python/paddle/v2/framework/tests/test_add_op.py
deleted file mode 100644
index 3ca34d9b9fc2b7b54cc25ca0e0d1a08a71e37c52..0000000000000000000000000000000000000000
--- a/python/paddle/v2/framework/tests/test_add_op.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import unittest
-import numpy as np
-from op_test import OpTest
-
-
-class TestAddOp(OpTest):
-    def setUp(self):
-        self.op_type = "add"
-        self.inputs = {
-            'X': np.random.random((102, 105)).astype("float32"),
-            'Y': np.random.random((102, 105)).astype("float32")
-        }
-        self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}
-
-    def test_check_output(self):
-        self.check_output()
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_cond_op.py b/python/paddle/v2/framework/tests/test_cond_op.py
index 3698ce9c8ed5c021826af622a53ee742e9b22552..76323b5e10c59822b4de82a70ebd57b3e57c8392 100644
--- a/python/paddle/v2/framework/tests/test_cond_op.py
+++ b/python/paddle/v2/framework/tests/test_cond_op.py
@@ -15,7 +15,7 @@ class PySimpleCond(object):
         for i in range(1, 10, 2):
             array[i] = 0
         self.cond = np.array(array)
-        self.x = np.ones(shape=(10, 1))
+        self.x = np.ones(shape=(10, 1)).astype("float32")
 
     def forward(self):
         self.index_t = np.where(self.cond == 1)
diff --git a/python/paddle/v2/framework/tests/test_gradient_checker.py b/python/paddle/v2/framework/tests/test_gradient_checker.py
deleted file mode 100644
index 85117bf9600975ea5d61dfb5b34335792bf6d8b2..0000000000000000000000000000000000000000
--- a/python/paddle/v2/framework/tests/test_gradient_checker.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import unittest
-import numpy as np
-import paddle.v2.framework.core as core
-from op_test import get_numeric_gradient
-from op_test import create_op
-
-
-class GetNumericGradientTest(unittest.TestCase):
-    def test_add_op(self):
-        x = np.random.random((10, 1)).astype("float32")
-        y = np.random.random((10, 1)).astype("float32")
-        z = x + y
-        scope = core.Scope()
-        add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict())
-        arr = get_numeric_gradient(scope, add_op, {'X': x,
-                                                   'Y': y}, 'X', ['Out'])
-        self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4)
-
-    def test_softmax_op(self):
-        def stable_softmax(x):
-            """Compute the softmax of vector x in a numerically stable way."""
-            shiftx = x - np.max(x)
-            exps = np.exp(shiftx)
-            return exps / np.sum(exps)
-
-        def label_softmax_grad(Y, dY):
-            dX = Y * 0.0
-            for i in range(Y.shape[0]):
-                d = np.dot(Y[i, :], dY[i, :])
-                dX[i, :] = Y[i, :] * (dY[i, :] - d)
-            return dX
-
-        X = np.random.random((2, 2)).astype("float32")
-        Y = np.apply_along_axis(stable_softmax, 1, X)
-        dY = np.ones(Y.shape)
-        dX = label_softmax_grad(Y, dY)
-
-        scope = core.Scope()
-        softmax_op = create_op(scope, "softmax", {"X": X}, {"Y": Y}, dict())
-
-        arr = get_numeric_gradient(scope, softmax_op, {"X": X}, "X", "Y")
-        np.testing.assert_almost_equal(arr, dX, decimal=1e-2)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_net.py b/python/paddle/v2/framework/tests/test_net.py
index 50cfb855f2b01d8fd32342855d46716da7e07856..8503257feb8e1a5802f3f889f72c559a2aaa583a 100644
--- a/python/paddle/v2/framework/tests/test_net.py
+++ b/python/paddle/v2/framework/tests/test_net.py
@@ -15,7 +15,7 @@ def fc(X, W, Y):
 class TestNet(unittest.TestCase):
     def test_net_all(self):
         net = core.Net.create()
-        op1 = Operator("add", X="X", Y="Y", Out="Out")
+        op1 = Operator("sum", X=["X", "Y"], Out="Out")
         net.append_op(op1)
 
         net2 = core.Net.create()
@@ -26,7 +26,7 @@ class TestNet(unittest.TestCase):
         expected = '''
 Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}.
-    Op(add), inputs:{X[X], Y[Y]}, outputs:{Out[Out]}.
+    Op(sum), inputs:{X[X, Y]}, outputs:{Out[Out]}.
     Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
         Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
             Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}.
diff --git a/python/paddle/v2/framework/tests/test_operator.py b/python/paddle/v2/framework/tests/test_operator.py
index 040556322d79cbb594eb9af585a5b9920d7ab625..98f6b2f5ee639120557cb85b3ada6d2931f7d0d2 100644
--- a/python/paddle/v2/framework/tests/test_operator.py
+++ b/python/paddle/v2/framework/tests/test_operator.py
@@ -193,10 +193,10 @@ class TestOpDescCreationMethod(unittest.TestCase):
 
 
 class TestOpCreations(unittest.TestCase):
     def test_all(self):
-        add_op = op.Operator("add", X="a", Y="b", Out="z")
+        add_op = op.Operator("sum", X=["a", "b"], Out="z")
        self.assertIsNotNone(add_op)
         # Invoke C++ DebugString()
-        self.assertEqual('Op(add), inputs:{X[a], Y[b]}, outputs:{Out[z]}.',
+        self.assertEqual('Op(sum), inputs:{X[a, b]}, outputs:{Out[z]}.',
                          str(add_op))
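
The migration pattern implied by the test changes above: the removed binary `add` operator becomes the variadic `sum` operator, whose duplicable input `X` takes a list of variable names. A minimal before/after sketch using the same Python wrapper the tests use (`Operator` from `paddle.v2.framework.op`); the names `a`, `b`, `z` are illustrative:

    from paddle.v2.framework.op import Operator

    # Before (add_op, now removed): one named input per operand.
    #   add_op = Operator("add", X="a", Y="b", Out="z")
    # After: "sum" takes its duplicable input "X" as a list; for two
    # operands the result is the same elementwise addition.
    sum_op = Operator("sum", X=["a", "b"], Out="z")

Because `X` is duplicable, the generated gradient op maps the single `GradVarName("Out")` input back to one `GradVarName("X")` output per element of `X`, which is what the reworked `grad_op_builder_test` asserts.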