Commit 020e45f7 authored by Luo Tao

follow comments to revert add_two_op to add_op

Parent 3ab93275
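This commit is a pure rename of the operator from add_two to add: the C++ registration, the Python bindings, and every test that creates the op by name change together. Below is a minimal sketch of creating the op under its new name, adapted from the grad_op_builder_test.cc hunk in this diff; the main() wrapper is illustrative only and assumes linking against the paddle framework and operator libraries.

    #include <memory>
    #include "paddle/framework/op_registry.h"

    USE_OP(add);  // was USE_OP(add_two) before this commit

    namespace f = paddle::framework;

    int main() {
      // Create the op by its new name, exactly as the updated test does.
      std::shared_ptr<f::OperatorBase> add_op(f::OpRegistry::CreateOp(
          "add", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {}));
      return add_op == nullptr;  // 0 on success
    }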
@@ -25,7 +25,7 @@ cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry)
 cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator)
 cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder)
 cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry)
-cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry add_two_op)
+cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry add_op)
 py_proto_compile(framework_py_proto SRCS framework.proto)
 # Generate an empty __init__.py to make framework_py_proto as a valid python module.
......
@@ -3,7 +3,7 @@
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/operator.h"
-USE_OP(add_two);
+USE_OP(add);
 namespace paddle {
 namespace framework {
@@ -41,7 +41,7 @@ namespace f = paddle::framework;
 TEST(GradOpBuilder, AddTwo) {
   std::shared_ptr<f::OperatorBase> add_op(f::OpRegistry::CreateOp(
-      "add_two", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {}));
+      "add", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {}));
   std::shared_ptr<f::OperatorBase> grad_add_op =
       f::OpRegistry::CreateGradOp(*add_op);
   EXPECT_EQ(grad_add_op->Inputs().size(), 4UL);
......
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/operators/add_two_op.h"
+#include "paddle/operators/add_op.h"
 namespace paddle {
 namespace operators {
@@ -57,7 +57,6 @@ class AddOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle
 namespace ops = paddle::operators;
-REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker, add_two_grad, ops::AddOpGrad);
-REGISTER_OP_CPU_KERNEL(add_two,
-                       ops::AddKernel<paddle::platform::CPUPlace, float>);
+REGISTER_OP(add, ops::AddOp, ops::AddOpMaker, add_grad, ops::AddOpGrad);
+REGISTER_OP_CPU_KERNEL(add, ops::AddKernel<paddle::platform::CPUPlace, float>);
@@ -14,8 +14,7 @@
 #define EIGEN_USE_GPU
 #include "paddle/framework/op_registry.h"
-#include "paddle/operators/add_two_op.h"
+#include "paddle/operators/add_op.h"
 namespace ops = paddle::operators;
-REGISTER_OP_GPU_KERNEL(add_two,
-                       ops::AddKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(add, ops::AddKernel<paddle::platform::GPUPlace, float>);
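For reference, the four registration/use sites after this commit, collected from the hunks above and below. They live in different translation units (apparently add_op.cc, add_op.cu, and the pybind module), so this is an illustrative grouping, not a single compilable file. The name passed to REGISTER_OP, the per-device kernel macros, and USE_OP must all agree, which is why every add_two site is renamed in the same commit:

    REGISTER_OP(add, ops::AddOp, ops::AddOpMaker, add_grad, ops::AddOpGrad);
    REGISTER_OP_CPU_KERNEL(add, ops::AddKernel<paddle::platform::CPUPlace, float>);
    REGISTER_OP_GPU_KERNEL(add, ops::AddKernel<paddle::platform::GPUPlace, float>);
    USE_OP(add);  // pulls the registration into any unit that needs the op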
@@ -30,7 +30,7 @@ limitations under the License. */
 namespace py = pybind11;
-USE_OP(add_two);
+USE_OP(add);
 USE_OP(onehot_cross_entropy);
 USE_OP(sgd);
 USE_OP(mul);
......
@@ -11,7 +11,7 @@ class TestAddOp(unittest.TestCase):
     __metaclass__ = OpTestMeta
     def setUp(self):
-        self.type = "add_two"
+        self.type = "add"
         self.inputs = {
             'X': numpy.random.random((102, 105)).astype("float32"),
             'Y': numpy.random.random((102, 105)).astype("float32")
......
@@ -7,7 +7,7 @@ from gradient_checker import get_numeric_gradient
 class GetNumericGradientTest(unittest.TestCase):
     def test_add_op(self):
-        add_op = Operator('add_two', X="X", Y="Y", Out="Z")
+        add_op = Operator('add', X="X", Y="Y", Out="Z")
         x = numpy.random.random((10, 1)).astype("float32")
         y = numpy.random.random((10, 1)).astype("float32")
......
@@ -15,7 +15,7 @@ def fc(X, W, Y):
 class TestNet(unittest.TestCase):
     def test_net_all(self):
         net = core.Net.create()
-        op1 = Operator("add_two", X="X", Y="Y", Out="Out")
+        op1 = Operator("add", X="X", Y="Y", Out="Out")
         net.append_op(op1)
         net2 = core.Net.create()
@@ -26,7 +26,7 @@ class TestNet(unittest.TestCase):
         expected = '''
 Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}.
-Op(add_two), inputs:{X[X], Y[Y]}, outputs:{Out[Out]}.
+Op(add), inputs:{X[X], Y[Y]}, outputs:{Out[Out]}.
 Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
 Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
 Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}.
......
@@ -193,10 +193,10 @@ class TestOpDescCreationMethod(unittest.TestCase):
 class TestOpCreations(unittest.TestCase):
     def test_all(self):
-        add_op = op.Operator("add_two", X="a", Y="b", Out="z")
+        add_op = op.Operator("add", X="a", Y="b", Out="z")
         self.assertIsNotNone(add_op)
         # Invoke C++ DebugString()
-        self.assertEqual('Op(add_two), inputs:{X[a], Y[b]}, outputs:{Out[z]}.',
+        self.assertEqual('Op(add), inputs:{X[a], Y[b]}, outputs:{Out[z]}.',
                          str(add_op))
......
@@ -146,7 +146,7 @@ class TestRecurrentOp(unittest.TestCase):
         stepnet = core.Net.create()
         x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx")
         h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
-        sum_op = Operator("add_two", X="Wx", Y="Uh", Out="sum")
+        sum_op = Operator("add", X="Wx", Y="Uh", Out="sum")
         sig_op = Operator("sigmoid", X="sum", Y="h@alias")
         for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
......