Commit e786746f authored by F fengjiayi

Merge branch 'backward' of https://github.com/dzhwinter/Paddle into backward

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/fully_connected_op.h"
#include <iostream>
namespace paddle {
namespace framework {
void FCOp::Run(const ScopePtr& scope,
const platform::DeviceContext& dev_ctx) const override {
std::cout << "FC" << std::endl;
}
void FCOp::InferShape(const ScopePtr& scope) const override {}
void FCGradientOp::Run(const ScopePtr& scope,
const platform::DeviceContext& dev_ctx) const override {
std::cout << "FCGrad" << std::endl;
}
void FCGradientOp::InferShape(const ScopePtr& scope) const override {}
REGISTER_OP(my_fc, paddle::framework::FCOp,
paddle::framework::FCOpProtoAndCheckerMaker);
REGISTER_OP(my_fc_grad, paddle::framework::FCGradientOp,
paddle::framework::FCGradientOpProtoAndCheckerMaker);
} // namespace framework
} // namespace paddle
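For context, here is a minimal driver sketch showing how these stubs are exercised. Constructing FCOp directly and running it against a Scope and CPUDeviceContext mirrors the pattern in net_op_test.cc below; the standalone main() wrapper and the scope/device_context include paths are assumptions for illustration only:

#include <memory>

#include "paddle/framework/fully_connected_op.h"
#include "paddle/framework/scope.h"
#include "paddle/platform/device_context.h"

// Sketch only: build the stub op by hand, name its variables, and run it.
int main() {
  auto fc = std::make_shared<paddle::framework::FCOp>();
  fc->inputs_ = {"x", "w1", "b1"};  // variable names resolved via the scope
  fc->outputs_ = {"y"};

  auto scope = std::make_shared<paddle::framework::Scope>();
  paddle::platform::CPUDeviceContext dev_ctx;

  fc->InferShape(scope);    // no-op in this stub
  fc->Run(scope, dev_ctx);  // prints "FC"
  return 0;
}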
@@ -47,6 +47,8 @@ class FCGradientOp : public OperatorBase {
 };

 // class FCGradientOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {};
+REGISTER_OP(my_fc, FCOp, FCOpProtoAndCheckerMaker);
+REGISTER_GRADIENT_OP(my_fc_grad, FCGradientOp);
 } // namespace framework
 } // namespace paddle
@@ -2,6 +2,7 @@
 #include <paddle/framework/net.h>
 #include <paddle/framework/op_registry.h>
 #include <paddle/framework/operator.h>
+#include "paddle/framework/fully_connected_op.h"

 namespace paddle {
 namespace framework {
@@ -31,68 +32,51 @@ void AssertSameVectorWithoutOrder(const std::vector<T>& expected,
   }
 }

-class PlainNetTest : public testing::Test {
- public:
-  virtual void SetUp() {
-    net_ = std::make_shared<PlainNet>();
-    ASSERT_NE(net_, nullptr);
-    auto op1 = std::make_shared<TestOp>();
-    op1->inputs_ = {"x", "w1", "b1"};
-    op1->outputs_ = {"y"};
-    net_->AddOp(op1);
-    auto op2 = std::make_shared<TestOp>();
-    op2->inputs_ = {"y", "w2", "b2"};
-    op2->outputs_ = {"z"};
-    net_->AddOp(op2);
-    net_->CompleteAddOp();
-  }
-  virtual void TearDown() {}
-  virtual void TestBody() {}
-  void TestOpKernel() {
-    AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"}, net_->inputs_);
-    AssertSameVectorWithoutOrder({"y", "z"}, net_->outputs_);
-    auto tmp_idx_iter = net_->attrs_.find("temporary_index");
-    ASSERT_NE(net_->attrs_.end(), tmp_idx_iter);
-    auto& tmp_idx = boost::get<std::vector<int>>(tmp_idx_iter->second);
-    ASSERT_EQ(1UL, tmp_idx.size());
-    ASSERT_EQ("y", net_->outputs_[tmp_idx[0]]);
-    auto scope = std::make_shared<Scope>();
-    platform::CPUDeviceContext dev_ctx;
-    net_->InferShape(scope);
-    net_->Run(scope, dev_ctx);
-    ASSERT_EQ(2, infer_shape_cnt);
-    ASSERT_EQ(2, run_cnt);
-    auto op2 = std::make_shared<TestOp>();
-    ASSERT_THROW(net_->AddOp(op2), EnforceNotMet);
-  }
-  void TestAddBackwardOp() {
-    auto grad_ops = AddBackwardOp(net_);
-    for (auto& op : grad_ops->ops_) {
-      op->DebugString();
-    }
-  }
- private:
-  std::shared_ptr<PlainNet> net_;
-};
 TEST(OpKernel, all) {
-  PlainNetTest net;
-  net.TestOpKernel();
+  auto net = std::make_shared<PlainNet>();
+  ASSERT_NE(net, nullptr);
+  auto op1 = std::make_shared<TestOp>();
+  op1->inputs_ = {"x", "w1", "b1"};
+  op1->outputs_ = {"y"};
+  net->AddOp(op1);
+  auto op2 = std::make_shared<TestOp>();
+  op2->inputs_ = {"y", "w2", "b2"};
+  op2->outputs_ = {"z"};
+  net->AddOp(op2);
+  net->CompleteAddOp();
+  AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"}, net->inputs_);
+  AssertSameVectorWithoutOrder({"y", "z"}, net->outputs_);
+  auto tmp_idx_iter = net->attrs_.find("temporary_index");
+  ASSERT_NE(net->attrs_.end(), tmp_idx_iter);
+  auto& tmp_idx = boost::get<std::vector<int>>(tmp_idx_iter->second);
+  ASSERT_EQ(1UL, tmp_idx.size());
+  ASSERT_EQ("y", net->outputs_[tmp_idx[0]]);
+  auto scope = std::make_shared<Scope>();
+  platform::CPUDeviceContext dev_ctx;
+  net->InferShape(scope);
+  net->Run(scope, dev_ctx);
+  ASSERT_EQ(2, infer_shape_cnt);
+  ASSERT_EQ(2, run_cnt);
+  ASSERT_THROW(net->AddOp(op2), EnforceNotMet);
 }
-TEST(AddBackwardOp, TestAddBackwardOp) {
-  PlainNetTest net;
-  net.TestAddBackwardOp();
+TEST(AddBackwardOp, TestGradOp) {
+  auto net = std::make_shared<PlainNet>();
+  ASSERT_NE(net, nullptr);
+  auto op1 = std::make_shared<FCOp>();
+  op1->inputs_ = {"x", "w1", "b1"};
+  op1->outputs_ = {"y"};
+  net->AddOp(op1);
+  auto grad_ops = AddBackwardOp(net);
+  for (auto& op : grad_ops->ops_) {
+    op->DebugString();
+  }
 }
 } // namespace framework
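The DebugString() loop in TestGradOp only prints the generated ops. A slightly stronger check could look like the sketch below; the non-null and size assertions are assumptions (one gradient op generated per forward op), not something this test asserts:

// Hypothetical tightening of TestGradOp's tail: assert that the backward
// net exists and holds exactly one gradient op for the single forward FCOp.
auto grad_ops = AddBackwardOp(net);
ASSERT_NE(grad_ops, nullptr);
ASSERT_EQ(1UL, grad_ops->ops_.size());
for (auto& op : grad_ops->ops_) {
  op->DebugString();  // e.g. the generated my_fc_grad op
}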
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/framework/net.h"
#include "paddle/framework/fully_connected_op.h"
#include "paddle/framework/op_registry.h"
#include <gtest/gtest.h>
namespace paddle {
namespace framework {
// Placeholder: the test body has not been written yet.
TEST(AddBackwardOp, ALL) {}
} // namespace framework
} // namespace paddle
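A possible body for the placeholder test above, sketched from the TestGradOp pattern in net_op_test.cc (the FCOp wiring and the DebugString() loop are lifted from that test; the null check is an added assumption about what AddBackwardOp returns):

// Sketch of a future body for the empty test: one forward FCOp in,
// its generated backward net out.
TEST(AddBackwardOp, Sketch) {
  auto net = std::make_shared<PlainNet>();
  auto op = std::make_shared<FCOp>();
  op->inputs_ = {"x", "w1", "b1"};
  op->outputs_ = {"y"};
  net->AddOp(op);

  auto grad_net = AddBackwardOp(net);
  ASSERT_NE(grad_net, nullptr);
  for (auto& grad_op : grad_net->ops_) {
    grad_op->DebugString();
  }
}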