Commit 030f4302, authored by Yu Yang

Merge branch 'develop' of github.com:baidu/Paddle into feature/refactorize_framework_proto

@@ -147,8 +147,9 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
     ForEachVarName(grad_op->inputs_, [&no_grad_names,
                                       &net](std::string& grad_input) {
       if (no_grad_names.count(grad_input)) {
-        std::string prefix =
-            grad_input.substr(0, grad_input.size() - kGradVarSuffix.size());
+        // +1 for \0
+        std::string prefix = grad_input.substr(
+            0, grad_input.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1);
         grad_input = prefix + kZeroVarSuffix;

         // If part of input gradient of that operator is not calculated, fill
@@ -184,7 +185,7 @@ std::shared_ptr<OperatorBase> Backward(
   std::unordered_set<std::string> no_grad_names;
   no_grad_names.reserve(no_grad_vars.size());

-  no_grad_names.insert(kEmptyVarName + kGradVarSuffix);
+  no_grad_names.insert(std::string(kEmptyVarName) + kGradVarSuffix);

   for (auto& name : no_grad_vars) {
     no_grad_names.insert(name + kGradVarSuffix);
...
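Because kGradVarSuffix is now a constexpr char array rather than a std::string (see the operator.h hunk further down), its length has to come from sizeof, which counts the trailing '\0'; that is what the "+ 1" compensates for. A minimal standalone sketch of the arithmetic, with variable names chosen here for illustration rather than taken from the commit:

    #include <cassert>
    #include <string>

    constexpr char kGradVarSuffix[] = "@GRAD";  // sizeof == 6: five characters plus '\0'
    constexpr char kZeroVarSuffix[] = "@ZERO";

    int main() {
      std::string grad_input = "W1@GRAD";
      // size() - sizeof(kGradVarSuffix) / sizeof(char) + 1  ==  size() - strlen("@GRAD")
      std::string prefix = grad_input.substr(
          0, grad_input.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1);
      assert(prefix == "W1");
      assert(prefix + kZeroVarSuffix == "W1@ZERO");  // name of the zero-filled variable
      return 0;
    }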
@@ -166,21 +166,17 @@ REGISTER_OP(fc, f::FcOp, f::FcOpMaker);
 REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker);
 REGISTER_GRADIENT_OP(many_output_op, many_output_op_grad, f::EmptyOp);
 TEST(Backward, need_to_be_removed) {}
 //
 // TEST(Backward, simple_op_grad) {
-//   auto fwd = f::OpRegistry::CreateOp(
-//       "rowwise_add", {{"X", {"X"}}, {"b", {"b"}}}, {{"Out", {"Out"}}}, {});
+//   auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
 //   ASSERT_NE(fwd, nullptr);
 //   auto gop = f::OpRegistry::CreateGradOp(*fwd);
 //   ASSERT_EQ(4UL, gop->inputs_.size());
 //   ASSERT_EQ(f::kEmptyVarName, gop->inputs_[0]);
 //   ASSERT_EQ("rowwise_add_grad", gop->type_);
-//   ASSERT_EQ("X" + f::kGradVarSuffix, gop->outputs_[0]);
-//   ASSERT_EQ("b" + f::kGradVarSuffix, gop->outputs_[1]);
+//   ASSERT_EQ(f::GradVarName("X"), gop->outputs_[0]);
+//   ASSERT_EQ(f::GradVarName("b"), gop->outputs_[1]);
 //
-//   ASSERT_EQ("X" + f::kGradVarSuffix, gop->Output("X" + f::kGradVarSuffix));
+//   ASSERT_EQ(f::GradVarName("X"), gop->Output(f::GradVarName("X")));
 //}
 //
 // TEST(Backward, simple_op_not_need_grad) {
@@ -188,7 +184,7 @@ TEST(Backward, need_to_be_removed) {}
 //   ASSERT_NE(fwd, nullptr);
 //   auto gop = f::Backward(*fwd, {"X"});
 //   ASSERT_EQ(std::find(gop->outputs_.begin(), gop->outputs_.end(),
-//                       "X" + f::kGradVarSuffix),
+//                       f::GradVarName("X")),
 //             gop->outputs_.end());
 //
 //   auto no_input_gop = f::Backward(*fwd, {"X", "b"});
@@ -259,18 +255,18 @@ TEST(Backward, need_to_be_removed) {}
 //   all_output.erase(f::kEmptyVarName);
 //
 //   for (auto &out : {"W1", "b1", "hidden0", "W2", "b2"}) {
-//     ASSERT_NE(all_output.find(out + f::kGradVarSuffix), all_output.end());
+//     ASSERT_NE(all_output.find(f::GradVarName(out)), all_output.end());
 //   }
 //
 //   // Not Generated X
-//   ASSERT_EQ(all_output.find("X" + f::kGradVarSuffix), all_output.end());
+//   ASSERT_EQ(all_output.find(f::GradVarName("X")), all_output.end());
 //
 //   ASSERT_EQ(2UL, bwd_net->ops_.size());
 //   ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp());
 //   auto first_fc_grad = static_cast<ops::NetOp *>(bwd_net->ops_[1].get());
 //   ASSERT_EQ(3UL, first_fc_grad->ops_.size());
 //   ASSERT_EQ(f::kEmptyVarName,
-//             first_fc_grad->ops_[2]->Output("A" + f::kGradVarSuffix));
+//             first_fc_grad->ops_[2]->Output(f::GradVarName("A")));
 //}
 //
 // TEST(Backward, net_shared_weight) {
@@ -322,17 +318,15 @@ TEST(Backward, need_to_be_removed) {}
 //   ASSERT_EQ(1UL, fill_zero.inputs_.size());
 //   ASSERT_EQ("Z", fill_zero.inputs_[0]);
 //   ASSERT_EQ(1UL, fill_zero.outputs_.size());
-//   ASSERT_EQ("Z" + f::kZeroVarSuffix, fill_zero.outputs_[0]);
+//   ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.outputs_[0]);
 //
 //   auto &d_many_out = *net->ops_[1];
 //   ASSERT_EQ("many_output_op_grad", d_many_out.type_);
 //   ASSERT_EQ(1UL + 2UL + 2UL, d_many_out.inputs_.size());  // I/O/OG
-//   ASSERT_EQ("Z" + f::kZeroVarSuffix, d_many_out.Input("z" +
-//   f::kGradVarSuffix));
-//   ASSERT_EQ("Y" + f::kGradVarSuffix, d_many_out.Input("y" +
-//   f::kGradVarSuffix));
-//   ASSERT_EQ("X" + f::kGradVarSuffix,
-//             d_many_out.Output("x" + f::kGradVarSuffix));
+//   ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix,
+//             d_many_out.Input(f::GradVarName("z")));
+//   ASSERT_EQ(f::GradVarName("Y"), d_many_out.Input(f::GradVarName("y")));
+//   ASSERT_EQ(f::GradVarName("X"), d_many_out.Output(f::GradVarName("x")));
 //}
 //
 // TEST(Backward, op_part_of_input_are_not_need) {
@@ -342,11 +336,9 @@ TEST(Backward, need_to_be_removed) {}
 //   ASSERT_EQ(grad_mul.type_, "mul_grad");
 //   ASSERT_EQ(grad_mul.inputs_.size(), 2UL + 1UL + 1UL);
 //   ASSERT_EQ(grad_mul.outputs_.size(), 2UL);
-//   ASSERT_EQ(grad_mul.Output("A" + f::kGradVarSuffix), f::kEmptyVarName);
-//   ASSERT_EQ(grad_mul.Output("B" + f::kGradVarSuffix), "b" +
-//   f::kGradVarSuffix);
-//   ASSERT_EQ(grad_mul.Input("Out" + f::kGradVarSuffix),
-//             "out" + f::kGradVarSuffix);
+//   ASSERT_EQ(grad_mul.Output(f::GradVarName("A")), f::kEmptyVarName);
+//   ASSERT_EQ(grad_mul.Output(f::GradVarName("B")), f::GradVarName("b"));
+//   ASSERT_EQ(grad_mul.Input(f::GradVarName("Out")), f::GradVarName("out"));
 //   ASSERT_EQ(grad_mul.Input("A"), "a");
 //   ASSERT_EQ(grad_mul.Input("B"), "b");
 //   ASSERT_EQ(grad_mul.Input("Out"), "out");
...
@@ -85,21 +85,19 @@ TEST(GradOpBuilder, MutiInOut) {
   EXPECT_EQ(grad_test_op->Input("Out1"), "out1");
   EXPECT_EQ(grad_test_op->Inputs("Out2_mult"),
             std::vector<std::string>({"out2_1", "out2_2"}));
-  EXPECT_EQ(grad_test_op->Input("Out1" + f::kGradVarSuffix),
-            "out1" + f::kGradVarSuffix);
-  EXPECT_EQ(grad_test_op->Inputs("Out2_mult" + f::kGradVarSuffix),
-            std::vector<std::string>(
-                {"out2_1" + f::kGradVarSuffix, "out2_2" + f::kGradVarSuffix}));
+  EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out1")),
+            f::GradVarName("out1"));
+  EXPECT_EQ(grad_test_op->Inputs(f::GradVarName("Out2_mult")),
+            std::vector<std::string>(
+                {f::GradVarName("out2_1"), f::GradVarName("out2_2")}));
   ASSERT_EQ(grad_test_op->outputs_.size(), 5UL);
-  EXPECT_EQ(grad_test_op->Output("In1" + f::kGradVarSuffix),
-            "in1" + f::kGradVarSuffix);
-  EXPECT_EQ(grad_test_op->Outputs("In2_mult" + f::kGradVarSuffix),
-            std::vector<std::string>({"in2_1" + f::kGradVarSuffix,
-                                      "in2_2" + f::kGradVarSuffix,
-                                      "in2_3" + f::kGradVarSuffix}));
-  EXPECT_EQ(grad_test_op->Output("In3" + f::kGradVarSuffix),
-            "in3" + f::kGradVarSuffix);
+  EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1"));
+  EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")),
+            std::vector<std::string>({f::GradVarName("in2_1"),
+                                      f::GradVarName("in2_2"),
+                                      f::GradVarName("in2_3")}));
+  EXPECT_EQ(grad_test_op->Output(f::GradVarName("In3")), f::GradVarName("in3"));
 }

 TEST(GradOpBuilder, IOIgnoredInGradient) {
@@ -123,19 +121,18 @@ TEST(GradOpBuilder, IOIgnoredInGradient) {
   EXPECT_EQ(grad_test_op->Inputs("Out1_mult"),
             std::vector<std::string>({"out1_1", "out1_2"}));
   EXPECT_EQ(grad_test_op->Input("Out2"), f::kEmptyVarName);
-  EXPECT_EQ(grad_test_op->Inputs("Out1_mult" + f::kGradVarSuffix),
-            std::vector<std::string>(
-                {"out1_1" + f::kGradVarSuffix, "out1_2" + f::kGradVarSuffix}));
-  EXPECT_EQ(grad_test_op->Input("Out2" + f::kGradVarSuffix),
-            "out2" + f::kGradVarSuffix);
+  EXPECT_EQ(grad_test_op->Inputs(f::GradVarName("Out1_mult")),
+            std::vector<std::string>(
+                {f::GradVarName("out1_1"), f::GradVarName("out1_2")}));
+  EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out2")),
+            f::GradVarName("out2"));
   ASSERT_EQ(grad_test_op->outputs_.size(), 5UL);
-  EXPECT_EQ(grad_test_op->Output("In1" + f::kGradVarSuffix),
-            "in1" + f::kGradVarSuffix);
-  EXPECT_EQ(grad_test_op->Outputs("In2_mult" + f::kGradVarSuffix),
-            std::vector<std::string>(
-                {"in2_1" + f::kGradVarSuffix, "in2_2" + f::kGradVarSuffix}));
-  EXPECT_EQ(grad_test_op->Outputs("In3_mult" + f::kGradVarSuffix),
-            std::vector<std::string>(
-                {"in3_1" + f::kGradVarSuffix, "in3_2" + f::kGradVarSuffix}));
+  EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1"));
+  EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")),
+            std::vector<std::string>(
+                {f::GradVarName("in2_1"), f::GradVarName("in2_2")}));
+  EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In3_mult")),
+            std::vector<std::string>(
+                {f::GradVarName("in3_1"), f::GradVarName("in3_2")}));
 }
@@ -32,19 +32,19 @@ namespace paddle {
 namespace framework {

 /// If a variable is a empty variable, that name will be used.
-const std::string kEmptyVarName = "@EMPTY@";
+constexpr char kEmptyVarName[] = "@EMPTY@";

 /// If a variable is a temporary variable, that name will be set in Python,
 /// but it will be convert to a unique name in scope after OpCreator.
-const std::string kTempVarName = "@TEMP@";
+constexpr char kTempVarName[] = "@TEMP@";

 /// If a variable's name has a certain suffix, it means that the
 /// variable is the gradient of another varibale.
 /// e.g. Variable "x@GRAD" is the gradient of varibale "x".
-const std::string kGradVarSuffix = "@GRAD";
+constexpr char kGradVarSuffix[] = "@GRAD";

 /// Variables with this suffix are supposed to be filled up with zeros.
-const std::string kZeroVarSuffix = "@ZERO";
+constexpr char kZeroVarSuffix[] = "@ZERO";

 inline std::string GradVarName(const std::string& var_name) {
   return var_name + kGradVarSuffix;
...
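The switch from const std::string to constexpr char[] above is also why backward.cc now wraps kEmptyVarName in std::string before appending kGradVarSuffix: operator+ is not defined for two raw character arrays. A small sketch of the behaviour, assuming only the constants and GradVarName shown in this hunk:

    #include <cassert>
    #include <string>

    constexpr char kEmptyVarName[] = "@EMPTY@";
    constexpr char kGradVarSuffix[] = "@GRAD";

    inline std::string GradVarName(const std::string& var_name) {
      return var_name + kGradVarSuffix;  // std::string + char array is fine
    }

    int main() {
      assert(GradVarName("X") == "X@GRAD");
      // kEmptyVarName + kGradVarSuffix;  // would not compile: char[] + char[]
      assert(std::string(kEmptyVarName) + kGradVarSuffix == "@EMPTY@@GRAD");
      return 0;
    }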
@@ -22,6 +22,7 @@ limitations under the License. */
 #include "paddle/operators/net_op.h"
 #include "paddle/platform/enforce.h"
 #include "paddle/platform/place.h"
+#include "paddle/string/to_string.h"
 #include "pybind11/numpy.h"
 #include "pybind11/pybind11.h"
 #include "pybind11/stl.h"
@@ -186,9 +187,13 @@ All parameter, weight, gradient are variables in Paddle.
   });
   // clang-format on

-  py::class_<paddle::platform::GPUPlace>(m, "GPUPlace").def(py::init<int>());
+  py::class_<platform::GPUPlace>(m, "GPUPlace")
+      .def(py::init<int>())
+      .def("__str__", string::to_string<const platform::GPUPlace &>);

-  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace").def(py::init<>());
+  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace")
+      .def(py::init<>())
+      .def("__str__", string::to_string<const platform::CPUPlace &>);

   py::class_<OperatorBase, std::shared_ptr<OperatorBase>> operator_base(
       m, "Operator");
...
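The __str__ bindings work because string::to_string<const platform::GPUPlace &> instantiates to an ordinary function of type std::string(const GPUPlace&), which the binding layer can invoke as a method; all it requires is an operator<< for the place type. A pybind-free sketch of that mechanism under those assumptions, with FakePlace as a stand-in type rather than the real platform::GPUPlace:

    #include <iostream>
    #include <sstream>
    #include <string>

    template <typename T>
    std::string to_string(T v) {  // same shape as paddle::string::to_string
      std::ostringstream sout;
      sout << v;
      return sout.str();
    }

    struct FakePlace {
      int device;
    };
    std::ostream& operator<<(std::ostream& os, const FakePlace& p) {
      return os << "GPUPlace(" << p.device << ")";
    }

    int main() {
      // The instantiation is a plain function taking the object by const
      // reference, so it can be passed around as a callback for __str__.
      std::string (*str_fn)(const FakePlace&) = to_string<const FakePlace&>;
      std::cout << str_fn(FakePlace{3}) << "\n";  // prints GPUPlace(3)
      return 0;
    }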
@@ -45,10 +45,8 @@ cc_library(net_op SRCS net_op.cc DEPS op_registry)
 cc_test(net_op_test SRCS net_op_test.cc DEPS net_op)

 op_library(add_op SRCS add_op.cc add_op.cu)
-cc_test(add_op_test SRCS add_op_test.cc DEPS add_op)

 op_library(mean_op SRCS mean_op.cc mean_op.cu)
-cc_test(mean_op_test SRCS mean_op_test.cc DEPS mean_op)

 op_library(mul_op SRCS mul_op.cc mul_op.cu)
 op_library(rowwise_add_op SRCS rowwise_add_op.cu rowwise_add_op.cc)
@@ -59,7 +57,6 @@ op_library(cross_entropy_op SRCS cross_entropy_op.cc cross_entropy_op.cu)
 op_library(fill_zeros_like_op SRCS fill_zeros_like_op.cc fill_zeros_like_op.cu)
 op_library(sgd_op SRCS sgd_op.cc sgd_op.cu)
-cc_test(sgd_op_test SRCS sgd_op_test.cc DEPS sgd_op)

 op_library(fc_op
     SRCS fc_op.cc
...
(The dropped tests' sources, add_op_test.cc, mean_op_test.cc, and sgd_op_test.cc, are deleted from the tree as well; their full contents appear among the hunks below.)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#define private public
#include "paddle/framework/op_registry.h"

USE_OP(add_two);

TEST(AddOp, GetOpProto) {
  auto& protos = paddle::framework::OpRegistry::protos();
  auto it = protos.find("add_two");
  ASSERT_NE(it, protos.end());
  auto& op_creators = paddle::framework::OpRegistry::op_creators();
  auto it1 = op_creators.find("add_two_grad");
  ASSERT_NE(it1, op_creators.end());
}
@@ -39,7 +39,7 @@ class MeanOpMaker : public framework::OpProtoAndCheckerMaker {
 class MeanGradOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
-    ctx.Output<Tensor>("X" + framework::kGradVarSuffix)
+    ctx.Output<Tensor>(framework::GradVarName("X"))
         ->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
...
@@ -48,10 +48,10 @@ template <typename Place, typename T>
 class MeanGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto OG = context.Input<Tensor>("Out" + framework::kGradVarSuffix);
+    auto OG = context.Input<Tensor>(framework::GradVarName("Out"));
     PADDLE_ENFORCE(framework::product(OG->dims()) == 1,
                    "Mean Gradient should be scalar");
-    auto IG = context.Output<Tensor>("X" + framework::kGradVarSuffix);
+    auto IG = context.Output<Tensor>(framework::GradVarName("X"));
     IG->mutable_data<T>(context.GetPlace());
     T ig_size = (T)framework::product(IG->dims());
...
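For reference, the math this kernel implements: Out = (1/N) * sum(X_i), so every element of X@GRAD equals Out@GRAD / N. A standalone sketch of that computation on a plain array (the real kernel above works on framework Tensors, and the rest of its body is elided in this hunk):

    #include <cassert>
    #include <vector>

    // d(mean)/dX_i = 1/N: each input-gradient element is the scalar output
    // gradient divided by the number of input elements.
    std::vector<float> MeanGrad(float out_grad, size_t input_size) {
      return std::vector<float>(input_size,
                                out_grad / static_cast<float>(input_size));
    }

    int main() {
      auto ig = MeanGrad(1.0f, 4);
      assert(ig.size() == 4);
      for (float v : ig) {
        assert(v == 0.25f);
      }
      return 0;
    }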
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <paddle/framework/op_registry.h>

USE_OP(mean);

TEST(MeanOp, GetOpProto) {
  auto& protos = paddle::framework::OpRegistry::protos();
  auto it = protos.find("mean");
  ASSERT_NE(it, protos.end());
}
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <paddle/framework/op_registry.h>

USE_OP(sgd);

TEST(SGDOp, GetOpProto) {
  auto& protos = paddle::framework::OpRegistry::protos();
  auto it = protos.find("sgd");
  ASSERT_NE(it, protos.end());
}
@@ -15,11 +15,12 @@ limitations under the License. */
 #pragma once

 #include <execinfo.h>
-#include <paddle/string/printf.h>
 #include <iomanip>
 #include <sstream>
 #include <stdexcept>
 #include <string>
+#include "paddle/string/printf.h"
+#include "paddle/string/to_string.h"

 #ifndef PADDLE_ONLY_CPU
@@ -191,27 +192,11 @@ inline void throw_on_error(T e) {
   PADDLE_ENFORCE(nullptr != (__VAL), #__VAL " should not be null\n%s", \
                  paddle::string::Sprintf("" __VA_ARGS__));

-template <typename T>
-inline std::string enforce_to_string(const T& val) {
-  std::ostringstream sout;
-  sout << val;
-  return sout.str();
-}
-
-template <>
-inline std::string enforce_to_string(const std::string& val) {
-  return val;
-}
-
-template <>
-inline std::string enforce_to_string(const char* const& val) {
-  return std::string(val);
-}
-
 #define __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, __CMP, __INV_CMP, ...) \
   PADDLE_ENFORCE(__VAL0 __CMP __VAL1, \
                  "enforce %s " #__CMP " %s failed, %s " #__INV_CMP " %s\n%s", \
-                 #__VAL0, #__VAL1, \
-                 paddle::platform::enforce_to_string(__VAL0), \
-                 paddle::platform::enforce_to_string(__VAL1), \
+                 #__VAL0, #__VAL1, paddle::string::to_string(__VAL0), \
+                 paddle::string::to_string(__VAL1), \
                  paddle::string::Sprintf("" __VA_ARGS__));

 }  // namespace platform
...
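Routing the compared values through paddle::string::to_string (defined in the new to_string.h shown below) is what changes the expected message in enforce_test.cc from "1.000000 == 1" to "1 == 1": default ostream formatting prints the double 1.0 without fixed decimals. A minimal sketch of that formatting, using a local copy of the same one-liner rather than the Paddle header:

    #include <cassert>
    #include <sstream>
    #include <string>

    template <typename T>
    std::string to_string(T v) {  // same idea as paddle::string::to_string
      std::ostringstream sout;
      sout << v;
      return sout.str();
    }

    int main() {
      assert(to_string(1.0) == "1");   // default ostream formatting, not "%f"
      assert(to_string(1UL) == "1");
      assert(to_string("abc") == "abc");
      return 0;
    }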
@@ -9,6 +9,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

+#include <array>
+#include <iostream>
 #include <memory>

 #include "gtest/gtest.h"
@@ -83,7 +85,7 @@ TEST(ENFORCE_NE, FAIL) {
   } catch (paddle::platform::EnforceNotMet error) {
     caught_exception = true;
     EXPECT_TRUE(HasPrefix(StringPiece(error.what()),
-                          "enforce 1.0 != 1UL failed, 1.000000 == 1"))
+                          "enforce 1.0 != 1UL failed, 1 == 1"))
         << error.what() << " does not have expected prefix";
   }
   EXPECT_TRUE(caught_exception);
@@ -176,3 +178,39 @@ TEST(ENFORCE_NOT_NULL, FAIL) {
   }
   EXPECT_TRUE(caught_exception);
 }
+
+struct Dims {
+  size_t dims_[4];
+  bool operator==(const Dims& o) const {
+    for (size_t i = 0; i < 4; ++i) {
+      if (dims_[i] != o.dims_[i]) return false;
+    }
+    return true;
+  }
+};
+
+std::ostream& operator<<(std::ostream& os, const Dims& d) {
+  for (size_t i = 0; i < 4; ++i) {
+    if (i == 0) {
+      os << "[";
+    }
+    os << d.dims_[i];
+    if (i == 4 - 1) {
+      os << "]";
+    } else {
+      os << ", ";
+    }
+  }
+  return os;
+}
+
+TEST(ENFORCE_USER_DEFINED_CLASS, EQ) {
+  Dims a{{1, 2, 3, 4}}, b{{1, 2, 3, 4}};
+  PADDLE_ENFORCE_EQ(a, b);
+}
+
+TEST(ENFORCE_USER_DEFINED_CLASS, NE) {
+  Dims a{{1, 2, 3, 4}}, b{{5, 6, 7, 8}};
+  ASSERT_THROW(PADDLE_ENFORCE_EQ(a, b), paddle::platform::EnforceNotMet);
+}
\ No newline at end of file
@@ -2,3 +2,4 @@ cc_library(stringpiece SRCS piece.cc)
 cc_test(stringpiece_test SRCS piece_test.cc DEPS stringpiece glog gflags)
 cc_test(stringprintf_test SRCS printf_test.cc DEPS glog gflags)
+cc_test(to_string_test SRCS to_string_test.cc)
(The new paddle/string/to_string.h header and its to_string_test.cc test are added whole; both files follow.)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <sstream>
#include <string>

namespace paddle {
namespace string {

template <typename T>
inline std::string to_string(T v) {
  std::ostringstream sout;
  sout << v;
  return sout.str();
}

// Faster std::string/const char* type
template <>
inline std::string to_string(std::string v) {
  return v;
}

template <>
inline std::string to_string(const char* v) {
  return std::string(v);
}

}  // namespace string
}  // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/string/to_string.h"
#include <gtest/gtest.h>
constexpr char kOutputString[] = "User Defined Output";
class UserDefinedClass {
public:
};
std::ostream& operator<<(std::ostream& s, const UserDefinedClass& ins) {
s << kOutputString;
return s;
}
TEST(to_string, normal) {
using namespace paddle::string;
ASSERT_EQ("10", to_string(10));
ASSERT_EQ("abc", to_string("abc"));
ASSERT_EQ("1.2", to_string(1.2));
}
TEST(to_string, user_defined) {
using namespace paddle::string;
UserDefinedClass instance;
ASSERT_EQ(kOutputString, to_string(instance));
}
\ No newline at end of file
@@ -92,15 +92,27 @@ def get_numeric_gradient(op,

 class GradientChecker(unittest.TestCase):
-    def __is_close(self, numeric_grads, scope, max_relative_error):
+    def assert_is_close(self, numeric_grads, scope, max_relative_error,
+                        msg_prefix):
         for name in numeric_grads:
-            op_grad = numpy.array(
-                scope.find_var(grad_var_name(name)).get_tensor())
-            is_close = numpy.allclose(
-                numeric_grads[name], op_grad, rtol=max_relative_error, atol=100)
-            if not is_close:
-                return False
-        return True
+            b = numpy.array(scope.find_var(grad_var_name(name)).get_tensor())
+            a = numeric_grads[name]
+
+            abs_a = numpy.abs(a)
+            # if abs_a is nearly zero, then use abs error for a, not relative
+            # error.
+            abs_a[abs_a < 1e-3] = 1
+
+            diff_mat = numpy.abs(a - b) / abs_a
+            max_diff = numpy.max(diff_mat)
+
+            def err_msg():
+                offset = numpy.argmax(diff_mat > max_relative_error)
+                return "%s Variable %s max gradient diff %f over limit %f, the first " \
+                       "error element is %d" % (
+                           msg_prefix, name, max_diff, max_relative_error, offset)
+
+            self.assertLessEqual(max_diff, max_relative_error, err_msg())

     def check_grad(self,
                    forward_op,
@@ -145,7 +157,8 @@ class GradientChecker(unittest.TestCase):
         # get numeric gradient
         for check_name in inputs_to_check:
             numeric_grad[check_name] = \
-                get_numeric_gradient(forward_op, input_vars, output_name, check_name)
+                get_numeric_gradient(forward_op, input_vars, output_name,
+                                     check_name)

         # get operator gradient according to different device
         for place in places:
@@ -187,15 +200,8 @@ class GradientChecker(unittest.TestCase):
             backward_op.infer_shape(scope)
             backward_op.run(scope, ctx)

-            if isinstance(place, core.CPUPlace):
-                msg = "CPU kernel gradient is not close to numeric gradient"
-            else:
-                if isinstance(place, core.GPUPlace):
-                    msg = "GPU kernel gradient is not close to numeric gradient"
-                else:
-                    raise ValueError("unknown place " + type(place))
-            self.assertTrue(
-                self.__is_close(numeric_grad, scope, max_relative_error), msg)
+            self.assert_is_close(numeric_grad, scope, max_relative_error,
+                                 "Gradient Check On %s" % str(place))

 if __name__ == '__main__':
...
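A quick worked example of the new tolerance rule, with purely illustrative numbers: if a numeric-gradient element is a = 2.0 and the operator gradient is b = 2.1, abs_a stays 2.0 and the relative diff is |2.0 - 2.1| / 2.0 = 0.05, which passes a max_relative_error of 0.1. If instead a = 0.0001, it falls under the 1e-3 floor, abs_a is reset to 1, and |a - b| is judged as an absolute error, avoiding division by a near-zero reference, which the removed numpy.allclose call had handled only by using a very large atol=100.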