diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc
index 8f39b79cf729774d9057b3b68d71d2b4907da77a..3e16949c9b0e3557050d2de2b0408b01bcfafb88 100644
--- a/paddle/framework/backward.cc
+++ b/paddle/framework/backward.cc
@@ -147,8 +147,9 @@ std::shared_ptr<OperatorBase> BackwardRecursive(
     ForEachVarName(grad_op->inputs_,
                    [&no_grad_names, &net](std::string& grad_input) {
                      if (no_grad_names.count(grad_input)) {
-                       std::string prefix =
-                           grad_input.substr(0, grad_input.size() - kGradVarSuffix.size());
+                       // +1 for \0
+                       std::string prefix = grad_input.substr(
+                           0, grad_input.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1);
                        grad_input = prefix + kZeroVarSuffix;
 
                        // If part of input gradient of that operator is not calculated, fill
@@ -184,7 +185,7 @@ std::shared_ptr<OperatorBase> Backward(
   std::unordered_set<std::string> no_grad_names;
   no_grad_names.reserve(no_grad_vars.size());
 
-  no_grad_names.insert(kEmptyVarName + kGradVarSuffix);
+  no_grad_names.insert(std::string(kEmptyVarName) + kGradVarSuffix);
 
   for (auto& name : no_grad_vars) {
     no_grad_names.insert(name + kGradVarSuffix);
diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc
index cd02469a265a2193bea74764b9e630ab5af694ea..9a38d54acf8b328851c2af6bf858adaffb93d0a1 100644
--- a/paddle/framework/backward_test.cc
+++ b/paddle/framework/backward_test.cc
@@ -166,21 +166,17 @@ REGISTER_OP(fc, f::FcOp, f::FcOpMaker);
 REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker);
 REGISTER_GRADIENT_OP(many_output_op, many_output_op_grad, f::EmptyOp);
 
-TEST(Backward, need_to_be_removed) {}
-
-//
 // TEST(Backward, simple_op_grad) {
-//   auto fwd = f::OpRegistry::CreateOp(
-//       "rowwise_add", {{"X", {"X"}}, {"b", {"b"}}}, {{"Out", {"Out"}}}, {});
+//   auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
 //   ASSERT_NE(fwd, nullptr);
 //   auto gop = f::OpRegistry::CreateGradOp(*fwd);
 //   ASSERT_EQ(4UL, gop->inputs_.size());
 //   ASSERT_EQ(f::kEmptyVarName, gop->inputs_[0]);
 //   ASSERT_EQ("rowwise_add_grad", gop->type_);
-//   ASSERT_EQ("X" + f::kGradVarSuffix, gop->outputs_[0]);
-//   ASSERT_EQ("b" + f::kGradVarSuffix, gop->outputs_[1]);
+//   ASSERT_EQ(f::GradVarName("X"), gop->outputs_[0]);
+//   ASSERT_EQ(f::GradVarName("b"), gop->outputs_[1]);
 //
-//   ASSERT_EQ("X" + f::kGradVarSuffix, gop->Output("X" + f::kGradVarSuffix));
+//   ASSERT_EQ(f::GradVarName("X"), gop->Output(f::GradVarName("X")));
 //}
 //
 // TEST(Backward, simple_op_not_need_grad) {
@@ -188,7 +184,7 @@ TEST(Backward, need_to_be_removed) {}
 //   ASSERT_NE(fwd, nullptr);
 //   auto gop = f::Backward(*fwd, {"X"});
 //   ASSERT_EQ(std::find(gop->outputs_.begin(), gop->outputs_.end(),
-//                       "X" + f::kGradVarSuffix),
+//                       f::GradVarName("X")),
 //             gop->outputs_.end());
 //
 //   auto no_input_gop = f::Backward(*fwd, {"X", "b"});
@@ -259,18 +255,18 @@ TEST(Backward, need_to_be_removed) {}
 //   all_output.erase(f::kEmptyVarName);
 //
 //   for (auto &out : {"W1", "b1", "hidden0", "W2", "b2"}) {
-//     ASSERT_NE(all_output.find(out + f::kGradVarSuffix), all_output.end());
+//     ASSERT_NE(all_output.find(f::GradVarName(out)), all_output.end());
 //   }
 //
 //   // Not Generated X
-//   ASSERT_EQ(all_output.find("X" + f::kGradVarSuffix), all_output.end());
+//   ASSERT_EQ(all_output.find(f::GradVarName("X")), all_output.end());
 //
 //   ASSERT_EQ(2UL, bwd_net->ops_.size());
 //   ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp());
 //   auto first_fc_grad = static_cast<ops::NetOp *>(bwd_net->ops_[1].get());
 //   ASSERT_EQ(3UL, first_fc_grad->ops_.size());
 //   ASSERT_EQ(f::kEmptyVarName,
-//             first_fc_grad->ops_[2]->Output("A" + f::kGradVarSuffix));
+//             first_fc_grad->ops_[2]->Output(f::GradVarName("A")));
 //}
 //
 // TEST(Backward, net_shared_weight) {
@@ -322,17 +318,15 @@ TEST(Backward, need_to_be_removed) {}
 //   ASSERT_EQ(1UL, fill_zero.inputs_.size());
 //   ASSERT_EQ("Z", fill_zero.inputs_[0]);
 //   ASSERT_EQ(1UL, fill_zero.outputs_.size());
-//   ASSERT_EQ("Z" + f::kZeroVarSuffix, fill_zero.outputs_[0]);
+//   ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.outputs_[0]);
 //
 //   auto &d_many_out = *net->ops_[1];
 //   ASSERT_EQ("many_output_op_grad", d_many_out.type_);
 //   ASSERT_EQ(1UL + 2UL + 2UL, d_many_out.inputs_.size());  // I/O/OG
-//   ASSERT_EQ("Z" + f::kZeroVarSuffix, d_many_out.Input("z" +
-//   f::kGradVarSuffix));
-//   ASSERT_EQ("Y" + f::kGradVarSuffix, d_many_out.Input("y" +
-//   f::kGradVarSuffix));
-//   ASSERT_EQ("X" + f::kGradVarSuffix,
-//             d_many_out.Output("x" + f::kGradVarSuffix));
+//   ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix,
+//             d_many_out.Input(f::GradVarName("z")));
+//   ASSERT_EQ(f::GradVarName("Y"), d_many_out.Input(f::GradVarName("y")));
+//   ASSERT_EQ(f::GradVarName("X"), d_many_out.Output(f::GradVarName("x")));
 //}
 //
 // TEST(Backward, op_part_of_input_are_not_need) {
@@ -342,11 +336,9 @@ TEST(Backward, need_to_be_removed) {}
 //   ASSERT_EQ(grad_mul.type_, "mul_grad");
 //   ASSERT_EQ(grad_mul.inputs_.size(), 2UL + 1UL + 1UL);
 //   ASSERT_EQ(grad_mul.outputs_.size(), 2UL);
-//   ASSERT_EQ(grad_mul.Output("A" + f::kGradVarSuffix), f::kEmptyVarName);
-//   ASSERT_EQ(grad_mul.Output("B" + f::kGradVarSuffix), "b" +
-//   f::kGradVarSuffix);
-//   ASSERT_EQ(grad_mul.Input("Out" + f::kGradVarSuffix),
-//             "out" + f::kGradVarSuffix);
+//   ASSERT_EQ(grad_mul.Output(f::GradVarName("A")), f::kEmptyVarName);
+//   ASSERT_EQ(grad_mul.Output(f::GradVarName("B")), f::GradVarName("b"));
+//   ASSERT_EQ(grad_mul.Input(f::GradVarName("Out")), f::GradVarName("out"));
 //   ASSERT_EQ(grad_mul.Input("A"), "a");
 //   ASSERT_EQ(grad_mul.Input("B"), "b");
 //   ASSERT_EQ(grad_mul.Input("Out"), "out");
diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc
index f308abfa79fad1bcdd18918e465cb0b3ff87c05a..1feae415a66673daf8b649abf48f670fc80b9a59 100644
--- a/paddle/framework/grad_op_builder_test.cc
+++ b/paddle/framework/grad_op_builder_test.cc
@@ -85,21 +85,19 @@ TEST(GradOpBuilder, MutiInOut) {
   EXPECT_EQ(grad_test_op->Input("Out1"), "out1");
   EXPECT_EQ(grad_test_op->Inputs("Out2_mult"),
             std::vector<std::string>({"out2_1", "out2_2"}));
-  EXPECT_EQ(grad_test_op->Input("Out1" + f::kGradVarSuffix),
-            "out1" + f::kGradVarSuffix);
-  EXPECT_EQ(grad_test_op->Inputs("Out2_mult" + f::kGradVarSuffix),
+  EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out1")),
+            f::GradVarName("out1"));
+  EXPECT_EQ(grad_test_op->Inputs(f::GradVarName("Out2_mult")),
             std::vector<std::string>(
-                {"out2_1" + f::kGradVarSuffix, "out2_2" + f::kGradVarSuffix}));
+                {f::GradVarName("out2_1"), f::GradVarName("out2_2")}));
 
   ASSERT_EQ(grad_test_op->outputs_.size(), 5UL);
-  EXPECT_EQ(grad_test_op->Output("In1" + f::kGradVarSuffix),
-            "in1" + f::kGradVarSuffix);
-  EXPECT_EQ(grad_test_op->Outputs("In2_mult" + f::kGradVarSuffix),
-            std::vector<std::string>({"in2_1" + f::kGradVarSuffix,
-                                      "in2_2" + f::kGradVarSuffix,
-                                      "in2_3" + f::kGradVarSuffix}));
-  EXPECT_EQ(grad_test_op->Output("In3" + f::kGradVarSuffix),
-            "in3" + f::kGradVarSuffix);
+  EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1"));
+  EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")),
+            std::vector<std::string>({f::GradVarName("in2_1"),
+                                      f::GradVarName("in2_2"),
+                                      f::GradVarName("in2_3")}));
+  EXPECT_EQ(grad_test_op->Output(f::GradVarName("In3")), f::GradVarName("in3"));
 }
 
 TEST(GradOpBuilder, IOIgnoredInGradient) {
@@ -123,19 +121,18 @@ TEST(GradOpBuilder, IOIgnoredInGradient) {
   EXPECT_EQ(grad_test_op->Inputs("Out1_mult"),
             std::vector<std::string>({"out1_1", "out1_2"}));
   EXPECT_EQ(grad_test_op->Input("Out2"), f::kEmptyVarName);
-  EXPECT_EQ(grad_test_op->Inputs("Out1_mult" + f::kGradVarSuffix),
+  EXPECT_EQ(grad_test_op->Inputs(f::GradVarName("Out1_mult")),
             std::vector<std::string>(
-                {"out1_1" + f::kGradVarSuffix, "out1_2" + f::kGradVarSuffix}));
-  EXPECT_EQ(grad_test_op->Input("Out2" + f::kGradVarSuffix),
-            "out2" + f::kGradVarSuffix);
+                {f::GradVarName("out1_1"), f::GradVarName("out1_2")}));
+  EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out2")),
+            f::GradVarName("out2"));
 
   ASSERT_EQ(grad_test_op->outputs_.size(), 5UL);
-  EXPECT_EQ(grad_test_op->Output("In1" + f::kGradVarSuffix),
-            "in1" + f::kGradVarSuffix);
-  EXPECT_EQ(grad_test_op->Outputs("In2_mult" + f::kGradVarSuffix),
+  EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1"));
+  EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")),
             std::vector<std::string>(
-                {"in2_1" + f::kGradVarSuffix, "in2_2" + f::kGradVarSuffix}));
-  EXPECT_EQ(grad_test_op->Outputs("In3_mult" + f::kGradVarSuffix),
+                {f::GradVarName("in2_1"), f::GradVarName("in2_2")}));
+  EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In3_mult")),
             std::vector<std::string>(
-                {"in3_1" + f::kGradVarSuffix, "in3_2" + f::kGradVarSuffix}));
+                {f::GradVarName("in3_1"), f::GradVarName("in3_2")}));
 }
diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index 15b1c73676143e65cb15cf6fb3fcbd23e7ebf027..fc5db7ce286d5ebe70b2de734f2fac454f1a0794 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -32,19 +32,19 @@ namespace paddle {
 namespace framework {
 
 /// If a variable is a empty variable, that name will be used.
-const std::string kEmptyVarName = "@EMPTY@";
+constexpr char kEmptyVarName[] = "@EMPTY@";
 
 /// If a variable is a temporary variable, that name will be set in Python,
 /// but it will be convert to a unique name in scope after OpCreator.
-const std::string kTempVarName = "@TEMP@";
+constexpr char kTempVarName[] = "@TEMP@";
 
 /// If a variable's name has a certain suffix, it means that the
 /// variable is the gradient of another varibale.
 /// e.g. Variable "x@GRAD" is the gradient of varibale "x".
-const std::string kGradVarSuffix = "@GRAD";
+constexpr char kGradVarSuffix[] = "@GRAD";
 
 /// Variables with this suffix are supposed to be filled up with zeros.
-const std::string kZeroVarSuffix = "@ZERO";
+constexpr char kZeroVarSuffix[] = "@ZERO";
 
 inline std::string GradVarName(const std::string& var_name) {
   return var_name + kGradVarSuffix;
diff --git a/paddle/framework/pybind.cc b/paddle/framework/pybind.cc
index 3b3f33c8e212e9005beccdedf1d6b33e8c4a45ce..94d2a4c68e1a5c9426a6f0a6e3eb07a9a7f27ddb 100644
--- a/paddle/framework/pybind.cc
+++ b/paddle/framework/pybind.cc
@@ -22,6 +22,7 @@ limitations under the License. */
 #include "paddle/operators/net_op.h"
 #include "paddle/platform/enforce.h"
 #include "paddle/platform/place.h"
+#include "paddle/string/to_string.h"
 #include "pybind11/numpy.h"
 #include "pybind11/pybind11.h"
 #include "pybind11/stl.h"
@@ -186,9 +187,13 @@ All parameter, weight, gradient are variables in Paddle.
       });
   // clang-format on
 
-  py::class_<platform::GPUPlace>(m, "GPUPlace").def(py::init<int>());
+  py::class_<platform::GPUPlace>(m, "GPUPlace")
+      .def(py::init<int>())
+      .def("__str__", string::to_string<const platform::GPUPlace &>);
 
-  py::class_<platform::CPUPlace>(m, "CPUPlace").def(py::init<>());
+  py::class_<platform::CPUPlace>(m, "CPUPlace")
+      .def(py::init<>())
+      .def("__str__", string::to_string<const platform::CPUPlace &>);
 
   py::class_<OperatorBase, std::shared_ptr<OperatorBase>> operator_base(
       m, "Operator");
diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt
index 3ce18da887a1bf850b2e5c82b4c0040ac1eb3a1f..1fd51219b169516d9fb99c1ecf1c2119f2c8c358 100644
--- a/paddle/operators/CMakeLists.txt
+++ b/paddle/operators/CMakeLists.txt
@@ -45,10 +45,8 @@ cc_library(net_op SRCS net_op.cc DEPS op_registry)
 cc_test(net_op_test SRCS net_op_test.cc DEPS net_op)
 
 op_library(add_op SRCS add_op.cc add_op.cu)
-cc_test(add_op_test SRCS add_op_test.cc DEPS add_op)
 
 op_library(mean_op SRCS mean_op.cc mean_op.cu)
-cc_test(mean_op_test SRCS mean_op_test.cc DEPS mean_op)
 
 op_library(mul_op SRCS mul_op.cc mul_op.cu)
 op_library(rowwise_add_op SRCS rowwise_add_op.cu rowwise_add_op.cc)
@@ -59,7 +57,6 @@ op_library(cross_entropy_op SRCS cross_entropy_op.cc cross_entropy_op.cu)
 op_library(fill_zeros_like_op SRCS fill_zeros_like_op.cc fill_zeros_like_op.cu)
 
 op_library(sgd_op SRCS sgd_op.cc sgd_op.cu)
-cc_test(sgd_op_test SRCS sgd_op_test.cc DEPS sgd_op)
 
 op_library(fc_op
     SRCS fc_op.cc
diff --git a/paddle/operators/add_op_test.cc b/paddle/operators/add_op_test.cc
deleted file mode 100644
index bf529defb20d27200a28666278db8607b986e2d5..0000000000000000000000000000000000000000
--- a/paddle/operators/add_op_test.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include <gtest/gtest.h>
-#define private public
-#include "paddle/framework/op_registry.h"
-
-USE_OP(add_two);
-
-TEST(AddOp, GetOpProto) {
-  auto& protos = paddle::framework::OpRegistry::protos();
-  auto it = protos.find("add_two");
-  ASSERT_NE(it, protos.end());
-  auto& op_creators = paddle::framework::OpRegistry::op_creators();
-  auto it1 = op_creators.find("add_two_grad");
-  ASSERT_NE(it1, op_creators.end());
-}
diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc
index 2121faf15b9fcce5efd9fa8d63dbc298bb2ed527..2787ac46b7fabae1aa78f1a08fa71dbc39c0b082 100644
--- a/paddle/operators/mean_op.cc
+++ b/paddle/operators/mean_op.cc
@@ -39,7 +39,7 @@ class MeanOpMaker : public framework::OpProtoAndCheckerMaker {
 class MeanGradOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
-    ctx.Output<Tensor>("X" + framework::kGradVarSuffix)
+    ctx.Output<Tensor>(framework::GradVarName("X"))
         ->Resize(ctx.Input<Tensor>("X")->dims());
   }
 };
diff --git a/paddle/operators/mean_op.h b/paddle/operators/mean_op.h
index f3db0a29bb234948d180d964fb82057632ec4414..e8595a14faa7c1b03734f814c78f9cbf1819fbb5 100644
--- a/paddle/operators/mean_op.h
+++ b/paddle/operators/mean_op.h
@@ -48,10 +48,10 @@ template <typename Place, typename T>
 class MeanGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto OG = context.Input<Tensor>("Out" + framework::kGradVarSuffix);
+    auto OG = context.Input<Tensor>(framework::GradVarName("Out"));
     PADDLE_ENFORCE(framework::product(OG->dims()) == 1,
                    "Mean Gradient should be scalar");
-    auto IG = context.Output<Tensor>("X" + framework::kGradVarSuffix);
+    auto IG = context.Output<Tensor>(framework::GradVarName("X"));
     IG->mutable_data<T>(context.GetPlace());
 
     T ig_size = (T)framework::product(IG->dims());
diff --git a/paddle/operators/mean_op_test.cc b/paddle/operators/mean_op_test.cc
deleted file mode 100644
index 375dcd50e130355c60f82b9d39d1b94fb2c911b0..0000000000000000000000000000000000000000
--- a/paddle/operators/mean_op_test.cc
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include <gtest/gtest.h>
-
-#include <paddle/framework/op_registry.h>
-
-USE_OP(mean);
-
-TEST(MeanOp, GetOpProto) {
-  auto& protos = paddle::framework::OpRegistry::protos();
-  auto it = protos.find("mean");
-  ASSERT_NE(it, protos.end());
-}
diff --git a/paddle/operators/sgd_op_test.cc b/paddle/operators/sgd_op_test.cc
deleted file mode 100644
index 75137259f5e608b259b073101353e5818bb17c92..0000000000000000000000000000000000000000
--- a/paddle/operators/sgd_op_test.cc
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include <gtest/gtest.h>
-#include <paddle/framework/op_registry.h>
-USE_OP(sgd);
-TEST(SGDOp, GetOpProto) {
-  auto& protos = paddle::framework::OpRegistry::protos();
-  auto it = protos.find("sgd");
-  ASSERT_NE(it, protos.end());
-}
diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h
index 46293e6c0bb318ec1dadc76a0213ddfdbb8bcd70..337a059fb1494d500be0fd2437e59c863ae1563c 100644
--- a/paddle/platform/enforce.h
+++ b/paddle/platform/enforce.h
@@ -15,11 +15,12 @@ limitations under the License. */
 #pragma once
 
 #include
-#include
 #include
 #include
 #include
 #include
+#include "paddle/string/printf.h"
+#include "paddle/string/to_string.h"
 
 #ifndef PADDLE_ONLY_CPU
@@ -191,27 +192,11 @@ inline void throw_on_error(T e) {
   PADDLE_ENFORCE(nullptr != (__VAL), #__VAL " should not be null\n%s",        \
                  paddle::string::Sprintf("" __VA_ARGS__));
 
-template <typename T>
-inline std::string enforce_to_string(const T& val) {
-  std::ostringstream sout;
-  sout << val;
-  return sout.str();
-}
-template <>
-inline std::string enforce_to_string(const std::string& val) {
-  return val;
-}
-template <>
-inline std::string enforce_to_string(const char* const& val) {
-  return std::string(val);
-}
-
 #define __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, __CMP, __INV_CMP, ...)        \
   PADDLE_ENFORCE(__VAL0 __CMP __VAL1,                                         \
                  "enforce %s " #__CMP " %s failed, %s " #__INV_CMP " %s\n%s", \
-                 #__VAL0, #__VAL1,                                            \
-                 paddle::platform::enforce_to_string(__VAL0),                 \
-                 paddle::platform::enforce_to_string(__VAL1),                 \
+                 #__VAL0, #__VAL1, paddle::string::to_string(__VAL0),         \
+                 paddle::string::to_string(__VAL1),                           \
                  paddle::string::Sprintf("" __VA_ARGS__));
 
 }  // namespace platform
diff --git a/paddle/platform/enforce_test.cc b/paddle/platform/enforce_test.cc
index 5408fce558ab0d9c369aaba22374315fe553ce0f..80bdee3d9dfbe38ef707a6ba60cdb7f7b99714de 100644
--- a/paddle/platform/enforce_test.cc
+++ b/paddle/platform/enforce_test.cc
@@ -9,6 +9,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include
+#include
 #include
 #include "gtest/gtest.h"
 
@@ -83,7 +85,7 @@ TEST(ENFORCE_NE, FAIL) {
   } catch (paddle::platform::EnforceNotMet error) {
     caught_exception = true;
     EXPECT_TRUE(HasPrefix(StringPiece(error.what()),
-                          "enforce 1.0 != 1UL failed, 1.000000 == 1"))
+                          "enforce 1.0 != 1UL failed, 1 == 1"))
         << error.what() << " does not have expected prefix";
   }
   EXPECT_TRUE(caught_exception);
@@ -176,3 +178,39 @@ TEST(ENFORCE_NOT_NULL, FAIL) {
   }
   EXPECT_TRUE(caught_exception);
 }
+
+struct Dims {
+  size_t dims_[4];
+
+  bool operator==(const Dims& o) const {
+    for (size_t i = 0; i < 4; ++i) {
+      if (dims_[i] != o.dims_[i]) return false;
+    }
+    return true;
+  }
+};
+
+std::ostream& operator<<(std::ostream& os, const Dims& d) {
+  for (size_t i = 0; i < 4; ++i) {
+    if (i == 0) {
+      os << "[";
+    }
+    os << d.dims_[i];
+    if (i == 4 - 1) {
+      os << "]";
+    } else {
+      os << ", ";
+    }
+  }
+  return os;
+}
+
+TEST(ENFORCE_USER_DEFINED_CLASS, EQ) {
+  Dims a{{1, 2, 3, 4}}, b{{1, 2, 3, 4}};
+  PADDLE_ENFORCE_EQ(a, b);
+}
+
+TEST(ENFORCE_USER_DEFINED_CLASS, NE) {
+  Dims a{{1, 2, 3, 4}}, b{{5, 6, 7, 8}};
+  ASSERT_THROW(PADDLE_ENFORCE_EQ(a, b), paddle::platform::EnforceNotMet);
+}
\ No newline at end of file
diff --git a/paddle/string/CMakeLists.txt b/paddle/string/CMakeLists.txt
index 5becf62672d0c606c98ea1a1a4383df97088ab05..60667b72873f9422aec1807972a81ab680de2e64 100644
--- a/paddle/string/CMakeLists.txt
+++ b/paddle/string/CMakeLists.txt
@@ -2,3 +2,4 @@ cc_library(stringpiece SRCS piece.cc)
 cc_test(stringpiece_test SRCS piece_test.cc DEPS stringpiece glog gflags)
 
 cc_test(stringprintf_test SRCS printf_test.cc DEPS glog gflags)
+cc_test(to_string_test SRCS to_string_test.cc)
diff --git a/paddle/string/to_string.h b/paddle/string/to_string.h
new file mode 100644
index 0000000000000000000000000000000000000000..4f478b6a36b23bdba8ef3ddae94b3eadf18716c2
--- /dev/null
+++ b/paddle/string/to_string.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#pragma once
+#include <sstream>
+#include <string>
+
+namespace paddle {
+namespace string {
+template <typename T>
+inline std::string to_string(T v) {
+  std::ostringstream sout;
+  sout << v;
+  return sout.str();
+}
+
+// Faster std::string/const char* type
+template <>
+inline std::string to_string(std::string v) {
+  return v;
+}
+
+template <>
+inline std::string to_string(const char* v) {
+  return std::string(v);
+}
+
+}  // namespace string
+}  // namespace paddle
diff --git a/paddle/string/to_string_test.cc b/paddle/string/to_string_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..5ff1b007f1875c7b920a08bd13b8d98cdc5138d3
--- /dev/null
+++ b/paddle/string/to_string_test.cc
@@ -0,0 +1,39 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include "paddle/string/to_string.h"
+#include <gtest/gtest.h>
+
+constexpr char kOutputString[] = "User Defined Output";
+class UserDefinedClass {
+public:
+};
+
+std::ostream& operator<<(std::ostream& s, const UserDefinedClass& ins) {
+  s << kOutputString;
+  return s;
+}
+
+TEST(to_string, normal) {
+  using namespace paddle::string;
+  ASSERT_EQ("10", to_string(10));
+  ASSERT_EQ("abc", to_string("abc"));
+  ASSERT_EQ("1.2", to_string(1.2));
+}
+
+TEST(to_string, user_defined) {
+  using namespace paddle::string;
+  UserDefinedClass instance;
+  ASSERT_EQ(kOutputString, to_string(instance));
+}
\ No newline at end of file
diff --git a/python/paddle/v2/framework/tests/gradient_checker.py b/python/paddle/v2/framework/tests/gradient_checker.py
index b73c4869d14a62a951d8e45dafb14b7523355519..aacc5e88feeb65e08093a35ef85837c916cfd39e 100644
--- a/python/paddle/v2/framework/tests/gradient_checker.py
+++ b/python/paddle/v2/framework/tests/gradient_checker.py
@@ -92,15 +92,27 @@ def get_numeric_gradient(op,
 
 
 class GradientChecker(unittest.TestCase):
-    def __is_close(self, numeric_grads, scope, max_relative_error):
+    def assert_is_close(self, numeric_grads, scope, max_relative_error,
+                        msg_prefix):
         for name in numeric_grads:
-            op_grad = numpy.array(
-                scope.find_var(grad_var_name(name)).get_tensor())
-            is_close = numpy.allclose(
-                numeric_grads[name], op_grad, rtol=max_relative_error, atol=100)
-            if not is_close:
-                return False
-        return True
+            b = numpy.array(scope.find_var(grad_var_name(name)).get_tensor())
+            a = numeric_grads[name]
+
+            abs_a = numpy.abs(a)
+            # if abs_a is nearly zero, then use abs error for a, not relative
+            # error.
+            abs_a[abs_a < 1e-3] = 1
+
+            diff_mat = numpy.abs(a - b) / abs_a
+            max_diff = numpy.max(diff_mat)
+
+            def err_msg():
+                offset = numpy.argmax(diff_mat > max_relative_error)
+                return "%s Variable %s max gradient diff %f over limit %f, the first " \
+                    "error element is %d" % (
+                    msg_prefix, name, max_diff, max_relative_error, offset)
+
+            self.assertLessEqual(max_diff, max_relative_error, err_msg())
 
     def check_grad(self,
                    forward_op,
@@ -145,7 +157,8 @@ class GradientChecker(unittest.TestCase):
         # get numeric gradient
         for check_name in inputs_to_check:
             numeric_grad[check_name] = \
-                get_numeric_gradient(forward_op, input_vars, output_name, check_name)
+                get_numeric_gradient(forward_op, input_vars, output_name,
+                                     check_name)
 
         # get operator gradient according to different device
         for place in places:
@@ -187,15 +200,8 @@ class GradientChecker(unittest.TestCase):
             backward_op.infer_shape(scope)
             backward_op.run(scope, ctx)
 
-            if isinstance(place, core.CPUPlace):
-                msg = "CPU kernel gradient is not close to numeric gradient"
-            else:
-                if isinstance(place, core.GPUPlace):
-                    msg = "GPU kernel gradient is not close to numeric gradient"
-                else:
-                    raise ValueError("unknown place " + type(place))
-            self.assertTrue(
-                self.__is_close(numeric_grad, scope, max_relative_error), msg)
+            self.assert_is_close(numeric_grad, scope, max_relative_error,
+                                 "Gradient Check On %s" % str(place))
 
 
 if __name__ == '__main__':
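
How the pieces above fit together: PADDLE_ENFORCE_EQ now renders both operands through paddle::string::to_string, which only requires an operator<< overload for the type being printed, exactly what the Dims struct in enforce_test.cc exercises. The snippet below is an illustrative, standalone sketch of that pattern and is not part of the patch; the simplified to_string here merely mirrors the template added in paddle/string/to_string.h.

    // Sketch only: any type with an operator<< overload can be stringified by a
    // to_string helper of the kind added in paddle/string/to_string.h, which is
    // what lets the enforce comparison macros print user-defined operands.
    #include <cstddef>
    #include <iostream>
    #include <sstream>
    #include <string>

    template <typename T>
    std::string to_string(const T& v) {
      std::ostringstream sout;
      sout << v;  // relies on an operator<< overload for T
      return sout.str();
    }

    struct Dims {
      std::size_t dims_[4];
    };

    std::ostream& operator<<(std::ostream& os, const Dims& d) {
      os << "[" << d.dims_[0] << ", " << d.dims_[1] << ", " << d.dims_[2]
         << ", " << d.dims_[3] << "]";
      return os;
    }

    int main() {
      Dims a{{1, 2, 3, 4}};
      std::cout << to_string(a) << "\n";  // prints: [1, 2, 3, 4]
      return 0;
    }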