From 3fc68f6f14afb3a0e1aadb0c06d787c2d2b75a66 Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Wed, 2 Aug 2017 19:37:20 +0800
Subject: [PATCH] Move pybind.cc/tensor_bind.h to paddle::framework

Fix #3171
---
 paddle/CMakeLists.txt                      |  1 -
 paddle/framework/CMakeLists.txt            |  9 ++
 paddle/{pybind => framework}/pybind.cc     | 96 ++++++++++------------
 paddle/framework/tensor.h                  | 12 ++-
 paddle/{pybind => framework}/tensor_bind.h |  9 +-
 paddle/pybind/CMakeLists.txt               |  9 --
 6 files changed, 61 insertions(+), 75 deletions(-)
 rename paddle/{pybind => framework}/pybind.cc (60%)
 rename paddle/{pybind => framework}/tensor_bind.h (93%)
 delete mode 100644 paddle/pybind/CMakeLists.txt

diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt
index 4b06966fba2..f8a88cf317a 100644
--- a/paddle/CMakeLists.txt
+++ b/paddle/CMakeLists.txt
@@ -15,7 +15,6 @@ if(Boost_FOUND)
   add_subdirectory(platform)
   add_subdirectory(framework)
   add_subdirectory(operators)
-  add_subdirectory(pybind)
 endif()
 
 if(WITH_C_API)
diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt
index 12a3a00bba3..b73426eaacd 100644
--- a/paddle/framework/CMakeLists.txt
+++ b/paddle/framework/CMakeLists.txt
@@ -36,3 +36,12 @@ cc_test(net_op_test SRCS net_op_test.cc DEPS net)
 
 cc_library(backward SRCS backward.cc DEPS net)
 cc_test(backward_test SRCS backward_test.cc DEPS backward)
+cc_library(paddle_pybind SHARED
+    SRCS pybind.cc
+    DEPS pybind python
+    fc_op
+    sgd_op
+    add_op
+    mean_op
+    cross_entropy_op
+    recurrent_network_op)
diff --git a/paddle/pybind/pybind.cc b/paddle/framework/pybind.cc
similarity index 60%
rename from paddle/pybind/pybind.cc
rename to paddle/framework/pybind.cc
index 801ef50e577..a735cc2ad51 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/framework/pybind.cc
@@ -20,13 +20,12 @@ limitations under the License. */
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/operator.h"
 #include "paddle/framework/scope.h"
-#include "paddle/pybind/tensor_bind.h"
+#include "paddle/framework/tensor_bind.h"
 #include "pybind11/numpy.h"
 #include "pybind11/pybind11.h"
 #include "pybind11/stl.h"
 
 namespace py = pybind11;
-namespace pd = paddle::framework;
 
 USE_OP(add_two);
 USE_OP(onehot_cross_entropy);
@@ -38,13 +37,14 @@ USE_OP(sigmoid);
 USE_OP(softmax);
 USE_OP(rowwise_add);
 USE_OP_WITHOUT_KERNEL(recurrent_op);
-
+namespace paddle {
+namespace framework {
 template <typename ClassType>
-void ExposeOperator(ClassType& m) {
+void ExposeOperator(ClassType &m) {
   m.def("infer_shape", &ClassType::type::InferShape)
       .def("run", &ClassType::type::Run)
       .def("outputs",
-           [](const typename ClassType::type& op) -> std::vector<std::string> {
+           [](const typename ClassType::type &op) -> std::vector<std::string> {
              return op.outputs_;
            })
       .def("__str__", &ClassType::type::DebugString);
@@ -58,68 +58,58 @@ static size_t UniqueIntegerGenerator() {
 PYBIND11_PLUGIN(core) {
   py::module m("core", "C++ core of PaddlePaddle");
 
-  py::class_<pd::Tensor>(m, "Tensor", py::buffer_protocol())
-      .def_buffer([](pd::Tensor& self) -> py::buffer_info {
-        return paddle::pybind::CastToPyBuffer(self);
-      })
+  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
+      .def_buffer(
+          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
       .def("get_dims",
-           [](const pd::Tensor& self) { return pd::vectorize(self.dims()); })
+           [](const Tensor &self) { return vectorize(self.dims()); })
       .def("set_dims",
-           [](pd::Tensor& self, const std::vector<int>& dim) {
-             self.Resize(pd::make_ddim(dim));
+           [](Tensor &self, const std::vector<int> &dim) {
+             self.Resize(make_ddim(dim));
            })
       .def("alloc_float",
-           [](pd::Tensor& self) {
+           [](Tensor &self) {
              self.mutable_data<float>(paddle::platform::CPUPlace());
            })
       .def("alloc_int",
-           [](pd::Tensor& self) {
+           [](Tensor &self) {
             self.mutable_data<int>(paddle::platform::CPUPlace());
           })
-      .def("set", paddle::pybind::PyTensorSetFromArray<float>)
-      .def("set", paddle::pybind::PyTensorSetFromArray<int>)
-      .def("shape",
-           [](pd::Tensor& self) { return pd::vectorize(self.dims()); });
+      .def("set", PyTensorSetFromArray<float>)
+      .def("set", PyTensorSetFromArray<int>)
+      .def("shape", [](Tensor &self) { return vectorize(self.dims()); });
 
-  py::class_<pd::Variable>(m, "Variable", R"DOC(Variable Class.
+  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.
 
 All parameter, weight, gradient are variables in Paddle.
 )DOC")
-      .def("is_int", [](const pd::Variable& var) { return var.IsType<int>(); })
+      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
       .def("set_int",
-           [](pd::Variable& var, int val) -> void {
-             *var.GetMutable<int>() = val;
-           })
-      .def("get_int",
-           [](const pd::Variable& var) -> int { return var.Get<int>(); })
+           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
+      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
       .def("get_tensor",
-           [](pd::Variable& self) -> pd::Tensor* {
-             return self.GetMutable<pd::Tensor>();
-           },
+           [](Variable &self) -> Tensor * { return self.GetMutable<Tensor>(); },
            py::return_value_policy::reference)
       .def("get_net",
-           [](pd::Variable& self) -> pd::NetOp* {
-             return self.GetMutable<pd::NetOp>();
-           },
+           [](Variable &self) -> NetOp * { return self.GetMutable<NetOp>(); },
            py::return_value_policy::reference);
 
-  py::class_<pd::Scope>(m, "Scope", "")
+  py::class_<Scope>(m, "Scope", "")
       .def("new_var",
-           [](pd::Scope& self, const std::string& name) -> pd::Variable* {
+           [](Scope &self, const std::string &name) -> Variable * {
              return self.NewVar(name);
            },
           py::return_value_policy::reference)
-      .def("find_var", &pd::Scope::FindVar, py::return_value_policy::reference)
+      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
       .def(py::init<>())
-      .def("new_scope",
-           [](pd::Scope& self) -> pd::Scope* { return &self.NewScope(); },
+      .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
            py::return_value_policy::reference)
-      .def("drop_kids", &pd::Scope::DropKids);
+      .def("drop_kids", &Scope::DropKids);
 
   //! @note: Be careful! PyBind will return std::string as an unicode, not
   //! Python str. If you want a str object, you should cast them in Python.
   m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
-    auto& protos = pd::OpRegistry::protos();
+    auto &protos = OpRegistry::protos();
     std::vector<py::bytes> ret_values;
     for (auto it = protos.begin(); it != protos.end(); ++it) {
       PADDLE_ENFORCE(it->second.IsInitialized(),
@@ -134,47 +124,49 @@ All parameter, weight, gradient are variables in Paddle.
   m.def_submodule(
        "var_names",
        "The module will return special predefined variable name in Paddle")
-      .def("empty", pd::OperatorBase::EMPTY_VAR_NAME)
-      .def("temp", pd::OperatorBase::TMP_VAR_NAME);
+      .def("empty", OperatorBase::EMPTY_VAR_NAME)
+      .def("temp", OperatorBase::TMP_VAR_NAME);
 
   py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
-      .def_static("cpu_context", []() -> paddle::platform::DeviceContext* {
+      .def_static("cpu_context", []() -> paddle::platform::DeviceContext * {
         return new paddle::platform::CPUDeviceContext();
       });
 
-  py::class_<pd::OperatorBase, std::shared_ptr<pd::OperatorBase>> operator_base(
+  py::class_<OperatorBase, std::shared_ptr<OperatorBase>> operator_base(
       m, "Operator");
 
   operator_base.def_static("create", [](py::bytes protobin) {
-    pd::OpDesc desc;
+    OpDesc desc;
     PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                    "Cannot parse user input to OpDesc");
     PADDLE_ENFORCE(desc.IsInitialized(),
                    "User OpDesc is not initialized, reason %s",
                    desc.InitializationErrorString());
-    return pd::OpRegistry::CreateOp(desc);
+    return OpRegistry::CreateOp(desc);
   });
   ExposeOperator(operator_base);
 
-  py::class_<pd::NetOp, std::shared_ptr<pd::NetOp>> net(m, "Net");
+  py::class_<NetOp, std::shared_ptr<NetOp>> net(m, "Net");
 
   net.def_static("create",
-                 []() -> std::shared_ptr<pd::NetOp> {
-                   auto retv = std::make_shared<pd::NetOp>();
+                 []() -> std::shared_ptr<NetOp> {
+                   auto retv = std::make_shared<NetOp>();
                    retv->type_ = "plain_net";
                    return retv;
                  })
-      .def("add_op", &pd::NetOp::AddOp)
+      .def("add_op", &NetOp::AddOp)
       .def("add_op",
-           [](pd::NetOp& self, const std::shared_ptr<pd::NetOp>& net) -> void {
-             self.AddOp(std::static_pointer_cast<pd::OperatorBase>(net));
+           [](NetOp &self, const std::shared_ptr<NetOp> &net) -> void {
+             self.AddOp(std::static_pointer_cast<OperatorBase>(net));
            })
-      .def("complete_add_op", &pd::NetOp::CompleteAddOp)
+      .def("complete_add_op", &NetOp::CompleteAddOp)
       .def("complete_add_op",
-           [](std::shared_ptr<pd::NetOp>& self) { self->CompleteAddOp(); });
+           [](std::shared_ptr<NetOp> &self) { self->CompleteAddOp(); });
   ExposeOperator(net);
 
   m.def("unique_integer", UniqueIntegerGenerator);
 
   return m.ptr();
 }
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h
index 76070f636b0..c3e9a914f1d 100644
--- a/paddle/framework/tensor.h
+++ b/paddle/framework/tensor.h
@@ -26,19 +26,17 @@ limitations under the License. */
 #include "unsupported/Eigen/CXX11/Tensor"
 
 namespace paddle {
-namespace pybind {
-namespace details {  // forward declare
-template <bool less, size_t I, typename... ARGS>
-struct CastToPyBufferImpl;
-}  // namespace details
-}  // namespace pybind
 
 namespace framework {
+namespace details {
+template <bool less, size_t I, typename... ARGS>
+struct CastToPyBufferImpl;
+}
 
 class Tensor {
  public:
   template <bool less, size_t I, typename... ARGS>
-  friend struct paddle::pybind::details::CastToPyBufferImpl;
+  friend struct details::CastToPyBufferImpl;
 
   template <typename T, size_t D, int MajorType, typename IndexType>
   friend struct EigenTensor;
diff --git a/paddle/pybind/tensor_bind.h b/paddle/framework/tensor_bind.h
similarity index 93%
rename from paddle/pybind/tensor_bind.h
rename to paddle/framework/tensor_bind.h
index 995e102bf9d..530b640f705 100644
--- a/paddle/pybind/tensor_bind.h
+++ b/paddle/framework/tensor_bind.h
@@ -21,7 +21,7 @@ namespace py = pybind11;
 
 namespace paddle {
 
-namespace pybind {
+namespace framework {
 
 namespace details {
 
@@ -59,11 +59,8 @@ struct CastToPyBufferImpl {
 
       return py::buffer_info(
           tensor.mutable_data<CUR_TYPE>(tensor.holder_->place()),
-          sizeof(CUR_TYPE),
-          py::format_descriptor<CUR_TYPE>::format(),
-          (size_t)framework::arity(tensor.dims()),
-          dims_outside,
-          strides);
+          sizeof(CUR_TYPE), py::format_descriptor<CUR_TYPE>::format(),
+          (size_t)framework::arity(tensor.dims()), dims_outside, strides);
     } else {
       constexpr bool less = I + 1 < std::tuple_size<std::tuple<ARGS...>>::value;
       return CastToPyBufferImpl<less, I + 1, ARGS...>()(tensor);
diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt
deleted file mode 100644
index 845589dcb19..00000000000
--- a/paddle/pybind/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-cc_library(paddle_pybind SHARED
-    SRCS pybind.cc
-    DEPS pybind python
-    fc_op
-    sgd_op
-    add_op
-    mean_op
-    cross_entropy_op
-    recurrent_network_op)
-- 
GitLab
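
Note: the patch only relocates the bindings into paddle::framework; the Python-facing API of the "core" module is unchanged. A minimal usage sketch of the Tensor and Scope bindings defined above, assuming the built paddle_pybind shared object is importable as core (the actual import path depends on where the build places the .so on PYTHONPATH):

    import numpy as np

    # Assumption: the paddle_pybind shared library is importable as `core`
    # from the build output directory; adjust the import to your setup.
    import core

    scope = core.Scope()
    var = scope.new_var("w")      # create a Variable inside the Scope
    tensor = var.get_tensor()     # borrowed reference (return_value_policy::reference)

    tensor.set_dims([2, 3])       # Resize(make_ddim({2, 3}))
    tensor.alloc_float()          # mutable_data<float>(CPUPlace())
    tensor.set(np.random.rand(2, 3).astype("float32"))  # PyTensorSetFromArray<float>

    print(tensor.get_dims())      # -> [2, 3]
    print(np.array(tensor))       # copies out via the buffer protocol (CastToPyBuffer)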