From e29003669ff30272070bb8513fb95c2042c305b9 Mon Sep 17 00:00:00 2001
From: fengjiayi
Date: Fri, 22 Sep 2017 11:23:47 -0700
Subject: [PATCH] Moving protobuf binding code to protobuf module

---
 paddle/pybind/CMakeLists.txt |   3 +-
 paddle/pybind/protobuf.cc    | 136 ++++++++++++++++++++++++++++++++++
 paddle/pybind/protobuf.h     |  54 ++++++++++++++
 paddle/pybind/pybind.cc      | 140 ++---------------------------------
 4 files changed, 197 insertions(+), 136 deletions(-)
 create mode 100644 paddle/pybind/protobuf.cc
 create mode 100644 paddle/pybind/protobuf.h

diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt
index 4f05406c7f7..a1d7483973b 100644
--- a/paddle/pybind/CMakeLists.txt
+++ b/paddle/pybind/CMakeLists.txt
@@ -1,6 +1,7 @@
 if(WITH_PYTHON)
+  cc_library(proto_bind SRCS protobuf.cc)
   cc_library(paddle_pybind SHARED
     SRCS pybind.cc
-    DEPS pybind python backward
+    DEPS proto_bind pybind python backward
          ${GLOB_OP_LIB})
 endif(WITH_PYTHON)
diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc
new file mode 100644
index 00000000000..91f4c7d7c88
--- /dev/null
+++ b/paddle/pybind/protobuf.cc
@@ -0,0 +1,136 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/pybind/protobuf.h"
+
+namespace paddle {
+namespace framework {
+
+void bind_program_desc(py::module &m) {
+  py::class_<ProgramDesc>(m, "ProgramDesc", "")
+      .def_static("instance",
+                  [] { return &GetProgramDesc(); },
+                  py::return_value_policy::reference)
+      .def_static("__create_program_desc__",
+                  [] {
+                    // Only used for unit-test
+                    auto *prog_desc = new ProgramDesc;
+                    auto *block = prog_desc->mutable_blocks()->Add();
+                    block->set_idx(0);
+                    block->set_parent_idx(-1);
+                    return prog_desc;
+                  })
+      .def("append_block",
+           [](ProgramDesc &self, BlockDesc &parent) {
+             auto desc = self.add_blocks();
+             desc->set_idx(self.mutable_blocks()->size() - 1);
+             desc->set_parent_idx(parent.idx());
+             return desc;
+           },
+           py::return_value_policy::reference)
+      .def("root_block",
+           [](ProgramDesc &self) { return self.mutable_blocks()->Mutable(0); },
+           py::return_value_policy::reference)
+      .def("__str__", [](ProgramDesc &self) { return self.DebugString(); });
+}
+
+void bind_block_desc(py::module &m) {
+  py::class_<BlockDesc>(m, "BlockDesc", "")
+      .def("id", [](BlockDesc &self) { return self.idx(); })
+      .def("parent", [](BlockDesc &self) { return self.parent_idx(); })
+      .def("append_op",
+           [](BlockDesc &self) { return self.add_ops(); },
+           py::return_value_policy::reference)
+      .def("new_var",
+           [](BlockDesc &self) { return self.add_vars(); },
+           py::return_value_policy::reference);
+}
+
+void bind_var_dses(py::module &m) {
+  py::class_<VarDesc>(m, "VarDesc", "")
+      .def(py::init<>())
+      .def("set_name",
+           [](VarDesc &self, const std::string &name) { self.set_name(name); })
+      .def("set_shape",
+           [](VarDesc &self, const std::vector<int64_t> &dims) {
+             LoDTensorDesc *lod_tensor_desc = self.mutable_lod_tensor();
+             for (const int64_t &i : dims) {
+               lod_tensor_desc->add_dims(i);
+             }
+           })
+      .def("set_data_type",
+           [](VarDesc &self, int type_id) {
+             LoDTensorDesc *lod_tensor_desc = self.mutable_lod_tensor();
+             lod_tensor_desc->set_data_type(static_cast<DataType>(type_id));
+           })
+      .def("shape", [](VarDesc &self) {
+        const LoDTensorDesc &lod_tensor_desc = self.lod_tensor();
+        int rank = lod_tensor_desc.dims_size();
+        std::vector<int64_t> res(rank);
+        for (int i = 0; i < rank; ++i) {
+          res[i] = lod_tensor_desc.dims(i);
+        }
+        return res;
+      });
+}
+
+void bind_op_desc(py::module &m) {
+  auto op_desc_set_var = [](OpDesc::Var *var,
+                            const std::string &parameter,
+                            const std::vector<std::string> &arguments) {
+    var->set_parameter(parameter);
+    VectorToRepeated(arguments, var->mutable_arguments());
+  };
+
+  auto op_desc_set_attr = [](OpDesc &desc, const std::string &name) {
+    auto attr = desc.add_attrs();
+    attr->set_name(name);
+    return attr;
+  };
+
+  py::class_<OpDesc>(m, "OpDesc", "")
+      .def("type", [](OpDesc &op) { return op.type(); })
+      .def("set_input",
+           [op_desc_set_var](OpDesc &self,
+                             const std::string &parameter,
+                             const std::vector<std::string> &arguments) {
+             auto ipt = self.add_inputs();
+             op_desc_set_var(ipt, parameter, arguments);
+           })
+      .def("input_names",
+           [](OpDesc &self) {
+             std::vector<std::string> ret_val;
+             ret_val.reserve(static_cast<size_t>(self.inputs().size()));
+             std::transform(
+                 self.inputs().begin(),
+                 self.inputs().end(),
+                 std::back_inserter(ret_val),
+                 [](const OpDesc::Var &var) { return var.parameter(); });
+             return ret_val;
+           })
+      .def("__str__", [](OpDesc &self) { return self.DebugString(); })
+      .def("set_output",
+           [op_desc_set_var](OpDesc &self,
+                             const std::string &parameter,
+                             const std::vector<std::string> &arguments) {
+             auto opt = self.add_outputs();
+             op_desc_set_var(opt, parameter, arguments);
+           })
+      .def("set_attr",
+           [op_desc_set_attr](OpDesc &self, const std::string &name, int i) {
+             op_desc_set_attr(self, name)->set_i(i);
+           });
+}
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/pybind/protobuf.h b/paddle/pybind/protobuf.h
new file mode 100644
index 00000000000..ff4813cce7c
--- /dev/null
+++ b/paddle/pybind/protobuf.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include <Python.h>
+#include <fstream>
+#include <vector>
+#include "paddle/framework/op_registry.h"
+#include "pybind11/numpy.h"
+#include "pybind11/pybind11.h"
+#include "pybind11/stl.h"
+
+namespace py = pybind11;
+
+namespace paddle {
+namespace framework {
+
+template <typename T>
+inline std::vector<T> RepeatedToVector(
+    const google::protobuf::RepeatedField<T>& repeated_field) {
+  std::vector<T> ret;
+  ret.reserve(repeated_field.size());
+  std::copy(
+      repeated_field.begin(), repeated_field.end(), std::back_inserter(ret));
+  return ret;
+}
+
+template <typename T, typename RepeatedField>
+inline void VectorToRepeated(const std::vector<T>& vec,
+                             RepeatedField* repeated_field) {
+  repeated_field->Reserve(vec.size());
+  for (auto& elem : vec) {
+    *repeated_field->Add() = elem;
+  }
+}
+
+void bind_program_desc(py::module& m);
+void bind_block_desc(py::module& m);
+void bind_var_dses(py::module& m);
+void bind_op_desc(py::module& m);
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index 5ccc8c377fb..10c6670e004 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -12,13 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <Python.h>
-#include <fstream>
-#include <vector>
+#include "paddle/pybind/protobuf.h"
 
 #include "paddle/framework/backward.h"
 #include "paddle/framework/lod_tensor.h"
-#include "paddle/framework/op_registry.h"
 #include "paddle/operators/cond_op.h"
 #include "paddle/operators/net_op.h"
 #include "paddle/operators/recurrent_op.h"
@@ -27,11 +24,6 @@ limitations under the License. */
 #include "paddle/pybind/pybind.h"
 #include "paddle/pybind/tensor_py.h"
 #include "paddle/string/to_string.h"
-#include "pybind11/numpy.h"
-#include "pybind11/pybind11.h"
-#include "pybind11/stl.h"
-
-namespace py = pybind11;
 
 namespace paddle {
 namespace framework {
@@ -53,25 +45,6 @@ bool IsCompileGPU() {
 #endif
 }
 
-template <typename T>
-inline std::vector<T> RepeatedToVector(
-    const google::protobuf::RepeatedField<T> &repeated_field) {
-  std::vector<T> ret;
-  ret.reserve(repeated_field.size());
-  std::copy(
-      repeated_field.begin(), repeated_field.end(), std::back_inserter(ret));
-  return ret;
-}
-
-template <typename T, typename RepeatedField>
-inline void VectorToRepeated(const std::vector<T> &vec,
-                             RepeatedField *repeated_field) {
-  repeated_field->Reserve(vec.size());
-  for (auto &elem : vec) {
-    *repeated_field->Add() = elem;
-  }
-}
-
 PYBIND11_PLUGIN(core) {
   py::module m("core", "C++ core of PaddlePaddle");
 
@@ -334,113 +307,10 @@ All parameter, weight, gradient are variables in Paddle.
 
   m.def("is_compile_gpu", IsCompileGPU);
 
-  py::class_<ProgramDesc>(m, "ProgramDesc", "")
-      .def_static("instance",
-                  [] { return &GetProgramDesc(); },
-                  py::return_value_policy::reference)
-      .def_static("__create_program_desc__",
-                  [] {
-                    // Only used for unit-test
-                    auto *prog_desc = new ProgramDesc;
-                    auto *block = prog_desc->mutable_blocks()->Add();
-                    block->set_idx(0);
-                    block->set_parent_idx(-1);
-                    return prog_desc;
-                  })
-      .def("append_block",
-           [](ProgramDesc &self, BlockDesc &parent) {
-             auto desc = self.add_blocks();
-             desc->set_idx(self.mutable_blocks()->size() - 1);
-             desc->set_parent_idx(parent.idx());
-             return desc;
-           },
-           py::return_value_policy::reference)
-      .def("root_block",
-           [](ProgramDesc &self) { return self.mutable_blocks()->Mutable(0); },
-           py::return_value_policy::reference)
-      .def("__str__", [](ProgramDesc &self) { return self.DebugString(); });
-
-  py::class_<BlockDesc>(m, "BlockDesc", "")
-      .def("id", [](BlockDesc &self) { return self.idx(); })
-      .def("parent", [](BlockDesc &self) { return self.parent_idx(); })
-      .def("append_op",
-           [](BlockDesc &self) { return self.add_ops(); },
-           py::return_value_policy::reference)
-      .def("new_var",
-           [](BlockDesc &self) { return self.add_vars(); },
-           py::return_value_policy::reference);
-
-  py::class_<VarDesc>(m, "VarDesc", "")
-      .def(py::init<>())
-      .def("set_name",
-           [](VarDesc &self, const std::string &name) { self.set_name(name); })
-      .def("set_shape",
-           [](VarDesc &self, const std::vector<int64_t> &dims) {
-             LoDTensorDesc *lod_tensor_desc = self.mutable_lod_tensor();
-             for (const int64_t &i : dims) {
-               lod_tensor_desc->add_dims(i);
-             }
-           })
-      .def("set_data_type",
-           [](VarDesc &self, int type_id) {
-             LoDTensorDesc *lod_tensor_desc = self.mutable_lod_tensor();
-             lod_tensor_desc->set_data_type(static_cast<DataType>(type_id));
-           })
-      .def("shape", [](VarDesc &self) {
-        const LoDTensorDesc &lod_tensor_desc = self.lod_tensor();
-        int rank = lod_tensor_desc.dims_size();
-        std::vector<int64_t> res(rank);
-        for (int i = 0; i < rank; ++i) {
-          res[i] = lod_tensor_desc.dims(i);
-        }
-        return res;
-      });
-
-  auto op_desc_set_var = [](OpDesc::Var *var,
-                            const std::string &parameter,
-                            const std::vector<std::string> &arguments) {
-    var->set_parameter(parameter);
-    VectorToRepeated(arguments, var->mutable_arguments());
-  };
-
-  auto op_desc_set_attr = [](OpDesc &desc, const std::string &name) {
-    auto attr = desc.add_attrs();
-    attr->set_name(name);
-    return attr;
-  };
-
-  py::class_<OpDesc>(m, "OpDesc", "")
-      .def("type", [](OpDesc &op) { return op.type(); })
-      .def("set_input",
-           [op_desc_set_var](OpDesc &self,
-                             const std::string &parameter,
-                             const std::vector<std::string> &arguments) {
-             auto ipt = self.add_inputs();
-             op_desc_set_var(ipt, parameter, arguments);
-           })
-      .def("input_names",
-           [](OpDesc &self) {
-             std::vector<std::string> ret_val;
-             ret_val.reserve(static_cast<size_t>(self.inputs().size()));
-             std::transform(
-                 self.inputs().begin(),
-                 self.inputs().end(),
-                 std::back_inserter(ret_val),
-                 [](const OpDesc::Var &var) { return var.parameter(); });
-             return ret_val;
-           })
-      .def("__str__", [](OpDesc &self) { return self.DebugString(); })
-      .def("set_output",
-           [op_desc_set_var](OpDesc &self,
-                             const std::string &parameter,
-                             const std::vector<std::string> &arguments) {
-             auto opt = self.add_outputs();
-             op_desc_set_var(opt, parameter, arguments);
-           })
-      .def("set_attr",
-           [op_desc_set_attr](OpDesc &self, const std::string &name, int i) {
-             op_desc_set_attr(self, name)->set_i(i);
-           });
+  bind_program_desc(m);
+  bind_block_desc(m);
+  bind_var_dses(m);
+  bind_op_desc(m);
 
   return m.ptr();
 }
-- 
GitLab