/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <Python.h>
#include <atomic>
#include <memory>
#include <string>
#include <vector>

#include "paddle/framework/net.h"
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"
#include "paddle/framework/scope.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"
#include "paddle/pybind/tensor_bind.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

namespace py = pybind11;
namespace pd = paddle::framework;

USE_OP(add_two);
USE_OP(onehot_cross_entropy);
USE_OP_WITHOUT_KERNEL(fc);
USE_OP(sgd);
USE_OP(mul);
USE_OP(sigmoid);
USE_OP(softmax);
USE_OP(rowwise_add);
USE_OP_WITHOUT_KERNEL(recurrent_op);

// Exposes the methods shared by every operator binding (OperatorBase, NetOp).
// ClassType is the py::class_ instantiation; ClassType::type is the wrapped
// C++ operator type.
template <typename ClassType>
void ExposeOperator(ClassType& m) {
  m.def("infer_shape", &ClassType::type::InferShape)
      .def("run", &ClassType::type::Run)
      .def("outputs",
           [](const typename ClassType::type& op) -> std::vector<std::string> {
             return op.outputs_;
           })
      .def("__str__", &ClassType::type::DebugString);
}

// Returns a process-wide, monotonically increasing integer; used from the
// Python side to generate unique names.
static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> generator;
  return generator.fetch_add(1);
}

bool IsCompileGPU() {
#ifdef PADDLE_ONLY_CPU
  return false;
#else
  return true;
#endif
}

PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

  py::class_<pd::Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer([](pd::Tensor& self) -> py::buffer_info {
        return paddle::pybind::CastToPyBuffer(self);
      })
      .def("get_dims",
           [](const pd::Tensor& self) { return pd::vectorize(self.dims()); })
      .def("set_dims",
           [](pd::Tensor& self, const std::vector<int>& dim) {
             self.Resize(pd::make_ddim(dim));
           })
      .def("alloc_float",
           [](pd::Tensor& self, paddle::platform::GPUPlace& place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](pd::Tensor& self, paddle::platform::CPUPlace& place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](pd::Tensor& self, paddle::platform::CPUPlace& place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](pd::Tensor& self, paddle::platform::GPUPlace& place) {
             self.mutable_data<int>(place);
           })
      .def("set", paddle::pybind::PyCPUTensorSetFromArray<float>)
      .def("set", paddle::pybind::PyCPUTensorSetFromArray<int>)
#ifndef PADDLE_ONLY_CPU
      .def("set", paddle::pybind::PyCUDATensorSetFromArray<float>)
      .def("set", paddle::pybind::PyCUDATensorSetFromArray<int>)
#endif
      .def("shape",
           [](pd::Tensor& self) { return pd::vectorize(self.dims()); });
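
  // A minimal Python-side sketch of the Tensor binding above, for reference.
  // It assumes the built extension is importable as `core` and that the `set`
  // overloads take (numpy_array, place), matching tensor_bind.h; adjust the
  // import path to wherever the module is installed.
  //
  //   import numpy
  //   import core
  //
  //   scope = core.Scope()
  //   tensor = scope.new_var("x").get_tensor()
  //   place = core.CPUPlace()
  //   tensor.set_dims([2, 3])
  //   tensor.alloc_float(place)
  //   tensor.set(numpy.ones((2, 3), dtype=numpy.float32), place)
  //   assert tensor.shape() == [2, 3]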
)DOC") .def("is_int", [](const pd::Variable& var) { return var.IsType(); }) .def("set_int", [](pd::Variable& var, int val) -> void { *var.GetMutable() = val; }) .def("get_int", [](const pd::Variable& var) -> int { return var.Get(); }) .def("get_tensor", [](pd::Variable& self) -> pd::Tensor* { return self.GetMutable(); }, py::return_value_policy::reference) .def("get_net", [](pd::Variable& self) -> pd::NetOp* { return self.GetMutable(); }, py::return_value_policy::reference); py::class_(m, "Scope", "") .def("new_var", [](pd::Scope& self, const std::string& name) -> pd::Variable* { return self.NewVar(name); }, py::return_value_policy::reference) .def("find_var", &pd::Scope::FindVar, py::return_value_policy::reference) .def(py::init<>()) .def("new_scope", [](pd::Scope& self) -> pd::Scope* { return &self.NewScope(); }, py::return_value_policy::reference) .def("drop_kids", &pd::Scope::DropKids); //! @note: Be careful! PyBind will return std::string as an unicode, not //! Python str. If you want a str object, you should cast them in Python. m.def("get_all_op_protos", []() -> std::vector { auto& protos = pd::OpRegistry::protos(); std::vector ret_values; for (auto it = protos.begin(); it != protos.end(); ++it) { PADDLE_ENFORCE(it->second.IsInitialized(), "OpProto must all be initialized"); std::string str; PADDLE_ENFORCE(it->second.SerializeToString(&str), "Serialize OpProto Error. This could be a bug of Paddle."); ret_values.push_back(py::bytes(str)); } return ret_values; }); m.def_submodule( "var_names", "The module will return special predefined variable name in Paddle") .def("empty", pd::OperatorBase::EMPTY_VAR_NAME) .def("temp", pd::OperatorBase::TMP_VAR_NAME); //clang-format off py::class_(m, "DeviceContext") .def_static("create", [](paddle::platform::CPUPlace& place) -> paddle::platform::DeviceContext* { return new paddle::platform::CPUDeviceContext(); }) .def_static("create", [](paddle::platform::GPUPlace& place) -> paddle::platform::DeviceContext* { #ifdef PADDLE_ONLY_CPU PADDLE_THROW("GPUPlace is not supported in CPU device."); #else return new paddle::platform::CUDADeviceContext(place); #endif }); //clang-format on py::class_(m, "GPUPlace").def(py::init()); py::class_(m, "CPUPlace").def(py::init<>()); py::class_> operator_base( m, "Operator"); operator_base.def_static("create", [](py::bytes protobin) { pd::OpDesc desc; PADDLE_ENFORCE(desc.ParsePartialFromString(protobin), "Cannot parse user input to OpDesc"); PADDLE_ENFORCE(desc.IsInitialized(), "User OpDesc is not initialized, reason %s", desc.InitializationErrorString()); return pd::OpRegistry::CreateOp(desc); }); ExposeOperator(operator_base); py::class_> net(m, "Net"); net.def_static("create", []() -> std::shared_ptr { auto retv = std::make_shared(); retv->type_ = "plain_net"; return retv; }) .def("add_op", &pd::NetOp::AddOp) .def("add_op", [](pd::NetOp& self, const std::shared_ptr& net) -> void { self.AddOp(std::static_pointer_cast(net)); }) .def("complete_add_op", &pd::NetOp::CompleteAddOp) .def("complete_add_op", [](std::shared_ptr& self) { self->CompleteAddOp(); }); ExposeOperator(net); m.def("unique_integer", UniqueIntegerGenerator); m.def("is_compile_gpu", IsCompileGPU); return m.ptr(); }