/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <Python.h>
#include <atomic>
#include <fstream>
#include <vector>

#include "paddle/framework/net.h"
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"
#include "paddle/framework/scope.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"
#include "paddle/pybind/tensor_bind.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

namespace py = pybind11;
namespace pd = paddle::framework;

USE_OP(add_two);
USE_OP(onehot_cross_entropy);
USE_OP_WITHOUT_KERNEL(fc);
USE_OP(sgd);
USE_OP(mul);
USE_OP(mean);
USE_OP(sigmoid);
USE_OP(softmax);
USE_OP(rowwise_add);
USE_OP_WITHOUT_KERNEL(recurrent_op);

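// Binds the methods shared by every exposed operator type: infer_shape, run,
// outputs, and __str__ (DebugString).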
template <typename ClassType>
void ExposeOperator(ClassType& m) {
  m.def("infer_shape", &ClassType::type::InferShape)
      .def("run", &ClassType::type::Run)
      .def("outputs",
           [](const typename ClassType::type& op) -> std::vector<std::string> {
             return op.outputs_;
           })
      .def("__str__", &ClassType::type::DebugString);
}

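// Generates a process-wide unique, monotonically increasing integer.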
static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> generator;
  return generator.fetch_add(1);
}

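// Returns true when Paddle is built with GPU support.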
bool IsCompileGPU() {
#ifdef PADDLE_ONLY_CPU
  return false;
#else
  return true;
#endif
}

PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

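  // Tensor is exposed with the Python buffer protocol so its memory can be
  // shared with numpy arrays.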
  py::class_<pd::Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer([](pd::Tensor& self) -> py::buffer_info {
        return paddle::pybind::CastToPyBuffer(self);
      })
      .def("get_dims",
           [](const pd::Tensor& self) { return pd::vectorize(self.dims()); })
      .def("set_dims",
           [](pd::Tensor& self, const std::vector<int>& dim) {
             self.Resize(pd::make_ddim(dim));
           })
      .def("alloc_float",
           [](pd::Tensor& self, paddle::platform::GPUPlace& place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](pd::Tensor& self, paddle::platform::CPUPlace& place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](pd::Tensor& self, paddle::platform::CPUPlace& place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](pd::Tensor& self, paddle::platform::GPUPlace& place) {
             self.mutable_data<int>(place);
           })
      .def("set", paddle::pybind::PyCPUTensorSetFromArray<float>)
      .def("set", paddle::pybind::PyCPUTensorSetFromArray<int>)
#ifndef PADDLE_ONLY_CPU
      .def("set", paddle::pybind::PyCUDATensorSetFromArray<float>)
      .def("set", paddle::pybind::PyCUDATensorSetFromArray<int>)
#endif
      .def("shape",
           [](pd::Tensor& self) { return pd::vectorize(self.dims()); });

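  // Variable is a typed container; it may hold an int, a Tensor, or a NetOp.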
  py::class_<pd::Variable>(m, "Variable", R"DOC(Variable Class.

All parameters, weights and gradients are variables in Paddle.
)DOC")
      .def("is_int", [](const pd::Variable& var) { return var.IsType<int>(); })
      .def("set_int",
           [](pd::Variable& var, int val) -> void {
             *var.GetMutable<int>() = val;
           })
      .def("get_int",
           [](const pd::Variable& var) -> int { return var.Get<int>(); })
      .def("get_tensor",
           [](pd::Variable& self) -> pd::Tensor* {
             return self.GetMutable<pd::Tensor>();
           },
           py::return_value_policy::reference)
      .def("get_net",
           [](pd::Variable& self) -> pd::NetOp* {
             return self.GetMutable<pd::NetOp>();
           },
           py::return_value_policy::reference);

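  // Scope maps variable names to Variables and owns a tree of child scopes.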
  py::class_<pd::Scope>(m, "Scope", "")
      .def("new_var",
           [](pd::Scope& self, const std::string& name) -> pd::Variable* {
             return self.NewVar(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &pd::Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope",
           [](pd::Scope& self) -> pd::Scope* { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &pd::Scope::DropKids);

  //! @note: Be careful! PyBind will return std::string as a unicode object,
  //! not a Python str. If you want a str object, cast it in Python.
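  // Returns every registered OpProto serialized to a binary protobuf string.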
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    auto& protos = pd::OpRegistry::protos();
    std::vector<py::bytes> ret_values;
    for (auto it = protos.begin(); it != protos.end(); ++it) {
      PADDLE_ENFORCE(it->second.IsInitialized(),
                     "OpProto must all be initialized");
      std::string str;
      PADDLE_ENFORCE(it->second.SerializeToString(&str),
                     "Serialize OpProto Error. This could be a bug of Paddle.");
      ret_values.push_back(py::bytes(str));
    }
    return ret_values;
  });
  m.def_submodule(
       "var_names",
       "The module will return special predefined variable name in Paddle")
      .def("empty", pd::OperatorBase::EMPTY_VAR_NAME)
      .def("temp", pd::OperatorBase::TMP_VAR_NAME);
  // clang-format off
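  // DeviceContext factories: create a CPU context, or a CUDA context when
  // Paddle is built with GPU support.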
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifdef PADDLE_ONLY_CPU
                    PADDLE_THROW("GPUPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  });
  // clang-format on

  py::class_<paddle::platform::GPUPlace>(m, "GPUPlace").def(py::init<int>());

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace").def(py::init<>());

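  // Operator wraps pd::OperatorBase; instances are created from a serialized
  // OpDesc protobuf via the static "create" method below.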
  py::class_<pd::OperatorBase, std::shared_ptr<pd::OperatorBase>> operator_base(
      m, "Operator");

  operator_base.def_static("create", [](py::bytes protobin) {
    pd::OpDesc desc;
    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                   "Cannot parse user input to OpDesc");
    PADDLE_ENFORCE(desc.IsInitialized(),
                   "User OpDesc is not initialized, reason %s",
                   desc.InitializationErrorString());
    return pd::OpRegistry::CreateOp(desc);
  });
  ExposeOperator(operator_base);

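  // Net is a NetOp: an operator that owns and runs a list of sub-operators.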
  py::class_<pd::NetOp, std::shared_ptr<pd::NetOp>> net(m, "Net");

  net.def_static("create",
                 []() -> std::shared_ptr<pd::NetOp> {
                   auto retv = std::make_shared<pd::NetOp>();
                   retv->type_ = "plain_net";
                   return retv;
                 })
      .def("add_op", &pd::NetOp::AddOp)
      .def("add_op",
           [](pd::NetOp& self, const std::shared_ptr<pd::NetOp>& net) -> void {
             self.AddOp(std::static_pointer_cast<pd::OperatorBase>(net));
           })
      .def("complete_add_op", &pd::NetOp::CompleteAddOp)
      .def("complete_add_op",
           [](std::shared_ptr<pd::NetOp>& self) { self->CompleteAddOp(); });
  ExposeOperator(net);

  m.def("unique_integer", UniqueIntegerGenerator);

  m.def("is_compile_gpu", IsCompileGPU);

  return m.ptr();
}