pybind.cc 8.1 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

L
Luo Tao 已提交
15
#include <Python.h>

#include <atomic>
#include <fstream>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>

#include "paddle/framework/backward.h"
#include "paddle/framework/net.h"
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"
#include "paddle/framework/scope.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"
#include "paddle/pybind/tensor_bind.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

namespace py = pybind11;
namespace pd = paddle::framework;

// Reference each operator's registration symbol from this translation unit.
// NOTE(review): presumably USE_OP / USE_OP_WITHOUT_KERNEL force the linker to
// keep the operators' static registrars so they are available to Python —
// confirm against the macro definitions in op_registry.h.
USE_OP(add_two);
USE_OP(onehot_cross_entropy);
USE_OP_WITHOUT_KERNEL(fc);
USE_OP(sgd);
USE_OP(mul);
USE_OP(mean);
USE_OP(sigmoid);
USE_OP(softmax);
USE_OP(rowwise_add);
USE_OP_WITHOUT_KERNEL(recurrent_op);

Y
Yu Yang 已提交
45 46 47 48
/// Binds the methods common to every operator onto an already-declared
/// pybind11 class wrapper.
///
/// @tparam ClassType a py::class_<...> instantiation; `ClassType::type` is
///         the wrapped C++ operator type (e.g. pd::OperatorBase, pd::NetOp).
/// @param m the class wrapper to extend (passed by reference; mutated).
template <typename ClassType>
void ExposeOperator(ClassType& m) {
  m.def("infer_shape", &ClassType::type::InferShape)
      .def("run", &ClassType::type::Run)
      // Expose the operator's type name (the `type_` field) as a Python str.
      .def("type",
           [](const typename ClassType::type& op) -> std::string {
             return op.type_;
           })
      // Expose the list of output variable names.
      .def("outputs",
           [](const typename ClassType::type& op) -> std::vector<std::string> {
             return op.outputs_;
           })
      .def("__str__", &ClassType::type::DebugString);
}

/// Returns a process-wide unique integer, starting from 0 and increasing by
/// one per call.  Thread-safe: fetch_add on a std::atomic is an atomic
/// read-modify-write.
///
/// Fix: initialize the counter explicitly with {0}.  Pre-C++20,
/// std::atomic's default constructor leaves the value uninitialized; the
/// original only worked by relying on zero-initialization of static storage.
static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> generator{0};
  return generator.fetch_add(1);
}

Q
qijun 已提交
65 66 67 68 69 70 71 72
/// True iff this binary was compiled with GPU (CUDA) support,
/// i.e. PADDLE_ONLY_CPU was not defined at build time.
bool IsCompileGPU() {
#ifndef PADDLE_ONLY_CPU
  return true;
#else
  return false;
#endif
}

73
// Module entry point: builds the `core` extension module and registers all
// C++ types and free functions exposed to Python.
PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

  // --- Tensor -----------------------------------------------------------
  // Supports the Python buffer protocol so numpy can view tensor memory
  // without copying (via CastToPyBuffer).
  py::class_<pd::Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer([](pd::Tensor& self) -> py::buffer_info {
        return paddle::pybind::CastToPyBuffer(self);
      })
      .def("get_dims",
           [](const pd::Tensor& self) { return pd::vectorize(self.dims()); })
      .def("set_dims",
           [](pd::Tensor& self, const std::vector<int>& dim) {
             self.Resize(pd::make_ddim(dim));
           })
      // alloc_float / alloc_int are overloaded on the place type so Python
      // callers choose the device by passing a CPUPlace or GPUPlace.
      .def("alloc_float",
           [](pd::Tensor& self, paddle::platform::GPUPlace& place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](pd::Tensor& self, paddle::platform::CPUPlace& place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](pd::Tensor& self, paddle::platform::CPUPlace& place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](pd::Tensor& self, paddle::platform::GPUPlace& place) {
             self.mutable_data<int>(place);
           })
      .def("set", paddle::pybind::PyCPUTensorSetFromArray<float>)
      .def("set", paddle::pybind::PyCPUTensorSetFromArray<int>)
#ifndef PADDLE_ONLY_CPU
      // GPU setters exist only in CUDA builds.
      .def("set", paddle::pybind::PyCUDATensorSetFromArray<float>)
      .def("set", paddle::pybind::PyCUDATensorSetFromArray<int>)
#endif
      .def("shape",
           [](pd::Tensor& self) { return pd::vectorize(self.dims()); });

  // --- Variable ---------------------------------------------------------
  py::class_<pd::Variable>(m, "Variable", R"DOC(Variable Class.

All parameter, weight, gradient are variables in Paddle.
)DOC")
      .def("is_int", [](const pd::Variable& var) { return var.IsType<int>(); })
      .def("set_int",
           [](pd::Variable& var, int val) -> void {
             *var.GetMutable<int>() = val;
           })
      .def("get_int",
           [](const pd::Variable& var) -> int { return var.Get<int>(); })
      // The returned Tensor/NetOp is owned by the Variable; expose it as a
      // non-owning reference so pybind does not try to delete it.
      .def("get_tensor",
           [](pd::Variable& self) -> pd::Tensor* {
             return self.GetMutable<pd::Tensor>();
           },
           py::return_value_policy::reference)
      .def("get_net",
           [](pd::Variable& self) -> pd::NetOp* {
             return self.GetMutable<pd::NetOp>();
           },
           py::return_value_policy::reference);

  // --- Scope ------------------------------------------------------------
  // Variables and child scopes are owned by the Scope, hence the
  // reference return policies below.
  py::class_<pd::Scope>(m, "Scope", "")
      .def("new_var",
           [](pd::Scope& self, const std::string& name) -> pd::Variable* {
             return self.NewVar(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &pd::Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope",
           [](pd::Scope& self) -> pd::Scope* { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &pd::Scope::DropKids);

  //! @note: Be careful! PyBind will return std::string as an unicode, not
  //! Python str. If you want a str object, you should cast them in Python.
  // Serializes every registered OpProto to a protobuf byte string so the
  // Python side can parse them without linking against the C++ protos.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    auto& protos = pd::OpRegistry::protos();
    std::vector<py::bytes> ret_values;
    for (auto it = protos.begin(); it != protos.end(); ++it) {
      PADDLE_ENFORCE(it->second.IsInitialized(),
                     "OpProto must all be initialized");
      std::string str;
      PADDLE_ENFORCE(it->second.SerializeToString(&str),
                     "Serialize OpProto Error. This could be a bug of Paddle.");
      ret_values.push_back(py::bytes(str));
    }
    return ret_values;
  });
  // Predefined special variable names (empty / temporary placeholders).
  m.def_submodule(
       "var_names",
       "The module will return special predefined variable name in Paddle")
      .def("empty", pd::OperatorBase::EMPTY_VAR_NAME)
      .def("temp", pd::OperatorBase::TMP_VAR_NAME);
  // clang-format off
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifdef PADDLE_ONLY_CPU
                    // CPU-only builds cannot create a CUDA context.
                    PADDLE_THROW("GPUPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  });
  // clang-format on

  py::class_<paddle::platform::GPUPlace>(m, "GPUPlace").def(py::init<int>());

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace").def(py::init<>());

  // --- Operator ---------------------------------------------------------
  // Operators are held by shared_ptr on both sides of the binding.
  py::class_<pd::OperatorBase, std::shared_ptr<pd::OperatorBase>> operator_base(
      m, "Operator");

  // Create an operator from a serialized OpDesc protobuf (bytes).
  operator_base.def_static("create", [](py::bytes protobin) {
    pd::OpDesc desc;
    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                   "Cannot parse user input to OpDesc");
    PADDLE_ENFORCE(desc.IsInitialized(),
                   "User OpDesc is not initialized, reason %s",
                   desc.InitializationErrorString());
    return pd::OpRegistry::CreateOp(desc);
  });

  // Build the backward (gradient) network of a forward operator, skipping
  // the variables named in no_grad_vars.
  operator_base.def("backward",
                    [](const pd::OperatorBase& forwardOp,
                       const std::unordered_set<std::string>& no_grad_vars) {
                      return pd::Backward(forwardOp, no_grad_vars);
                    });

  ExposeOperator(operator_base);

  // --- Net (a composite operator) ---------------------------------------
  py::class_<pd::NetOp, std::shared_ptr<pd::NetOp>> net(m, "Net");

  net.def_static("create",
                 []() -> std::shared_ptr<pd::NetOp> {
                   auto retv = std::make_shared<pd::NetOp>();
                   retv->type_ = "plain_net";
                   return retv;
                 })
      .def("add_op", &pd::NetOp::AddOp)
      // Overload: adding a sub-net upcasts it to OperatorBase first.
      .def("add_op",
           [](pd::NetOp& self, const std::shared_ptr<pd::NetOp>& net) -> void {
             self.AddOp(std::static_pointer_cast<pd::OperatorBase>(net));
           })
      .def("complete_add_op", &pd::NetOp::CompleteAddOp)
      .def("complete_add_op",
           [](std::shared_ptr<pd::NetOp>& self) { self->CompleteAddOp(); });
  ExposeOperator(net);

  m.def("unique_integer", UniqueIntegerGenerator);

  m.def("is_compile_gpu", IsCompileGPU);

  return m.ptr();
}