pybind.cc 7.8 KB
Newer Older
1 2 3 4 5 6
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

7
http://www.apache.org/licenses/LICENSE-2.0
8 9 10 11 12 13 14

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

L
Luo Tao 已提交
15
#include <Python.h>

#include <atomic>
#include <fstream>
#include <unordered_set>
#include <vector>

#include "paddle/framework/backward.h"
#include "paddle/framework/net.h"
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"
#include "paddle/framework/scope.h"
#include "paddle/framework/tensor_bind.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

31 32
namespace py = pybind11;

Y
Yu Yang 已提交
33
USE_OP(add_two);
Q
Qiao Longfei 已提交
34
USE_OP(onehot_cross_entropy);
Y
Yu Yang 已提交
35
USE_OP_WITHOUT_KERNEL(fc);
Q
Qiao Longfei 已提交
36
USE_OP(sgd);
Q
qijun 已提交
37
USE_OP(mul);
L
liaogang 已提交
38
USE_OP(mean);
Q
qijun 已提交
39 40 41
USE_OP(sigmoid);
USE_OP(softmax);
USE_OP(rowwise_add);
Y
Yan Chunwei 已提交
42
USE_OP_WITHOUT_KERNEL(recurrent_op);
43 44
namespace paddle {
namespace framework {
Y
Yu Yang 已提交
45
template <typename ClassType>
46
void ExposeOperator(ClassType &m) {
Y
Yu Yang 已提交
47 48
  m.def("infer_shape", &ClassType::type::InferShape)
      .def("run", &ClassType::type::Run)
Q
Qiao Longfei 已提交
49
      .def("type",
50
           [](const typename ClassType::type &op) -> std::string {
Q
Qiao Longfei 已提交
51 52
             return op.type_;
           })
Y
Yu Yang 已提交
53
      .def("outputs",
54
           [](const typename ClassType::type &op) -> std::vector<std::string> {
Y
Yu Yang 已提交
55 56 57 58
             return op.outputs_;
           })
      .def("__str__", &ClassType::type::DebugString);
}
Y
Yu Yang 已提交
59

60 61 62 63 64
static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> generator;
  return generator.fetch_add(1);
}

Q
qijun 已提交
65 66 67 68 69 70 71 72
bool IsCompileGPU() {
#ifdef PADDLE_ONLY_CPU
  return false;
#else
  return true;
#endif
}

73
PYBIND11_PLUGIN(core) {
Y
Yu Yang 已提交
74
  py::module m("core", "C++ core of PaddlePaddle");
75

76 77 78
  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
Y
Yu Yang 已提交
79
      .def("get_dims",
80
           [](const Tensor &self) { return vectorize(self.dims()); })
Y
Yu Yang 已提交
81
      .def("set_dims",
82 83
           [](Tensor &self, const std::vector<int> &dim) {
             self.Resize(make_ddim(dim));
Y
Yu Yang 已提交
84 85
           })
      .def("alloc_float",
Y
Yu Yang 已提交
86
           [](Tensor &self, paddle::platform::GPUPlace &place) {
Q
qijun 已提交
87
             self.mutable_data<float>(place);
Y
Yu Yang 已提交
88
           })
Q
qijun 已提交
89
      .def("alloc_float",
Y
Yu Yang 已提交
90
           [](Tensor &self, paddle::platform::CPUPlace &place) {
Q
qijun 已提交
91
             self.mutable_data<float>(place);
Y
Yu Yang 已提交
92 93
           })
      .def("alloc_int",
Y
Yu Yang 已提交
94
           [](Tensor &self, paddle::platform::CPUPlace &place) {
Q
qijun 已提交
95
             self.mutable_data<int>(place);
Y
Yu Yang 已提交
96
           })
Q
qijun 已提交
97
      .def("alloc_int",
Y
Yu Yang 已提交
98
           [](Tensor &self, paddle::platform::GPUPlace &place) {
Q
qijun 已提交
99
             self.mutable_data<int>(place);
Q
qijun 已提交
100
           })
Y
Yu Yang 已提交
101 102
      .def("set", PyCPUTensorSetFromArray<float>)
      .def("set", PyCPUTensorSetFromArray<int>)
Q
qijun 已提交
103
#ifndef PADDLE_ONLY_CPU
Y
Yu Yang 已提交
104 105
      .def("set", PyCUDATensorSetFromArray<float>)
      .def("set", PyCUDATensorSetFromArray<int>)
Q
qijun 已提交
106
#endif
Y
Yu Yang 已提交
107
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); });
Y
Yu Yang 已提交
108

109
  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.
110 111 112

All parameter, weight, gradient are variables in Paddle.
)DOC")
113
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
114
      .def("set_int",
115 116
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
Y
Yu Yang 已提交
117
      .def("get_tensor",
118
           [](Variable &self) -> Tensor * { return self.GetMutable<Tensor>(); },
Y
Yan Chunwei 已提交
119 120
           py::return_value_policy::reference)
      .def("get_net",
121
           [](Variable &self) -> NetOp * { return self.GetMutable<NetOp>(); },
Y
Yu Yang 已提交
122
           py::return_value_policy::reference);
123

124
  py::class_<Scope>(m, "Scope", "")
Y
Yu Yang 已提交
125
      .def("new_var",
126
           [](Scope &self, const std::string &name) -> Variable * {
Y
Yu Yang 已提交
127 128
             return self.NewVar(name);
           },
129
           py::return_value_policy::reference)
130
      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
Y
Yu Yang 已提交
131
      .def(py::init<>())
132
      .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
133
           py::return_value_policy::reference)
134
      .def("drop_kids", &Scope::DropKids);
135

Y
Yu Yang 已提交
136 137
  //! @note: Be careful! PyBind will return std::string as an unicode, not
  //! Python str. If you want a str object, you should cast them in Python.
Y
Yu Yang 已提交
138
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
139
    auto &protos = OpRegistry::protos();
Y
Yu Yang 已提交
140
    std::vector<py::bytes> ret_values;
Y
Yu Yang 已提交
141
    for (auto it = protos.begin(); it != protos.end(); ++it) {
Y
Yu Yang 已提交
142 143
      PADDLE_ENFORCE(it->second.IsInitialized(),
                     "OpProto must all be initialized");
Y
Yu Yang 已提交
144 145
      std::string str;
      PADDLE_ENFORCE(it->second.SerializeToString(&str),
Y
Yu Yang 已提交
146
                     "Serialize OpProto Error. This could be a bug of Paddle.");
Y
Yu Yang 已提交
147
      ret_values.push_back(py::bytes(str));
Y
Yu Yang 已提交
148 149 150
    }
    return ret_values;
  });
151 152 153
  m.def_submodule(
       "var_names",
       "The module will return special predefined variable name in Paddle")
Y
Yu Yang 已提交
154 155
      .def("empty", OperatorBase::EMPTY_VAR_NAME)
      .def("temp", OperatorBase::TMP_VAR_NAME);
Q
qijun 已提交
156
  // clang-format off
Y
Yu Yang 已提交
157
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
Q
qijun 已提交
158 159
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
Q
qijun 已提交
160
                      -> paddle::platform::DeviceContext* {
Q
qijun 已提交
161 162 163 164 165
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
Q
qijun 已提交
166
#ifdef PADDLE_ONLY_CPU
Q
qijun 已提交
167
                    PADDLE_THROW("GPUPlace is not supported in CPU device.");
Q
qijun 已提交
168
#else
Q
qijun 已提交
169
                    return new paddle::platform::CUDADeviceContext(place);
Q
qijun 已提交
170
#endif
Q
qijun 已提交
171
                  });
Q
qijun 已提交
172
  // clang-format on
Q
qijun 已提交
173

Q
qijun 已提交
174
  py::class_<paddle::platform::GPUPlace>(m, "GPUPlace").def(py::init<int>());
Q
qijun 已提交
175

Q
qijun 已提交
176
  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace").def(py::init<>());
Y
Yu Yang 已提交
177

178
  py::class_<OperatorBase, std::shared_ptr<OperatorBase>> operator_base(
Y
Yu Yang 已提交
179
      m, "Operator");
Y
Yu Yang 已提交
180

Y
Yu Yang 已提交
181
  operator_base.def_static("create", [](py::bytes protobin) {
182
    OpDesc desc;
Y
Yu Yang 已提交
183 184 185 186 187
    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                   "Cannot parse user input to OpDesc");
    PADDLE_ENFORCE(desc.IsInitialized(),
                   "User OpDesc is not initialized, reason %s",
                   desc.InitializationErrorString());
188
    return OpRegistry::CreateOp(desc);
Y
Yu Yang 已提交
189
  });
Q
Qiao Longfei 已提交
190 191

  operator_base.def("backward",
Y
Yu Yang 已提交
192
                    [](const OperatorBase &forwardOp,
193
                       const std::unordered_set<std::string> &no_grad_vars) {
Y
Yu Yang 已提交
194
                      return Backward(forwardOp, no_grad_vars);
Q
Qiao Longfei 已提交
195 196
                    });

Y
Yu Yang 已提交
197 198
  ExposeOperator(operator_base);

199
  py::class_<NetOp, std::shared_ptr<NetOp>> net(m, "Net");
Y
Yu Yang 已提交
200 201

  net.def_static("create",
202 203
                 []() -> std::shared_ptr<NetOp> {
                   auto retv = std::make_shared<NetOp>();
Y
Yu Yang 已提交
204
                   retv->type_ = "plain_net";
Y
Yu Yang 已提交
205 206
                   return retv;
                 })
207
      .def("add_op", &NetOp::AddOp)
Y
Yu Yang 已提交
208
      .def("add_op",
209 210
           [](NetOp &self, const std::shared_ptr<NetOp> &net) -> void {
             self.AddOp(std::static_pointer_cast<OperatorBase>(net));
Y
Yu Yang 已提交
211
           })
212
      .def("complete_add_op", &NetOp::CompleteAddOp)
Y
Yu Yang 已提交
213
      .def("complete_add_op",
214
           [](std::shared_ptr<NetOp> &self) { self->CompleteAddOp(); });
Y
Yu Yang 已提交
215
  ExposeOperator(net);
Y
Yu Yang 已提交
216

217 218
  m.def("unique_integer", UniqueIntegerGenerator);

Q
qijun 已提交
219 220
  m.def("is_compile_gpu", IsCompileGPU);

221
  return m.ptr();
L
Luo Tao 已提交
222
}
223 224
}  // namespace framework
}  // namespace paddle