pybind.cc 7.9 KB
Newer Older
1 2 3 4 5 6
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

7
http://www.apache.org/licenses/LICENSE-2.0
8 9 10 11 12 13 14

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

L
Luo Tao 已提交
15
#include <Python.h>

#include <atomic>
#include <fstream>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>

#include "paddle/framework/backward.h"
#include "paddle/framework/net.h"
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"
#include "paddle/framework/scope.h"
#include "paddle/framework/tensor_bind.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"

#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

31 32
namespace py = pybind11;

// Registration markers: each USE_OP pulls the named operator's
// registration into this translation unit so the Python module can
// instantiate it at runtime (otherwise the linker may drop it).
USE_OP(add_two);
USE_OP(onehot_cross_entropy);
USE_OP(sgd);
USE_OP(mul);
USE_OP(mean);
USE_OP(sigmoid);
USE_OP(softmax);
USE_OP(rowwise_add);
// Operators registered without a compute kernel.
USE_OP_WITHOUT_KERNEL(fc);
USE_OP_WITHOUT_KERNEL(recurrent_op);
43 44
namespace paddle {
namespace framework {
Y
Yu Yang 已提交
45
// Attach the method set shared by every operator wrapper (Operator, Net)
// to the given pybind11 class binding. ClassType::type is the wrapped
// C++ operator class.
template <typename ClassType>
void ExposeOperator(ClassType &m) {
  m.def("infer_shape", &ClassType::type::InferShape)
      .def("run", &ClassType::type::Run)
      .def("type",
           [](const typename ClassType::type &self) -> std::string {
             return self.type_;
           })
      .def("outputs",
           [](const typename ClassType::type &self)
               -> std::vector<std::string> { return self.outputs_; })
      .def("__str__", &ClassType::type::DebugString);
}
Y
Yu Yang 已提交
59

60 61 62 63 64
// Hand out a process-wide unique, monotonically increasing integer id.
// The counter is a std::atomic, so concurrent callers each get a
// distinct value.
static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> counter{0};
  return counter++;  // post-increment == fetch_add(1): returns the old value
}

Q
qijun 已提交
65 66 67 68 69 70 71 72
// True when this binary was compiled with GPU support.
// PADDLE_ONLY_CPU is defined by the build system for CPU-only builds.
bool IsCompileGPU() {
#ifndef PADDLE_ONLY_CPU
  return true;
#else
  return false;
#endif
}

73
// Entry point of the "core" Python extension module. Binds the C++
// Tensor, Variable, Scope, Operator and Net classes plus device/place
// helpers. Returns the module object pointer to the Python runtime.
//
// NOTE(review): the original mixed `pd::Tensor` / `pd::OperatorBase`
// etc. with unqualified names; no `namespace pd` alias is declared in
// this file and we are already inside paddle::framework, so the
// unqualified spelling is the correct, consistent one.
PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

  // Tensor: exposes the buffer protocol so numpy can view the data
  // without copying, plus explicit dims/alloc/set helpers.
  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("get_dims",
           [](const Tensor &self) { return vectorize(self.dims()); })
      .def("set_dims",
           [](Tensor &self, const std::vector<int> &dim) {
             self.Resize(make_ddim(dim));
           })
      // alloc_* only reserves memory of the element type on the given
      // place; it does not initialize the contents.
      .def("alloc_float",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<int>(place);
           })
      // set copies a numpy array into the tensor.
      .def("set", paddle::pybind::PyCPUTensorSetFromArray<float>)
      .def("set", paddle::pybind::PyCPUTensorSetFromArray<int>)
#ifndef PADDLE_ONLY_CPU
      .def("set", paddle::pybind::PyCUDATensorSetFromArray<float>)
      .def("set", paddle::pybind::PyCUDATensorSetFromArray<int>)
#endif
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); });

  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.

All parameter, weight, gradient are variables in Paddle.
)DOC")
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
      .def("set_int",
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
      // The returned Tensor/Net is owned by the Variable, so hand it to
      // Python as a non-owning reference.
      .def("get_tensor",
           [](Variable &self) -> Tensor * { return self.GetMutable<Tensor>(); },
           py::return_value_policy::reference)
      .def("get_net",
           [](Variable &self) -> NetOp * { return self.GetMutable<NetOp>(); },
           py::return_value_policy::reference);

  // Scope: variable name -> Variable container with parent/child scopes.
  py::class_<Scope>(m, "Scope", "")
      .def("new_var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.NewVar(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids);

  //! @note: Be careful! PyBind will return std::string as an unicode, not
  //! Python str. If you want a str object, you should cast them in Python.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    auto &protos = OpRegistry::protos();
    std::vector<py::bytes> ret_values;
    for (auto it = protos.begin(); it != protos.end(); ++it) {
      PADDLE_ENFORCE(it->second.IsInitialized(),
                     "OpProto must all be initialized");
      std::string str;
      PADDLE_ENFORCE(it->second.SerializeToString(&str),
                     "Serialize OpProto Error. This could be a bug of Paddle.");
      // py::bytes, not py::str: the serialized proto is binary data.
      ret_values.push_back(py::bytes(str));
    }
    return ret_values;
  });
  m.def_submodule(
       "var_names",
       "The module will return special predefined variable name in Paddle")
      .def("empty", OperatorBase::EMPTY_VAR_NAME)
      .def("temp", OperatorBase::TMP_VAR_NAME);
  // clang-format off
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifdef PADDLE_ONLY_CPU
                    PADDLE_THROW("GPUPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  });
  // clang-format on

  py::class_<paddle::platform::GPUPlace>(m, "GPUPlace").def(py::init<int>());

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace").def(py::init<>());

  // Operator: created from a serialized OpDesc protobuf coming from the
  // Python side; shared_ptr because Python and C++ share ownership.
  py::class_<OperatorBase, std::shared_ptr<OperatorBase>> operator_base(
      m, "Operator");

  operator_base.def_static("create", [](py::bytes protobin) {
    OpDesc desc;
    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                   "Cannot parse user input to OpDesc");
    PADDLE_ENFORCE(desc.IsInitialized(),
                   "User OpDesc is not initialized, reason %s",
                   desc.InitializationErrorString());
    return OpRegistry::CreateOp(desc);
  });

  // backward(forward_op, no_grad_vars) -> gradient operator network.
  operator_base.def("backward",
                    [](const OperatorBase &forwardOp,
                       const std::unordered_set<std::string> &no_grad_vars) {
                      return Backward(forwardOp, no_grad_vars);
                    });

  ExposeOperator(operator_base);

  // Net: a composite operator holding a sequence of sub-operators.
  py::class_<NetOp, std::shared_ptr<NetOp>> net(m, "Net");

  net.def_static("create",
                 []() -> std::shared_ptr<NetOp> {
                   auto retv = std::make_shared<NetOp>();
                   retv->type_ = "plain_net";
                   return retv;
                 })
      .def("add_op", &NetOp::AddOp)
      .def("add_op",
           [](NetOp &self, const std::shared_ptr<NetOp> &net) -> void {
             self.AddOp(std::static_pointer_cast<OperatorBase>(net));
           })
      .def("complete_add_op", &NetOp::CompleteAddOp)
      .def("complete_add_op",
           [](std::shared_ptr<NetOp> &self) { self->CompleteAddOp(); });
  ExposeOperator(net);

  m.def("unique_integer", UniqueIntegerGenerator);

  m.def("is_compile_gpu", IsCompileGPU);

  return m.ptr();
}
224 225
}  // namespace framework
}  // namespace paddle