/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <Python.h>
#include <atomic>
#include <fstream>
#include <unordered_set>
#include <vector>

#include "paddle/framework/backward.h"
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"
#include "paddle/framework/scope.h"
#include "paddle/framework/tensor_py.h"
#include "paddle/operators/net_op.h"
#include "paddle/operators/type_alias.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

namespace py = pybind11;

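// Force-link the operator registrations that should be reachable from Python.
// USE_OP pulls in an operator together with its kernels, while
// USE_OP_WITHOUT_KERNEL pulls in only the operator definition.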
USE_OP(add_two);
USE_OP(onehot_cross_entropy);
USE_OP_WITHOUT_KERNEL(fc);
USE_OP(sgd);
USE_OP(mul);
USE_OP(mean);
USE_OP(sigmoid);
USE_OP(softmax);
USE_OP(rowwise_add);
USE_OP(fill_zeros_like);
USE_OP_WITHOUT_KERNEL(recurrent_op);

namespace paddle {
namespace framework {
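// Binds the methods shared by every operator wrapper (the plain Operator and
// the Net below) onto an existing py::class_; ClassType::type is the C++
// operator type wrapped by that py::class_.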
template <typename ClassType>
void ExposeOperator(ClassType &m) {
  m.def("infer_shape", &ClassType::type::InferShape)
      .def("run", &ClassType::type::Run)
      .def("type",
           [](const typename ClassType::type &op) -> std::string {
             return op.type_;
           })
      .def("outputs",
           [](const typename ClassType::type &op) -> std::vector<std::string> {
             return op.outputs_;
           })
      .def("__str__", &ClassType::type::DebugString);
}

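// Process-wide, thread-safe counter; exposed to Python below as
// core.unique_integer().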
static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> generator;
  return generator.fetch_add(1);
}

bool IsCompileGPU() {
#ifdef PADDLE_ONLY_CPU
  return false;
#else
  return true;
#endif
}

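// Module entry point. For orientation only, a sketch of how these bindings
// are typically driven from Python (the exact import path is an assumption
// that depends on how the extension module is packaged):
//
//   import paddle.v2.framework.core as core
//   scope = core.Scope()
//   var = scope.new_var("w")
//   tensor = var.get_tensor()
//   tensor.set_dims([2, 3])
//   tensor.alloc_float(core.CPUPlace())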
PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

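  // Tensor is exported with the pybind11 buffer protocol (CastToPyBuffer), so
  // its memory can be viewed from numpy without copying; the set() overloads
  // copy data in from a numpy array for the matching place and element type.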
  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("get_dims",
           [](const Tensor &self) { return vectorize(self.dims()); })
      .def("set_dims",
           [](Tensor &self, const std::vector<int> &dim) {
             self.Resize(make_ddim(dim));
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("set", PyCPUTensorSetFromArray<float>)
      .def("set", PyCPUTensorSetFromArray<int>)
#ifndef PADDLE_ONLY_CPU
      .def("set", PyCUDATensorSetFromArray<float>)
      .def("set", PyCUDATensorSetFromArray<int>)
#endif
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); });

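  // Variable is a typed value container; get_tensor and get_net return
  // pointers into the Variable itself, hence the reference return policy.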
  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.

All parameters, weights and gradients are variables in Paddle.
)DOC")
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
      .def("set_int",
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
      .def("get_tensor",
           [](Variable &self) -> Tensor * { return self.GetMutable<Tensor>(); },
           py::return_value_policy::reference)
      .def("get_net",
           [](Variable &self) -> ops::NetOp * {
             return self.GetMutable<ops::NetOp>();
           },
           py::return_value_policy::reference);

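  // A Scope owns its Variables and child Scopes, so new_var, find_var and
  // new_scope also hand back non-owning references.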
  py::class_<Scope>(m, "Scope", "")
      .def("new_var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.NewVar(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids);

  //! @note: Be careful! PyBind returns std::string as unicode, not as a
  //! Python str. If you want a str object, cast it in Python.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    auto &protos = OpRegistry::protos();
    std::vector<py::bytes> ret_values;
    for (auto it = protos.begin(); it != protos.end(); ++it) {
      PADDLE_ENFORCE(it->second.IsInitialized(),
                     "OpProto must all be initialized");
      std::string str;
      PADDLE_ENFORCE(it->second.SerializeToString(&str),
                     "Serialize OpProto Error. This could be a bug of Paddle.");
      ret_values.push_back(py::bytes(str));
    }
    return ret_values;
  });
  m.def_submodule(
       "var_names",
       "The module will return special predefined variable names in Paddle")
      .def("empty", OperatorBase::EMPTY_VAR_NAME)
      .def("temp", OperatorBase::TMP_VAR_NAME);
  // clang-format off
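  // DeviceContext.create is overloaded on the place type; the GPUPlace
  // overload throws when Paddle is built with PADDLE_ONLY_CPU.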
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifdef PADDLE_ONLY_CPU
                    PADDLE_THROW("GPUPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  });
  // clang-format on

  py::class_<paddle::platform::GPUPlace>(m, "GPUPlace").def(py::init<int>());

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace").def(py::init<>());

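  // Operator.create takes an OpDesc protobuf serialized to bytes by the Python
  // caller and lets OpRegistry build the concrete operator; backward() wraps
  // the Backward() transform that generates the gradient operators.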
  py::class_<OperatorBase, std::shared_ptr<OperatorBase>> operator_base(
      m, "Operator");

  operator_base.def_static("create", [](py::bytes protobin) {
    OpDesc desc;
    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                   "Cannot parse user input to OpDesc");
    PADDLE_ENFORCE(desc.IsInitialized(),
                   "User OpDesc is not initialized, reason %s",
                   desc.InitializationErrorString());
    return OpRegistry::CreateOp(desc);
  });

  operator_base.def("backward",
                    [](const OperatorBase &forwardOp,
                       const std::unordered_set<std::string> &no_grad_vars) {
                      return Backward(forwardOp, no_grad_vars);
                    });

  ExposeOperator(operator_base);

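  // NetOp is itself an operator that holds and runs a list of sub-operators,
  // so ExposeOperator(net) below gives Net the same interface as Operator.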
  py::class_<ops::NetOp, std::shared_ptr<ops::NetOp>> net(m, "Net");

  net.def_static("create",
                 []() -> std::shared_ptr<ops::NetOp> {
                   auto retv = std::make_shared<ops::NetOp>();
                   retv->type_ = "plain_net";
                   return retv;
                 })
      .def("add_op", &ops::NetOp::AddOp)
      .def(
          "add_op",
          [](ops::NetOp &self, const std::shared_ptr<ops::NetOp> &net) -> void {
            self.AddOp(std::static_pointer_cast<OperatorBase>(net));
          })
      .def("complete_add_op", &ops::NetOp::CompleteAddOp)
      .def("complete_add_op",
           [](std::shared_ptr<ops::NetOp> &self) { self->CompleteAddOp(); });

  ExposeOperator(net);

  m.def("unique_integer", UniqueIntegerGenerator);

  m.def("is_compile_gpu", IsCompileGPU);

  return m.ptr();
}
}  // namespace framework
}  // namespace paddle