pybind.cc 8.5 KB
Newer Older
1 2 3 4 5 6
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

7
http://www.apache.org/licenses/LICENSE-2.0
8 9 10 11 12 13 14

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

L
Luo Tao 已提交
15
#include <Python.h>
Y
Yu Yang 已提交
16
#include <fstream>
Y
Yu Yang 已提交
17
#include <vector>
18

Q
Qiao Longfei 已提交
19
#include "paddle/framework/backward.h"
Y
Yu Yang 已提交
20
#include "paddle/framework/op_registry.h"
21
#include "paddle/framework/tensor_py.h"
Y
Yan Chunwei 已提交
22
#include "paddle/operators/net_op.h"
Q
qijun 已提交
23
#include "paddle/platform/enforce.h"
Q
qijun 已提交
24
#include "paddle/platform/place.h"
25
#include "paddle/string/to_string.h"
Y
Yu Yang 已提交
26 27 28 29
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

30 31
namespace py = pybind11;

Y
Yu Yang 已提交
32
// Operator registration pulls.
// NOTE(review): the USE_OP* macros presumably force the linker to keep the
// corresponding operator registrars in this module — confirm against
// paddle/framework/op_registry.h.
//   USE_OP            - operator with both CPU and GPU kernels
//   USE_OP_CPU        - operator with a CPU-only kernel
//   USE_OP_WITHOUT_KERNEL - operator that has no compute kernel of its own
USE_OP(add_two);
USE_OP_CPU(onehot_cross_entropy);
USE_OP_WITHOUT_KERNEL(fc);
USE_OP(sgd);
USE_OP(mul);
USE_OP(mean);
USE_OP(sigmoid);
USE_OP(softmax);
USE_OP(rowwise_add);
USE_OP(fill_zeros_like);
USE_OP_WITHOUT_KERNEL(recurrent_op);
USE_OP(uniform_random);
44 45
namespace paddle {
namespace framework {

// Shorthand for the framework Tensor type used throughout these bindings.
using Tensor = framework::Tensor;

Y
Yu Yang 已提交
49
// Binds the operator interface shared by all operator-like Python classes
// (infer_shape, run, type, outputs, __str__) onto an existing pybind11 class
// wrapper. `ClassType` is expected to be a py::class_<T, ...>; its nested
// `ClassType::type` is the wrapped C++ type (e.g. OperatorBase or
// operators::NetOp).
template <typename ClassType>
void ExposeOperator(ClassType &m) {
  m.def("infer_shape", &ClassType::type::InferShape)
      .def("run", &ClassType::type::Run)
      // Expose the operator's type name (the `type_` member) as a method.
      .def("type",
           [](const typename ClassType::type &op) -> std::string {
             return op.type_;
           })
      // Returns a copy of the operator's output-variable-name map
      // (parameter name -> list of variable names).
      .def("outputs",
           [](const typename ClassType::type &op)
               -> std::unordered_map<std::string, std::vector<std::string>> {
             return op.outputs_;
           })
      // DebugString gives Python's str(op) a human-readable representation.
      .def("__str__", &ClassType::type::DebugString);
}
Y
Yu Yang 已提交
64

65 66 67 68 69
// Returns a process-wide unique integer, starting at 0 and increasing by one
// per call. Thread-safe: the counter is a function-local static atomic.
static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> counter{0};
  return counter++;  // atomic post-increment == fetch_add(1)
}

Q
qijun 已提交
70 71 72 73 74 75 76 77
// Reports whether this binary was built with GPU support.
// PADDLE_ONLY_CPU is defined by the build system for CPU-only builds.
bool IsCompileGPU() {
#ifndef PADDLE_ONLY_CPU
  return true;
#else
  return false;
#endif
}

78
// Entry point of the `core` extension module: registers every C++ type and
// free function exposed to the Python side of Paddle. Uses the legacy
// PYBIND11_PLUGIN macro (returns the module's PyObject*).
PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

  // Tensor: buffer-protocol support plus shape manipulation, typed
  // allocation on CPU/GPU places, numpy import (`set`) and scalar access.
  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("get_dims",
           [](const Tensor &self) { return vectorize(self.dims()); })
      .def("set_dims",
           [](Tensor &self, const std::vector<int> &dim) {
             self.Resize(make_ddim(dim));
           })
      // alloc_* overloads allocate typed storage on the given place;
      // overload resolution on the place type picks CPU vs GPU.
      .def("alloc_float",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<int>(place);
           })
      // Copy data in from a numpy array (overloaded on element type).
      .def("set", PyCPUTensorSetFromArray<float>)
      .def("set", PyCPUTensorSetFromArray<int>)
#ifndef PADDLE_ONLY_CPU
      .def("set", PyCUDATensorSetFromArray<float>)
      .def("set", PyCUDATensorSetFromArray<int>)
#endif
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
      // Raw element access; only valid for CPU-resident float data —
      // the TODO below is from the original author.
      .def("set_float_element",
           [](Tensor &self, size_t offset, float f) {
             // TODO(yuyang18): Only support GPU now.
             self.data<float>()[offset] = f;
           })
      .def("get_float_element", [](Tensor &self, size_t offset) -> float {
        // TODO(yuyang18): Only support GPU now.
        return self.data<float>()[offset];
      });

  // Variable: a typed slot held by a Scope. get_tensor/get_net hand back
  // non-owning pointers, hence return_value_policy::reference.
  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.

All parameter, weight, gradient are variables in Paddle.
)DOC")
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
      .def("set_int",
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
      .def("get_tensor",
           [](Variable &self) -> Tensor * { return self.GetMutable<Tensor>(); },
           py::return_value_policy::reference)
      .def("get_net",
           [](Variable &self) -> operators::NetOp * {
             return self.GetMutable<operators::NetOp>();
           },
           py::return_value_policy::reference);

  // Scope: hierarchical variable container. The scope owns the returned
  // Variable/Scope pointers, so Python only borrows them (reference policy).
  py::class_<Scope>(m, "Scope", "")
      .def("new_var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.NewVar(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids);

  //! @note: Be careful! PyBind will return std::string as an unicode, not
  //! Python str. If you want a str object, you should cast them in Python.
  // Serializes every registered OpProto to bytes for the Python op wrappers.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    auto &protos = OpProtos();
    std::vector<py::bytes> ret_values;
    for (auto it = protos.begin(); it != protos.end(); ++it) {
      PADDLE_ENFORCE(it->second.IsInitialized(),
                     "OpProto must all be initialized");
      std::string str;
      PADDLE_ENFORCE(it->second.SerializeToString(&str),
                     "Serialize OpProto Error. This could be a bug of Paddle.");
      // py::bytes (not str) so the protobuf wire data survives round-trip.
      ret_values.push_back(py::bytes(str));
    }
    return ret_values;
  });
  // Submodule exposing the framework's reserved variable-name constants.
  m.def_submodule(
       "var_names",
       "The module will return special predefined variable name in Paddle")
      .def("empty", []() { return kEmptyVarName; })
      .def("temp", []() { return kTempVarName; });
  // clang-format off
  // DeviceContext factory; the GPU overload throws in CPU-only builds.
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifdef PADDLE_ONLY_CPU
                    PADDLE_THROW("GPUPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  });
  // clang-format on

  py::class_<platform::GPUPlace>(m, "GPUPlace")
      .def(py::init<int>())  // int argument is the device id
      .def("__str__", string::to_string<const platform::GPUPlace &>);

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace")
      .def(py::init<>())
      .def("__str__", string::to_string<const platform::CPUPlace &>);

  // Operator: created from a serialized OpDesc protobuf coming from Python.
  py::class_<OperatorBase, std::shared_ptr<OperatorBase>> operator_base(
      m, "Operator");

  operator_base.def_static("create", [](py::bytes protobin) {
    OpDesc desc;
    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                   "Cannot parse user input to OpDesc");
    PADDLE_ENFORCE(desc.IsInitialized(),
                   "User OpDesc is not initialized, reason %s",
                   desc.InitializationErrorString());
    return OpRegistry::CreateOp(desc);
  });

  // Builds the backward (gradient) operator for a forward op, skipping the
  // variables named in no_grad_vars.
  operator_base.def("backward",
                    [](const OperatorBase &forwardOp,
                       const std::unordered_set<std::string> &no_grad_vars) {
                      return Backward(forwardOp, no_grad_vars);
                    });

  ExposeOperator(operator_base);

  // Net: a NetOp is a container operator composed of sub-operators.
  py::class_<operators::NetOp, std::shared_ptr<operators::NetOp>> net(m, "Net");

  net.def_static("create",
                 []() -> std::shared_ptr<operators::NetOp> {
                   auto retv = std::make_shared<operators::NetOp>();
                   retv->type_ = "plain_net";
                   return retv;
                 })
      .def("add_op", &operators::NetOp::AddOp)
      // Overload so a Net can be nested inside another Net; the shared_ptr
      // is upcast to OperatorBase before insertion.
      .def("add_op",
           [](operators::NetOp &self,
              const std::shared_ptr<operators::NetOp> &net) -> void {
             self.AddOp(std::static_pointer_cast<OperatorBase>(net));
           })
      .def("complete_add_op", &operators::NetOp::CompleteAddOp)
      .def("complete_add_op", [](std::shared_ptr<operators::NetOp> &self) {
        self->CompleteAddOp();
      });

  ExposeOperator(net);

  // Misc module-level utilities.
  m.def("unique_integer", UniqueIntegerGenerator);

  m.def("is_compile_gpu", IsCompileGPU);

  return m.ptr();
}
246 247
}  // namespace framework
}  // namespace paddle