/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <Python.h>
#include <atomic>  // std::atomic, used by UniqueIntegerGenerator below
#include <fstream>
#include <vector>

#include "paddle/framework/backward.h"
#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/cond_op.h"
#include "paddle/operators/net_op.h"
#include "paddle/operators/recurrent_op.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"
#include "paddle/pybind/exception.h"
#include "paddle/pybind/pybind.h"
#include "paddle/pybind/tensor_py.h"
#include "paddle/string/to_string.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

namespace py = pybind11;

namespace paddle {
namespace pybind {
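// A process-wide, thread-safe counter: each call returns the next integer, so
// callers can build unique identifiers without extra coordination.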
static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> generator;
  return generator.fetch_add(1);
}

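// Whether this build of Paddle was compiled with GPU support
// (PADDLE_ONLY_CPU is defined for CPU-only builds).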
bool IsCompileGPU() {
#ifdef PADDLE_ONLY_CPU
  return false;
#else
  return true;
#endif
}

PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

  // The `using namespace` directive is confined to this function, so it does
  // not pollute the enclosing namespaces.
  using namespace paddle::framework;  // NOLINT

  BindException(m);

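  // framework::Tensor: exposes the buffer protocol so Python (e.g. numpy) can
  // inspect the tensor's memory, plus helpers to resize it and allocate data
  // on either CPU or GPU places.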
  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("get_dims",
           [](const Tensor &self) { return vectorize(self.dims()); })
      .def("set_dims",
           [](Tensor &self, const std::vector<int64_t> &dim) {
             self.Resize(make_ddim(dim));
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("set", PyCPUTensorSetFromArray<float>)
      .def("set", PyCPUTensorSetFromArray<int>)
#ifndef PADDLE_ONLY_CPU
      .def("set", PyCUDATensorSetFromArray<float>)
      .def("set", PyCUDATensorSetFromArray<int>)
#endif
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
      .def("set_float_element",
           [](Tensor &self, size_t offset, float f) {
             // TODO(yuyang18): Only support GPU now.
             self.data<float>()[offset] = f;
           })
      .def("get_float_element", [](Tensor &self, size_t offset) -> float {
        // TODO(yuyang18): Only support GPU now.
        return self.data<float>()[offset];
      });

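  // LoDTensor: a Tensor plus level-of-detail (LoD) information describing how
  // variable-length sequences are laid out. On GPU builds the LoD element type
  // is not std::vector, so inputs coming from Python are copied into a LoD
  // first.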
  py::class_<LoDTensor, Tensor>(m, "LoDTensor")
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def(
          "__init__",
          [](LoDTensor &instance, const std::vector<std::vector<size_t>> &lod) {
#ifdef PADDLE_ONLY_CPU
            new (&instance) LoDTensor(lod);
#else
            LoD new_lod;
            new_lod.reserve(lod.size());
            std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
            new (&instance) LoDTensor(new_lod);
#endif
          })
      .def("set_lod",
           [](LoDTensor &self, const std::vector<std::vector<size_t>> &lod) {
#ifdef PADDLE_ONLY_CPU
             self.set_lod(lod);
#else
             LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             self.set_lod(new_lod);
#endif
           })
      .def("lod", [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
#ifdef PADDLE_ONLY_CPU
        return self.lod();
#else
        auto lod = self.lod();
        std::vector<std::vector<size_t>> new_lod;
        new_lod.reserve(lod.size());
        std::transform(lod.begin(), lod.end(), std::back_inserter(new_lod),
                       [](Vector<size_t> item) -> std::vector<size_t> {
                         std::vector<size_t> v;
                         v.reserve(item.size());
                         std::copy(item.begin(), item.end(),
                                   std::back_inserter(v));
                         return v;
                       });
        return new_lod;
#endif
      });

  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.

All parameters, weights, and gradients are variables in Paddle.
)DOC")
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
      .def("set_int",
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
      .def("get_tensor",
           [](Variable &self) -> LoDTensor * {
             return self.GetMutable<LoDTensor>();
           },
           py::return_value_policy::reference)
      .def("get_net",
           [](Variable &self) -> operators::NetOp * {
             return self.GetMutable<operators::NetOp>();
           },
           py::return_value_policy::reference);

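  // Scope: a container of named Variables. new_scope creates a child scope
  // and drop_kids discards all child scopes.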
  py::class_<Scope>(m, "Scope", "")
      .def("new_var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.NewVar(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope",
           [](Scope &self) -> Scope * { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids);

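  // get_all_op_protos returns every registered operator's OpProto serialized
  // to protobuf bytes; operators without a proto/checker are skipped.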
  //! @note: Be careful! PyBind returns std::string as a Python unicode object,
  //! not a Python str. If you want a str object, cast it in Python.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    std::vector<py::bytes> ret_values;

    OpInfoMap::Instance().IterAllInfo([&ret_values](const std::string &type,
                                                    const OpInfo &info) {
      if (!info.HasOpProtoAndChecker()) return;
      std::string str;
      PADDLE_ENFORCE(info.Proto().SerializeToString(&str),
                     "Serialize OpProto Error. This could be a bug of Paddle.");
      ret_values.emplace_back(str);
    });
    return ret_values;
  });
  m.def_submodule(
       "var_names",
       "This module returns the special predefined variable names in Paddle")
      .def("empty", []() { return kEmptyVarName; })
      .def("temp", []() { return kTempVarName; });
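  // DeviceContext factory: "create" is overloaded on the place type. Asking
  // for a GPU context in a CPU-only build throws at call time.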
  // clang-format off
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifdef PADDLE_ONLY_CPU
                    PADDLE_THROW("GPUPlace is not supported in a CPU-only build.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  });
  // clang-format on

  py::class_<platform::GPUPlace>(m, "GPUPlace")
      .def(py::init<int>())
      .def("__str__", string::to_string<const platform::GPUPlace &>);

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace")
      .def(py::init<>())
      .def("__str__", string::to_string<const platform::CPUPlace &>);

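  // OperatorBase: "create" takes an OpDesc serialized to protobuf bytes (built
  // by the Python wrappers); "backward" builds the gradient operators for a
  // forward op, skipping the variables named in no_grad_vars.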
  py::class_<OperatorBase>(m, "Operator")
      .def_static("create",
                  [](py::bytes protobin) {
                    OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    return OpRegistry::CreateOp(desc);
                  })
      .def("backward",
           [](const OperatorBase &forwardOp,
              const std::unordered_set<std::string> &no_grad_vars) {
             return Backward(forwardOp, no_grad_vars).release();
           })
      .def("infer_shape", &OperatorBase::InferShape)
      .def("run",
           [](OperatorBase &self,
              const Scope &scope,
              const platform::DeviceContext &dev_ctx) {
             self.Run(scope, dev_ctx);
             dev_ctx.Wait();
           })
      .def("type",
           [](const OperatorBase &op) -> std::string { return op.Type(); })
      .def("outputs",
           [](const OperatorBase &op)
               -> std::map<std::string, std::vector<std::string>> {
                 return op.Outputs();
               })
      .def("output_vars",
           [](const OperatorBase &op) { return op.OutputVars(true); })
      .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
      .def("input_vars", [](const OperatorBase &op) { return op.InputVars(); })
      .def("__str__", &OperatorBase::DebugString)
      .def("no_intermediate_outputs",
           [](const OperatorBase &op) { return op.OutputVars(false); })
      .def("support_gpu", &OperatorBase::SupportGPU);

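  // NetOp ("plain_net"): an operator that runs the sub-operators appended to
  // it, in order; complete_add_op finishes construction once all sub-ops have
  // been appended.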
  py::class_<operators::NetOp, OperatorBase>(m, "Net")
      .def_static("create",
                  []() -> operators::NetOp * {
                    auto *retv = new operators::NetOp;
                    retv->SetType("plain_net");
                    return retv;
                  })
      .def("append_op",
           [](operators::NetOp &self, const OperatorBase &op) {
             self.AppendOp(op);
           })
      .def("complete_add_op", &operators::NetOp::CompleteAddOp)
      .def("complete_add_op", [](std::shared_ptr<operators::NetOp> &self) {
        self->CompleteAddOp();
      });

  // recurrent_op
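  // RecurrentOp is created from a serialized OpDesc, just like a plain
  // operator; set_stepnet installs a clone of the NetOp that is run at every
  // time step.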
  py::class_<operators::RecurrentOp, OperatorBase>(m, "RecurrentOp")
      .def_static(
          "create",
          [](py::bytes protobin) -> operators::RecurrentOp * {
            OpDesc desc;
            PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                           "Cannot parse user input to OpDesc");
            PADDLE_ENFORCE(desc.IsInitialized(),
                           "User OpDesc is not initialized, reason %s",
                           desc.InitializationErrorString());
            auto rnn_op = OpRegistry::CreateOp(desc);
            return static_cast<operators::RecurrentOp *>(rnn_op.release());
          })
      .def("set_stepnet",
           [](operators::RecurrentOp &self, const operators::NetOp &net)
               -> void { self.set_stepnet(net.Clone()); });

  // cond_op
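  // CondOp is also built from a serialized OpDesc and carries two
  // sub-networks; set_truenet and set_falsenet install clones of the true and
  // false branches.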
  py::class_<operators::CondOp, OperatorBase>(m, "CondOp")
      .def_static("create",
                  [](py::bytes protobin) -> operators::CondOp * {
                    OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    auto cond_op = OpRegistry::CreateOp(desc);
                    return static_cast<operators::CondOp *>(cond_op.release());
                  })
      .def("set_truenet",
           [](operators::CondOp &self, const operators::NetOp &net) -> void {
             self.set_truenet(net.Clone());
           })
      .def("set_falsenet",
           [](operators::CondOp &self, const operators::NetOp &net) -> void {
             self.set_falsenet(net.Clone());
           });

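  // Expose the module-level helpers defined at the top of this file.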
  m.def("unique_integer", UniqueIntegerGenerator);

  m.def("is_compile_gpu", IsCompileGPU);

  return m.ptr();
}
}  // namespace pybind
}  // namespace paddle