/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <atomic>

#include "paddle/pybind/protobuf.h"

#include "paddle/framework/backward.h"
#include "paddle/framework/executor.h"
#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/selected_rows.h"
#include "paddle/framework/tensor_array.h"
#include "paddle/operators/cond_op.h"
#include "paddle/operators/dynamic_recurrent_op.h"
#include "paddle/operators/net_op.h"
#include "paddle/operators/recurrent_op.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"
#include "paddle/pybind/exception.h"
#include "paddle/pybind/pybind.h"
#include "paddle/pybind/tensor_py.h"
#include "paddle/string/to_string.h"

namespace paddle {
namespace pybind {
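// Process-wide monotonically increasing counter; exposed to Python below as
// "unique_integer". std::atomic keeps concurrent calls safe.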
static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> generator;
  return generator.fetch_add(1);
}

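// True iff this binary was built with CUDA support; exposed to Python as
// "is_compile_gpu".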
bool IsCompileGPU() {
#ifndef PADDLE_WITH_CUDA
  return false;
#else
  return true;
#endif
}

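// A minimal sketch of how these bindings look from the Python side (the
// import path is an assumption; it depends on how the extension is packaged):
//
//   import paddle.v2.framework.core as core
//   place = core.CPUPlace()
//   tensor = core.Tensor()
//   tensor.set_dims([2, 3])
//   tensor.alloc_float(place)
//   print(tensor.shape())  # [2, 3]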
PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

  // Use paddle::framework inside this function only. Since the using-directive
  // is function-local, it does not pollute the global namespace.
  using namespace paddle::framework;  // NOLINT

  BindException(m);

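  // Tensor binding. py::buffer_protocol() lets Python (e.g. numpy) view the
  // tensor's memory directly through CastToPyBuffer instead of copying it.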
  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("get_dims",
           [](const Tensor &self) { return vectorize(self.dims()); })
      .def("set_dims",
           [](Tensor &self, const std::vector<int64_t> &dim) {
             self.Resize(make_ddim(dim));
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("set", PyCPUTensorSetFromArray<float>)
      .def("set", PyCPUTensorSetFromArray<int>)
      .def("set", PyCPUTensorSetFromArray<double>)
#ifdef PADDLE_WITH_CUDA
      .def("set", PyCUDATensorSetFromArray<float>)
      .def("set", PyCUDATensorSetFromArray<int>)
      .def("set", PyCUDATensorSetFromArray<double>)
#endif
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
      .def("set_float_element", TensorSetElement<float>)
      .def("get_float_element", TensorGetElement<float>)
      .def("set_double_element", TensorSetElement<double>)
      .def("get_double_element", TensorGetElement<double>)
      .def("dtype", [](Tensor &self) { return ToDataType(self.type()); });

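  // LoDTensor extends Tensor with LoD (level-of-details) information that
  // records sequence boundaries. In CUDA builds the LoD type differs from
  // std::vector, so the Python-side lod is copied into a LoD first.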
  py::class_<LoDTensor, Tensor>(m, "LoDTensor")
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def(
          "__init__",
          [](LoDTensor &instance, const std::vector<std::vector<size_t>> &lod) {
#ifndef PADDLE_WITH_CUDA
            new (&instance) LoDTensor(lod);
#else
             LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             new (&instance) LoDTensor(new_lod);
#endif
          })
      .def("set_lod",
           [](LoDTensor &self, const std::vector<std::vector<size_t>> &lod) {
#ifndef PADDLE_WITH_CUDA
             self.set_lod(lod);
#else
             LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             self.set_lod(new_lod);
#endif
           })
      .def("lod", [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
#ifndef PADDLE_WITH_CUDA
        return self.lod();
#else
           auto lod = self.lod();
           std::vector<std::vector<size_t>> new_lod;
           new_lod.reserve(lod.size());
           std::transform(lod.begin(), lod.end(), std::back_inserter(new_lod),
               [](Vector<size_t> item) -> std::vector<size_t> {
                 std::vector<size_t> v;
                 v.reserve(item.size());
                 std::copy(item.begin(), item.end(), std::back_inserter(v));
                 return v;
               });
           return new_lod;
#endif
      });

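  // SelectedRows is a sparse representation: it holds only the selected rows
  // of a conceptually `height`-row tensor, together with their row indices.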
  py::class_<SelectedRows>(m, "SelectedRows")
      .def("__init__",
           [](SelectedRows &instance) { new (&instance) SelectedRows(); })
      .def("__init__",
           [](SelectedRows &instance, const std::vector<int64_t> rows,
              const int64_t &height) {
             new (&instance) SelectedRows(rows, height);
           })
      .def("get_tensor",
           [](SelectedRows &self) { return self.mutable_value(); },
           py::return_value_policy::reference)
      .def("set_height", &SelectedRows::set_height)
      .def("height", &SelectedRows::height)
      .def("set_rows", &SelectedRows::set_rows)
      .def("rows", &SelectedRows::rows, py::return_value_policy::reference);

  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.

All parameters, weights, and gradients are Variables in Paddle.
)DOC")
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
      .def("set_int",
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
      .def("is_float", [](const Variable &var) { return var.IsType<float>(); })
      .def("set_float",
           [](Variable &var, float val) -> void {
             *var.GetMutable<float>() = val;
           })
      .def("get_float",
           [](const Variable &var) -> float { return var.Get<float>(); })
      .def("get_tensor",
           [](Variable &self) -> LoDTensor * {
             return self.GetMutable<LoDTensor>();
           },
           py::return_value_policy::reference)
      .def("get_net",
           [](Variable &self) -> operators::NetOp * {
             return self.GetMutable<operators::NetOp>();
           },
           py::return_value_policy::reference);

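  // Scope owns Variables and forms a tree: new_scope() creates a child scope,
  // drop_kids() destroys all children.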
  py::class_<Scope>(m, "Scope", "")
      .def("var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.Var(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids);

  //! @note: Be careful! pybind11 returns std::string as a unicode object, not
  //! as a Python str. If you want a str object, cast it on the Python side.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    std::vector<py::bytes> ret_values;

    OpInfoMap::Instance().IterAllInfo([&ret_values](const std::string &type,
                                                    const OpInfo &info) {
      if (!info.HasOpProtoAndChecker()) return;
      std::string str;
      PADDLE_ENFORCE(info.Proto().SerializeToString(&str),
                     "Serialize OpProto Error. This could be a bug of Paddle.");
      ret_values.emplace_back(str);
    });
    return ret_values;
  });
  m.def_submodule(
       "var_names",
       "The module will return special predefined variable name in Paddle")
      .def("empty", []() { return kEmptyVarName; })
      .def("temp", []() { return kTempVarName; });
  // clang-format off
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifndef PADDLE_WITH_CUDA
                    PADDLE_THROW("GPUPlace is not supported in a CPU-only build.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  });
  // clang-format on

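  // Places identify where memory lives; GPUPlace carries the device ordinal.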
  py::class_<platform::GPUPlace>(m, "GPUPlace")
      .def(py::init<int>())
      .def("__str__", string::to_string<const platform::GPUPlace &>);

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace")
      .def(py::init<>())
      .def("__str__", string::to_string<const platform::CPUPlace &>);

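  // OperatorBase binding. Operators are created from an OpDesc protobuf
  // serialized on the Python side; "backward" builds the gradient ops of a
  // forward op, skipping the variables listed in no_grad_vars.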
  py::class_<OperatorBase>(m, "Operator")
      .def_static("create",
                  [](py::bytes protobin) {
                    OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    return OpRegistry::CreateOp(desc);
                  })
      .def("backward",
           [](const OperatorBase &forwardOp,
              const std::unordered_set<std::string> &no_grad_vars) {
             return Backward(forwardOp, no_grad_vars).release();
           })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::DeviceContext &dev_ctx) {
             self.Run(scope, dev_ctx);
             dev_ctx.Wait();
           })
      .def("type",
           [](const OperatorBase &op) -> std::string { return op.Type(); })
      .def("outputs",
           [](const OperatorBase &op)
               -> std::map<std::string, std::vector<std::string>> {
                 return op.Outputs();
               })
      .def("output_vars",
           [](const OperatorBase &op) { return op.OutputVars(true); })
      .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
      .def("input_vars", [](const OperatorBase &op) { return op.InputVars(); })
      .def("__str__", &OperatorBase::DebugString)
      .def("no_intermediate_outputs",
           [](const OperatorBase &op) { return op.OutputVars(false); })
      .def("support_gpu", &OperatorBase::SupportGPU);

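  // NetOp is a container operator that runs its appended operators in order.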
  py::class_<operators::NetOp, OperatorBase>(m, "Net")
      .def_static("create",
                  []() -> operators::NetOp * {
                    auto *retv = new operators::NetOp;
                    retv->SetType("plain_net");
                    return retv;
                  })
      .def("append_op", [](operators::NetOp &self,
                           const OperatorBase &op) { self.AppendOp(op); })
      .def("complete_add_op", &operators::NetOp::CompleteAddOp)
      .def("complete_add_op", [](std::shared_ptr<operators::NetOp> &self) {
        self->CompleteAddOp();
      });

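  // TensorArray binding. pack/unpack convert between one LoD-organized batch
  // tensor and per-time-step tensors; each meta_info row is a
  // (begin, end, ori_idx) triple describing one sequence.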
  py::class_<framework::TensorArray>(m, "TensorArray")
      .def("__init__",
           [](TensorArray &instance) { new (&instance) TensorArray(); })
      .def("read",
           [](TensorArray &self, size_t index) { return self.Read(index); })
      .def("write", [](TensorArray &self, size_t index,
                       LoDTensor &value) { self.Write(index, value); })
      .def("write_shared",
           [](TensorArray &self, size_t index, const LoDTensor &value) {
             self.WriteShared(index, value);
           })
      .def("size", [](TensorArray &self) { return self.size(); })
      .def("pack",
           [](TensorArray &self, size_t level,
              const std::vector<std::vector<size_t>> &meta_info,
              const std::vector<std::vector<size_t>> &lod) {
             std::vector<DySeqMeta> meta;
             for (auto &info : meta_info) {
               PADDLE_ENFORCE_EQ(info.size(), 3UL);
               meta.emplace_back(info[0], info[1], info[2]);
             }
#ifndef PADDLE_WITH_CUDA
             return self.Pack(level, meta, lod);
#else
             LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return self.Pack(level, meta, new_lod);
#endif
           })
      .def("unpack",
           [](TensorArray &self, const LoDTensor &source, int level,
              bool length_descend) {
             auto metas = self.Unpack(source, level, length_descend);
             std::vector<std::vector<size_t>> meta_info;
             for (auto meta : metas) {
               meta_info.emplace_back(
                   std::vector<size_t>({meta.begin, meta.end, meta.ori_idx}));
             }
             return meta_info;
           })
      .def("stack", [](TensorArray &self) { return self.Stack(); })
      .def("unstack",
           [](TensorArray &self, const LoDTensor &source) {
             return self.Unstack(source);
           })
      .def("unstack_shared", [](TensorArray &self, const LoDTensor &source) {
        return self.UnstackShared(source);
      });

  // recurrent_op
  py::class_<operators::RecurrentOp, OperatorBase>(m, "RecurrentOp")
      .def_static(
          "create",
          [](py::bytes protobin) -> operators::RecurrentOp * {
            OpDesc desc;
            PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                           "Cannot parse user input to OpDesc");
            PADDLE_ENFORCE(desc.IsInitialized(),
                           "User OpDesc is not initialized, reason %s",
                           desc.InitializationErrorString());
            auto rnn_op = OpRegistry::CreateOp(desc);
            return static_cast<operators::RecurrentOp *>(rnn_op.release());
          })
      .def("set_stepnet", [](operators::RecurrentOp &self,
                             const operators::NetOp &net) -> void {
        self.set_stepnet(net.Clone());
      });

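  // DynamicRecurrentOp runs an RNN over variable-length sequences; its states
  // and per-step inputs/outputs are exposed to Python as TensorArrays.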
  py::class_<operators::DynamicRecurrentOp, OperatorBase>(m,
                                                          "DynamicRecurrentOp")
      .def_static("create",
                  [](py::bytes protobin) -> operators::DynamicRecurrentOp * {
                    OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    auto rnn_op = OpRegistry::CreateOp(desc);
                    return static_cast<operators::DynamicRecurrentOp *>(
                        rnn_op.release());
                  })
      .def("set_stepnet",
           [](operators::DynamicRecurrentOp &self, const operators::NetOp &net)
               -> void { self.SetStepNet(net.Clone()); })
      .def("get_state",
           [](operators::DynamicRecurrentOp &self, const std::string &name)
               -> const TensorArray & { return self.state(name); })
      .def("get_step_input",
           [](operators::DynamicRecurrentOp &self, const std::string &name)
               -> const TensorArray & { return self.step_input(name); })
      .def("get_step_output",
           [](operators::DynamicRecurrentOp &self, const std::string &name)
               -> const TensorArray & { return self.step_output(name); });

  // cond_op
  py::class_<operators::CondOp, OperatorBase>(m, "CondOp")
      .def_static("create",
                  [](py::bytes protobin) -> operators::CondOp * {
                    OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    auto cond_op = OpRegistry::CreateOp(desc);
                    return static_cast<operators::CondOp *>(cond_op.release());
                  })
      .def("set_truenet",
           [](operators::CondOp &self, const operators::NetOp &net) -> void {
             self.set_truenet(net.Clone());
           })
      .def("set_falsenet",
           [](operators::CondOp &self, const operators::NetOp &net) -> void {
             self.set_falsenet(net.Clone());
           });

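  // Executor runs one block of a ProgramDesc against the global scope.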
  py::class_<framework::Executor>(m, "Executor")
      .def(py::init<std::vector<platform::Place> &>())
      .def("run",
           [](Executor &self, const ProgramDesc &program_desc, int block_id) {
             framework::Scope &global_scope = GetGlobalScope();
             self.Run(program_desc, &global_scope, block_id);
           });

  m.def("unique_integer", UniqueIntegerGenerator);

  m.def("is_compile_gpu", IsCompileGPU);

  BindProgramDesc(m);
  BindBlockDesc(m);
  BindVarDsec(m);
  BindOpDesc(m);

  return m.ptr();
}
}  // namespace pybind
}  // namespace paddle