/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <Python.h>
#include <algorithm>
#include <map>
#include <memory>
#include <mutex>  // NOLINT // for call_once
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/garbage_collector.h"
#include "paddle/fluid/framework/ir/pass_builder.h"
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/parallel_executor.h"
#include "paddle/fluid/framework/prune.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/framework/scope_pool.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/version.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/imperative/profiler.h"
#include "paddle/fluid/memory/allocation/allocator_strategy.h"
#include "paddle/fluid/memory/allocation/legacy_allocator.h"
#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/py_func_op.h"
#include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/pybind/async_executor_py.h"
#include "paddle/fluid/pybind/const_value.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/fluid/pybind/imperative.h"
#include "paddle/fluid/pybind/inference_api.h"
#include "paddle/fluid/pybind/ir.h"
#include "paddle/fluid/pybind/protobuf.h"
#include "paddle/fluid/pybind/pybind.h"  // NOLINT
#include "paddle/fluid/pybind/reader_py.h"
#include "paddle/fluid/pybind/recordio.h"
#include "paddle/fluid/pybind/tensor_py.h"

#include "paddle/fluid/string/to_string.h"

#ifdef PADDLE_WITH_CUDA
#ifndef _WIN32
#include "paddle/fluid/operators/nccl/nccl_gpu_common.h"
#endif
#include "paddle/fluid/platform/cuda_profiler.h"
#include "paddle/fluid/platform/gpu_info.h"
#endif

#include "pybind11/stl.h"

DEFINE_bool(reader_queue_speed_test_mode, false,
            "If set true, the queue.pop will only get data from queue but not "
            "remove the data from queue for speed testing");

DECLARE_double(eager_delete_tensor_gb);
DECLARE_bool(fast_eager_deletion_mode);
DECLARE_double(memory_fraction_of_eager_deletion);

// disable auto conversion to list in Python
PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray);

namespace paddle {
namespace pybind {
bool IsCompiledWithCUDA() {
#ifndef PADDLE_WITH_CUDA
  return false;
#else
  return true;
#endif
}

bool IsCompiledWithMKLDNN() {
#ifndef PADDLE_WITH_MKLDNN
  return false;
#else
  return true;
#endif
}

bool IsCompiledWithNGRAPH() {
#ifndef PADDLE_WITH_NGRAPH
  return false;
#else
  return true;
#endif
}

bool IsCompiledWithBrpc() {
#ifndef PADDLE_WITH_DISTRIBUTE
  return false;
#endif

#ifdef PADDLE_WITH_GRPC
  return false;
#endif

  return true;
}

bool IsCompiledWithDIST() {
#ifdef PADDLE_WITH_DISTRIBUTE
  return true;
#else
  return false;
#endif
}

template <typename PlaceType1, typename PlaceType2>
static inline bool IsSamePlace(const PlaceType1 &p1, const PlaceType2 &p2) {
  return paddle::platform::Place(p1) == paddle::platform::Place(p2);
}

template <typename PlaceType>
static inline int PlaceIndex(const PlaceType &p) {
  return static_cast<int>(paddle::platform::Place(p).which());
}
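// IsSamePlace and PlaceIndex back the `_equals` and `_type` methods exposed on
// the place classes bound below: both helpers first convert their argument to
// the common platform::Place variant, so e.g. a CUDAPlace compares equal to a
// Place currently holding the same CUDAPlace, and PlaceIndex reports which
// alternative the variant holds (via which()).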

PYBIND11_MODULE(core, m) {
  // Not used, just make sure cpu_info.cc is linked.
  paddle::platform::CpuTotalPhysicalMemory();

  paddle::memory::allocation::UseAllocatorStrategyGFlag();
  paddle::framework::UseGarbageCollectorGFlags();

  m.doc() = "C++ core of PaddlePaddle";

  // using framework in this function. Since it is inside a function, it will
  // not cause namespace pollution.
  using namespace paddle::framework;  // NOLINT

  BindException(&m);

  m.def(
      "_append_python_callable_object_and_return_id",
      [](py::object py_obj) -> size_t {
        return paddle::operators::AppendPythonCallableObjectAndReturnId(py_obj);
      });

  // NOTE(zjl): ctest would load environment variables at the beginning even
  // though we have not `import paddle.fluid as fluid`. So we add this API
  // to enable eager deletion mode in unittest.
  m.def("_set_eager_deletion_mode",
        [](double threshold, double fraction, bool fast_mode) {
          FLAGS_eager_delete_tensor_gb = threshold;
          FLAGS_memory_fraction_of_eager_deletion = fraction;
          FLAGS_fast_eager_deletion_mode = fast_mode;
        });
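  // Illustrative use from Python (a sketch, assuming the usual
  // `paddle.fluid.core` import path):
  //   core._set_eager_deletion_mode(0.0, 1.0, True)
  // i.e. a 0 GB threshold, the full memory fraction, and fast mode; the call
  // just sets the three gflags above before any program is run.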

  m.add_object("_cleanup",
               py::capsule([]() { ScopePool::Instance().Clear(); }));

  m.def("get_mem_usage", [](int device) {
    return memory::allocation::GPUMemMonitor.GetMemUsage(device);
  });

  m.def("print_mem_usage",
        []() { return memory::allocation::GPUMemMonitor.PrintMemUsage(); });

  m.def("start_imperative_gperf_profiler",
        []() { imperative::StartProfile(); });

  m.def("stop_imperative_gperf_profiler", []() { imperative::StopProfile(); });

  py::class_<imperative::VarBase>(m, "VarBase", R"DOC()DOC")
      .def(
          py::init<const std::string &, paddle::framework::proto::VarType::Type,
                   const std::vector<int64_t>, const paddle::platform::CPUPlace,
                   bool, bool>())
      .def(
          py::init<const std::string &, paddle::framework::proto::VarType::Type,
                   const std::vector<int64_t>,
                   const paddle::platform::CUDAPlace, bool, bool>())
      .def("_run_backward",
           [](imperative::VarBase &self) { self.RunBackward(); })
      .def("_grad_name", &imperative::VarBase::GradName)
      .def("_grad_value", &imperative::VarBase::GradValue)
      .def("_clear_gradient", &imperative::VarBase::ClearGradient)
      .def("_grad_ivar",
           [](const imperative::VarBase &self) { return self.grads_; },
           py::return_value_policy::reference)
      .def("_copy_to",
           [](const imperative::VarBase &self, const platform::CPUPlace &place,
              bool blocking) {
             std::unique_ptr<imperative::VarBase> new_var =
                 self.NewVarBase(place, blocking);
             return new_var.release();
           },
           py::return_value_policy::take_ownership)
      .def("_copy_to",
           [](const imperative::VarBase &self, const platform::CUDAPlace &place,
              bool blocking) {
             std::unique_ptr<imperative::VarBase> new_var =
                 self.NewVarBase(place, blocking);
             return new_var.release();
           },
           py::return_value_policy::take_ownership)
      .def("value", [](const imperative::VarBase &self) { return self.var_; },
           py::return_value_policy::reference)
      .def_property("name", &imperative::VarBase::Name,
                    &imperative::VarBase::SetName)
      .def_property_readonly("shape", &imperative::VarBase::Shape)
      .def_property_readonly("dtype", &imperative::VarBase::DataType)
      .def_property("persistable", &imperative::VarBase::IsPersistable,
                    &imperative::VarBase::SetPersistable)
      .def_property("stop_gradient", &imperative::VarBase::IsStopGradient,
                    &imperative::VarBase::SetStopGradient);

  py::class_<imperative::OpBase, PyOpBase>(m, "OpBase", R"DOC()DOC")
      .def(py::init<const std::string &>())
      .def("register_backward_hooks",
           [](imperative::OpBase &self, const py::object &callable) {
             self.RegisterBackwardHooks(callable);
           })
      .def_property("_trace_id",
                    [](const imperative::OpBase &self) {
                      pybind11::gil_scoped_release release;
                      return self.trace_id_;
                    },
                    [](imperative::OpBase &self, int trace_id) {
                      pybind11::gil_scoped_release release;
                      self.trace_id_ = trace_id;
                    },
                    py::return_value_policy::reference)
      .def_property(
          "forward_id",
          [](const imperative::OpBase &self) { return self.forward_id_; },
          [](imperative::OpBase &self, int forward_id) {
            self.forward_id_ = forward_id;
          },
          py::return_value_policy::reference)
      .def_property(
          "backward_id",
          [](const imperative::OpBase &self) { return self.backward_id_; },
          [](imperative::OpBase &self, int backward_id) {
            self.backward_id_ = backward_id;
          },
          py::return_value_policy::reference);

  py::class_<imperative::Layer, Layer /* <--- trampoline*/> layer(m, "Layer");
  layer.def(py::init<>())
      .def("forward", [](imperative::Layer &self,
                         const std::vector<imperative::VarBase> &inputs) {
        return self.Forward(inputs);
      });

  py::class_<imperative::PyLayer>(m, "PyLayer")
      .def(py::init<>())
      .def_static(
          "apply",
          [](int func_id, const std::vector<imperative::VarBase *> &inputs)
              -> std::vector<imperative::VarBase *> {
                auto ret_vars = imperative::PyLayer::Apply(func_id, inputs);
                std::vector<imperative::VarBase *> outputs;
                outputs.reserve(ret_vars.size());
                for (size_t i = 0U; i != ret_vars.size(); ++i) {
                  framework::Variable *v = ret_vars[i];
                  // TODO(minqiyang): use unique_name generator to set a name
                  outputs.emplace_back(
                      new imperative::VarBase("", v, nullptr, true));
                }

                return outputs;
              },
          py::return_value_policy::take_ownership)
      .def_static("register_func",
                  [](int func_id, const py::object &callable) {
                    imperative::PyLayer::RegisterFunc(func_id, callable);
                  })
      .def_static("num_funcs", &imperative::PyLayer::NumFuncs);

  BindTracer(&m);

  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("_is_initialized",
           [](const Tensor &self) { return self.IsInitialized(); })
      .def("_get_dims",
           [](const Tensor &self) { return vectorize(self.dims()); })
      .def("_set_dims",
           [](Tensor &self, const std::vector<int64_t> &dim) {
             self.Resize(make_ddim(dim));
           })
      .def("_set_layout",
           [](Tensor &self, const std::string &layout) {
             self.set_layout(StringToDataLayout(layout));
           })
      .def("_alloc_float",
           [](Tensor &self, paddle::platform::CUDAPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("_alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("_alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("_alloc_int",
           [](Tensor &self, paddle::platform::CUDAPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("_alloc_int",
           [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("_alloc_float",
           [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("set", PyCPUTensorSetFromArray<float>)
      .def("set", PyCPUTensorSetFromArray<int>)
      .def("set", PyCPUTensorSetFromArray<double>)
      .def("set", PyCPUTensorSetFromArray<int64_t>)
      .def("set", PyCPUTensorSetFromArray<bool>)
      .def("set", PyCPUTensorSetFromArray<uint16_t>)
      .def("set", PyCPUTensorSetFromArray<uint8_t>)
      .def("set", PyCPUTensorSetFromArray<int8_t>)
#ifdef PADDLE_WITH_CUDA
      .def("set", PyCUDATensorSetFromArray<float>)
      .def("set", PyCUDATensorSetFromArray<int>)
      .def("set", PyCUDATensorSetFromArray<double>)
      .def("set", PyCUDATensorSetFromArray<int64_t>)
      .def("set", PyCUDATensorSetFromArray<bool>)
      .def("set", PyCUDATensorSetFromArray<uint16_t>)
      .def("set", PyCUDATensorSetFromArray<uint8_t>)
      .def("set", PyCUDATensorSetFromArray<int8_t>)
      .def("set", PyCUDAPinnedTensorSetFromArray<float>)
      .def("set", PyCUDAPinnedTensorSetFromArray<int>)
      .def("set", PyCUDAPinnedTensorSetFromArray<double>)
      .def("set", PyCUDAPinnedTensorSetFromArray<int64_t>)
      .def("set", PyCUDAPinnedTensorSetFromArray<bool>)
      .def("set", PyCUDAPinnedTensorSetFromArray<uint16_t>)
      .def("set", PyCUDAPinnedTensorSetFromArray<uint8_t>)
      .def("set", PyCUDAPinnedTensorSetFromArray<int8_t>)
#endif
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
      .def("_set_float_element", TensorSetElement<float>)
      .def("_get_float_element", TensorGetElement<float>)
      .def("_set_double_element", TensorSetElement<double>)
      .def("_get_double_element", TensorGetElement<double>)
      .def("_place", [](Tensor &self) { return self.place(); })
      .def("_dtype", [](Tensor &self) { return self.type(); });

  py::class_<LoDTensor, Tensor>(m, "LoDTensor", R"DOC(
    LoDTensor is a Tensor with optional LoD information.

    np.array(lod_tensor) can convert LoDTensor to a numpy array.
    lod_tensor.lod() can retrieve the LoD information.

    LoD is short for Level of Details and is usually used for varied sequence
    length. You can skip the following comment if you don't need optional LoD.

  For example:
     A LoDTensor X can look like the example below. It contains 2 sequences.
     The first has length 2 and the second has length 3, as described by x.lod.

     The first tensor dimension 5=2+3 is calculated from LoD if it's available.
     It is the total number of sequence elements. In X, each element has 2
     columns, hence [5, 2].

      x.lod  = [[2, 3]]
      x.data = [[1, 2], [3, 4],
                [5, 6], [7, 8], [9, 10]]
      x.shape = [5, 2]

      LoD can have multiple levels (for example, a paragraph can have multiple
      sentences and a sentence can have multiple words). In the following
      LoDTensor Y, the lod_level is 2. It means there are 2 sequences: the
      first sequence's length is 2 (it has 2 sub-sequences), and the second one's
      length is 1. The first sequence's 2 sub-sequences have length 2 and 2,
      respectively, and the second sequence's 1 sub-sequence has length 3.

      y.lod = [[2 1], [2 2 3]]
      y.shape = [2+2+3, ...]

  Note:
      In the above description, LoD is length-based. In Paddle's internal
      implementation, lod is offset-based. Hence, internally,
      y.lod is represented as [[0, 2, 3], [0, 2, 4, 7]] (the length-based
      equivalent would be [[2-0, 3-2], [2-0, 4-2, 7-4]]).

      Sometimes LoD is called recursive_sequence_length to be more
      self-explanatory. In this case, it must be length-based. Due to historical
      reasons, when LoD is called lod in the public API, it might be offset-based.
      Users should be careful about it.

        )DOC")
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("__init__",
           [](LoDTensor &instance, const std::vector<std::vector<size_t>>
                                       &recursive_sequence_lengths) {
             LoD new_lod;
             new_lod.reserve(recursive_sequence_lengths.size());
             std::copy(recursive_sequence_lengths.begin(),
                       recursive_sequence_lengths.end(),
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE(
                 CheckLoD(new_offset_lod, -1),
                 "the provided recursive_sequence_lengths info is invalid");
             new (&instance) LoDTensor(new_offset_lod);
           })
      .def("__init__", [](LoDTensor &instance) { new (&instance) LoDTensor(); })
      // We implement offset based LOD in C++ while we use length based with
      // Python API. So we changed set_lod to set_recursive_sequence_lengths to
      // avoid misuse.
      // The discussion is here:
      // https://github.com/PaddlePaddle/Paddle/issues/10855
      .def("set_lod",
           [](LoDTensor &self, const std::vector<std::vector<size_t>> &lod) {
             // the input lod is offset-based level-of-detail info
             LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             PADDLE_ENFORCE(CheckLoD(new_lod, vectorize(self.dims()).front()),
                            "the provided lod info is invalid");
             self.set_lod(new_lod);
           },
           py::arg("lod"), R"DOC(
           Set LoD of the LoDTensor.

           Args:
               lod (List[List[int]]): the lod to be set.
           )DOC")
      .def("set_recursive_sequence_lengths",
           [](LoDTensor &self, const std::vector<std::vector<size_t>>
                                   &recursive_sequence_lengths) {
             // the input recursive_sequence_lengths is length-based
             // level-of-detail info
             LoD new_lod;
             new_lod.reserve(recursive_sequence_lengths.size());
             std::copy(recursive_sequence_lengths.begin(),
                       recursive_sequence_lengths.end(),
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE(
                 CheckLoD(new_offset_lod, vectorize(self.dims()).front()),
                 "the provided recursive_sequence_lengths info is invalid");
             self.set_lod(new_offset_lod);
           },
           py::arg("recursive_sequence_lengths"), R"DOC(
           Set LoD of the LoDTensor according to recursive sequence length.

           For example, if recursive_sequence_lengths=[[2, 3]], meaning that
           there are two sequences with length 2 and 3 respectively, the
           corresponding lod would be [[0, 2, 2+3]], i.e., [[0, 2, 5]].

           Args:
                recursive_sequence_lengths (List[List[int]]): sequence lengths.
           )DOC")
      .def("lod",
           [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
             // output the offset-based lod info
             LoD lod = self.lod();
             std::vector<std::vector<size_t>> new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return new_lod;
           },
           R"DOC(
           Return the LoD of the LoDTensor.

           Returns:
               out (List[List[int]]): the lod of the LoDTensor.
           )DOC")
      // See the comments above set_lod.
      .def("recursive_sequence_lengths",
           [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
             // output the length-based lod info
             LoD lod = ConvertToLengthBasedLoD(self.lod());
             std::vector<std::vector<size_t>> new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return new_lod;
           },
           R"DOC(
           Return the sequence length of the LoDTensor corresponding to LoD.

           Returns:
               out (List[List[int]]): the sequence lengths.
           )DOC")
      .def("has_valid_recursive_sequence_lengths",
           [](LoDTensor &self) -> bool {
             // Check that the lod info is valid and match the outermost
             // dimension of the LoDTensor data
             return CheckLoD(self.lod(), vectorize(self.dims()).front());
           },
           R"DOC(
           Check whether the lod of the LoDTensor is valid.

           Returns:
               out (bool): whether the lod is valid.
           )DOC");
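  // Worked example of the two LoD forms documented above: a length-based
  // recursive_sequence_lengths of [[2, 3]] describes two sequences of 2 and 3
  // rows; ConvertToOffsetBasedLoD turns it into the offset-based lod
  // [[0, 2, 5]] that is stored internally, and ConvertToLengthBasedLoD in
  // recursive_sequence_lengths() converts it back.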

  py::class_<SelectedRows>(m, "SelectedRows")
      .def("__init__",
           [](SelectedRows &instance) { new (&instance) SelectedRows(); })
      .def("__init__",
           [](SelectedRows &instance, const std::vector<int64_t> rows,
              const int64_t &height) {
             new (&instance) SelectedRows(rows, height);
           })
      .def("get_tensor",
           [](SelectedRows &self) { return self.mutable_value(); },
           py::return_value_policy::reference)
      .def("numel",
           [](SelectedRows &self) -> int64_t { return self.value().numel(); })
      .def("set_height", &SelectedRows::set_height)
      .def("height", &SelectedRows::height)
      .def("set_rows",
           [](SelectedRows &self, std::vector<int64_t> rows) {
#ifndef PADDLE_WITH_CUDA
             self.set_rows(rows);
#else
        Vector<int64_t> new_rows(rows);
        self.set_rows(new_rows);
#endif
           })
      .def("sync_index", [](SelectedRows &instance) { instance.SyncIndex(); })
      .def("rows", [](SelectedRows &self) {
        auto rows = self.rows();
        std::vector<int64_t> new_rows;
        new_rows.reserve(rows.size());
        std::copy(rows.begin(), rows.end(), std::back_inserter(new_rows));
        return new_rows;
      });

  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.

All parameters, weights, and gradients are variables in Paddle.
)DOC")
      .def(py::init<>())
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
      .def("set_int",
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
      .def("is_float", [](const Variable &var) { return var.IsType<float>(); })
      .def("set_float",
           [](Variable &var, float val) -> void {
             *var.GetMutable<float>() = val;
           })
      .def("get_float",
           [](const Variable &var) -> float { return var.Get<float>(); })
      .def("get_tensor",
           [](Variable &self) -> LoDTensor * {
             return self.GetMutable<LoDTensor>();
           },
           py::return_value_policy::reference)
      .def("get_lod_rank_table",
           [](Variable &self) { return self.GetMutable<LoDRankTable>(); },
           py::return_value_policy::reference)
      .def("get_selected_rows",
           [](Variable &self) -> SelectedRows * {
             return self.GetMutable<SelectedRows>();
           },
           py::return_value_policy::reference)
      .def("get_lod_tensor_array",
           [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
           py::return_value_policy::reference)
#if (defined(PADDLE_WITH_CUDA) && !defined(_WIN32))
      .def("get_communicator",
           [](Variable &self) -> platform::Communicator * {
             return self.GetMutable<platform::Communicator>();
           },
           py::return_value_policy::reference)
#endif
      .def("get_reader",
           [](Variable &self) -> framework::ReaderHolder * {
             PADDLE_ENFORCE(self.IsType<framework::ReaderHolder>());
             return self.GetMutable<framework::ReaderHolder>();
           },
           py::return_value_policy::reference);

  BindReader(&m);

  using LoDTensorBlockingQueue =
      ::paddle::operators::reader::LoDTensorBlockingQueue;
  using LoDTensorBlockingQueueHolder =
      ::paddle::operators::reader::LoDTensorBlockingQueueHolder;

  py::class_<LoDTensorBlockingQueue, std::shared_ptr<LoDTensorBlockingQueue>>(
      m, "LoDTensorBlockingQueue", "")
      .def("push",
           [](LoDTensorBlockingQueue &self,
              const std::vector<framework::LoDTensor> &lod_tensor_vec) {
             pybind11::gil_scoped_release release;
             return self.Push(lod_tensor_vec);
           })
      .def("size", &LoDTensorBlockingQueue::Size)
      .def("capacity", &LoDTensorBlockingQueue::Cap)
      .def("close", &LoDTensorBlockingQueue::Close)
      .def("is_closed", &LoDTensorBlockingQueue::IsClosed);

  m.def("init_lod_tensor_blocking_queue",
        [](Variable &var,
           size_t capacity) -> std::shared_ptr<LoDTensorBlockingQueue> {
          auto *holder = var.GetMutable<LoDTensorBlockingQueueHolder>();
          holder->InitOnce(capacity, FLAGS_reader_queue_speed_test_mode);
          return holder->GetQueue();
        },
        py::return_value_policy::copy);
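  // The queue itself lives inside the Variable (wrapped in a
  // LoDTensorBlockingQueueHolder); the shared_ptr returned here only shares
  // ownership, so the Python-side handle can keep calling push()/close() while
  // the holder keeps the queue alive.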

  py::class_<Scope>(m, "_Scope", R"DOC(
    Scope is an association of names to Variables. All variables belong to a Scope.

    Variables in a parent scope can be retrieved from a local scope.

    You need to specify a scope to run a Net, i.e., `exe.Run(&scope)`.
    One net can run in different scopes and update different variables in the
    scope.

    You can create var in a scope and get it from the scope.

    Examples:
        .. code-block:: python

          # create tensor from a scope and set value to it.
          param = scope.var('Param').get_tensor()
          param_array = np.full((height, row_numel), 5.0).astype("float32")
          param.set(param_array, place)

        )DOC")
      .def("_remove_from_pool",
           [](Scope &self) { ScopePool::Instance().Remove(&self); })
      .def("var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.Var(name);
           },
           py::arg("name"),
           R"DOC(
           Find or create variable named :code:`name` in the current scope.

           If the variable named :code:`name` does not exist in the
           current scope, the variable would be created. Otherwise,
           return the existing variable.

           Args:
               name (str): the variable name.

           Returns:
               out (core.Variable): the found or created variable.
           )DOC",
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::arg("name"),
           R"DOC(
           Find variable named :code:`name` in the current scope or
           its parent scope. Return None if not found.

           Args:
               name (str): the variable name.

           Returns:
               out (core.Variable|None): the found variable or None.
           )DOC",
           py::return_value_policy::reference)
      .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
           R"DOC(
           Create a new sub-scope of the current scope.

           Returns:
               out (core._Scope): the created sub-scope.
           )DOC",
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids,
           R"DOC(
           Delete all sub-scopes of the current scope.
           )DOC")
      .def("_kids", &Scope::kids);

  m.def("Scope",
        []() -> Scope * {
          auto *s = new Scope();
          ScopePool::Instance().Insert(std::unique_ptr<Scope>(s));
          return s;
        },
        R"DOC(
        Create a new scope.

        Returns:
            out (core._Scope): the created scope.
        )DOC",
        py::return_value_policy::reference);

  //! @note: Be careful! PyBind will return std::string as a unicode object, not
  //! a Python str. If you want a str object, you should cast it in Python.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    std::vector<py::bytes> ret_values;
    for (auto &iter : OpInfoMap::Instance().map()) {
      auto &info = iter.second;
      if (info.HasOpProtoAndChecker()) {
        std::string str;
        PADDLE_ENFORCE(
            info.Proto().SerializeToString(&str),
            "Serialize OpProto Error. This could be a bug of Paddle.");
        ret_values.emplace_back(str);
      }
    }
    return ret_values;
  });
  m.def(
      "get_grad_op_desc", [](const OpDesc &op_desc,
                             const std::unordered_set<std::string> &no_grad_set,
                             const std::vector<BlockDesc *> &grad_sub_block) {
        std::unordered_map<std::string, std::string> grad_to_var;
        std::vector<std::unique_ptr<OpDesc>> grad_op_descs =
            framework::OpInfoMap::Instance()
                .Get(op_desc.Type())
                .GradOpMaker()(op_desc, no_grad_set, &grad_to_var,
                               grad_sub_block);
        std::vector<OpDesc *> grad_op_desc_ptrs(grad_op_descs.size());
        std::transform(grad_op_descs.begin(), grad_op_descs.end(),
                       grad_op_desc_ptrs.begin(),
                       [](std::unique_ptr<OpDesc> &p) { return p.release(); });
        return std::make_pair(grad_op_desc_ptrs, grad_to_var);
      });
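  // get_grad_op_desc hands back a (grad_op_descs, grad_to_var) pair: ownership
  // of the generated OpDesc objects is released to Python, and grad_to_var is
  // the string-to-string name map filled in by the GradOpMaker (mapping
  // gradient variable names back to the forward variables they belong to).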
  m.def("prune", [](const ProgramDesc &origin,
                    const std::vector<std::array<size_t, 2>> &targets) {
    ProgramDesc prog_with_targets(origin);
    for (const auto &t : targets) {
      prog_with_targets.MutableBlock(t[0])->Op(t[1])->SetIsTarget(true);
    }
    proto::ProgramDesc pruned_desc;
    Prune(*prog_with_targets.Proto(), &pruned_desc);
    return new ProgramDesc(pruned_desc);
  });
  m.def("empty_var_name",
        []() { return std::string(framework::kEmptyVarName); });
  m.def("grad_var_suffix",
        []() { return std::string(framework::kGradVarSuffix); });
  m.def_submodule(
       "var_names",
       "The module will return special predefined variable names in Paddle")
      .def("empty", []() { return kEmptyVarName; })
      .def("temp", []() { return kTempVarName; });
  // clang-format off
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::CUDAPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifndef PADDLE_WITH_CUDA
                    PADDLE_THROW("CUDAPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  })
          .def_static("create",
                [](paddle::platform::CUDAPinnedPlace& place)
                        -> paddle::platform::DeviceContext* {
#ifndef PADDLE_WITH_CUDA
                  PADDLE_THROW(
                        "CUDAPinnedPlace is not supported in CPU device.");
#else
                  return new paddle::platform::CUDAPinnedDeviceContext(place);
#endif
                });
// clang-format on
#if (defined(PADDLE_WITH_CUDA) && !defined(_WIN32))
  py::class_<platform::Communicator>(m, "Communicator").def(py::init<>());
#endif
  py::class_<platform::CUDAPlace>(m, "CUDAPlace")
      .def("__init__",
           [](platform::CUDAPlace &self, int dev_id) {
#ifdef PADDLE_WITH_CUDA
             PADDLE_ENFORCE(
                 dev_id >= 0 && dev_id < platform::GetCUDADeviceCount(),
                 "Invalid CUDAPlace(%d), must inside [0, %d)", dev_id,
                 platform::GetCUDADeviceCount());
             new (&self) platform::CUDAPlace(dev_id);
#else
             PADDLE_THROW("Cannot use CUDAPlace in CPU only version");
#endif
           })
      .def("_type", &PlaceIndex<platform::CUDAPlace>)
      .def("_equals", &IsSamePlace<platform::CUDAPlace, platform::Place>)
      .def("_equals", &IsSamePlace<platform::CUDAPlace, platform::CUDAPlace>)
      .def("_equals", &IsSamePlace<platform::CUDAPlace, platform::CPUPlace>)
      .def("_equals",
           &IsSamePlace<platform::CUDAPlace, platform::CUDAPinnedPlace>)
      .def("__str__", string::to_string<const platform::CUDAPlace &>);

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace")
      .def(py::init<>())
      .def("_type", &PlaceIndex<platform::CPUPlace>)
      .def("_equals", &IsSamePlace<platform::CPUPlace, platform::Place>)
      .def("_equals", &IsSamePlace<platform::CPUPlace, platform::CUDAPlace>)
      .def("_equals", &IsSamePlace<platform::CPUPlace, platform::CPUPlace>)
      .def("_equals",
           &IsSamePlace<platform::CPUPlace, platform::CUDAPinnedPlace>)
      .def("__str__", string::to_string<const platform::CPUPlace &>);

  py::class_<paddle::platform::CUDAPinnedPlace>(m, "CUDAPinnedPlace")
      .def("__init__",
           [](platform::CUDAPinnedPlace &self) {
#ifndef PADDLE_WITH_CUDA
             PADDLE_THROW("Cannot use CUDAPinnedPlace in CPU only version");
#endif
             new (&self) platform::CUDAPinnedPlace();
           })
      .def("_type", &PlaceIndex<platform::CUDAPinnedPlace>)
      .def("_equals", &IsSamePlace<platform::CUDAPinnedPlace, platform::Place>)
      .def("_equals",
           &IsSamePlace<platform::CUDAPinnedPlace, platform::CUDAPlace>)
      .def("_equals",
           &IsSamePlace<platform::CUDAPinnedPlace, platform::CPUPlace>)
      .def("_equals",
           &IsSamePlace<platform::CUDAPinnedPlace, platform::CUDAPinnedPlace>)
      .def("__str__", string::to_string<const platform::CUDAPinnedPlace &>);

  py::class_<platform::Place>(m, "Place")
      .def(py::init<>())
      .def("_type", &PlaceIndex<platform::Place>)
      .def("_equals", &IsSamePlace<platform::Place, platform::Place>)
      .def("_equals", &IsSamePlace<platform::Place, platform::CUDAPlace>)
      .def("_equals", &IsSamePlace<platform::Place, platform::CPUPlace>)
      .def("_equals", &IsSamePlace<platform::Place, platform::CUDAPinnedPlace>)
      .def("is_gpu_place",
           [](platform::Place &self) { return platform::is_gpu_place(self); })
      .def("is_cpu_place",
           [](platform::Place &self) { return platform::is_cpu_place(self); })
      .def("is_cuda_pinned_place",
           [](platform::Place &self) {
             return platform::is_cuda_pinned_place(self);
           })
      .def("gpu_device_id",
           [](platform::Place &self) {
             return boost::get<platform::CUDAPlace>(self).device;
           })
      .def("set_place", [](platform::Place &self,
                           const platform::Place &other) { self = other; })
      .def("set_place",
           [](platform::Place &self, const platform::CPUPlace &cpu_place) {
             self = cpu_place;
           })
      .def("set_place",
           [](platform::Place &self, const platform::CUDAPlace &gpu_place) {
             self = gpu_place;
           })
      .def("set_place", [](platform::Place &self,
                           const platform::CUDAPinnedPlace &cuda_pinned_place) {
        self = cuda_pinned_place;
      });

  py::class_<OperatorBase>(m, "Operator")
      .def_static("create",
                  [](py::bytes protobin) {
                    proto::OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    return OpRegistry::CreateOp(desc);
                  })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CPUPlace &place) { self.Run(scope, place); })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CUDAPlace &place) { self.Run(scope, place); })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CUDAPinnedPlace &place) {
             self.Run(scope, place);
           })
      .def("type",
           [](const OperatorBase &op) -> std::string { return op.Type(); })
      .def("outputs",
           [](const OperatorBase &op)
               -> std::map<std::string, std::vector<std::string>> {
                 return op.Outputs();
               })
      .def("output_vars",
           [](const OperatorBase &op) { return op.OutputVars(true); })
      .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
      .def("input_vars", [](const OperatorBase &op) { return op.InputVars(); })
      .def("__str__", &OperatorBase::DebugString)
      .def("no_intermediate_outputs",
           [](const OperatorBase &op) { return op.OutputVars(false); })
      .def("support_gpu", &OperatorBase::SupportGPU);

  py::class_<framework::Executor>(m, "Executor")
      .def(py::init<const platform::Place &>())
      .def("close", &Executor::Close)
      .def("run", [](Executor &self, const ProgramDesc &prog, Scope *scope,
                     int block_id, bool create_local_scope, bool create_vars,
                     const std::vector<std::string> &fetch_vars) {
        pybind11::gil_scoped_release release;
        self.Run(prog, scope, block_id, create_local_scope, create_vars,
                 fetch_vars);
      });
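  // Run() executes with the GIL released (gil_scoped_release above), so other
  // Python threads, e.g. reader threads pushing into a LoDTensorBlockingQueue,
  // can make progress while the program runs in C++.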

  m.def("init_gflags", framework::InitGflags);
  m.def("init_glog", framework::InitGLOG);
  m.def("init_devices",
        [](bool init_p2p) { framework::InitDevices(init_p2p); });

  m.def("is_compiled_with_ngraph", IsCompiledWithNGRAPH);
  m.def("is_compiled_with_cuda", IsCompiledWithCUDA);
  m.def("is_compiled_with_mkldnn", IsCompiledWithMKLDNN);
  m.def("is_compiled_with_brpc", IsCompiledWithBrpc);
  m.def("is_compiled_with_dist", IsCompiledWithDIST);
#ifdef PADDLE_WITH_CUDA
  m.def("is_float16_supported", [](const platform::CUDAPlace &place) -> bool {
    // Only GPUs with Compute Capability >= 53 support float16
    return platform::GetCUDAComputeCapability(place.device) >= 53;
  });
#endif

  m.def("set_feed_variable", framework::SetFeedVariable);
  m.def("get_fetch_variable", framework::GetFetchVariable);
  m.def("get_variable_tensor", framework::GetVariableTensor);

  m.def("_is_program_version_supported", IsProgramVersionSupported);

  BindProgramDesc(&m);
  BindBlockDesc(&m);
  BindVarDsec(&m);
  BindOpDesc(&m);
  BindConstValue(&m);

  py::class_<framework::LoDRankTable>(m, "LodRankTable")
      .def("items", [](framework::LoDRankTable &table) {
        std::vector<std::pair<size_t, size_t>> res;
        for (auto &item : table.items()) {
          res.push_back({item.index, item.length});
        }
        return res;
      });

  py::class_<LoDTensorArray>(m, "LoDTensorArray")
      .def("__init__",
           [](LoDTensorArray &instance) { new (&instance) LoDTensorArray(); })
      .def("__getitem__",
           [](LoDTensorArray &self, size_t i) { return &self.at(i); },
           py::return_value_policy::reference)
      .def("__len__", [](LoDTensorArray &self) { return self.size(); })
      .def("__setitem__",
           [](LoDTensorArray &self, size_t i, const LoDTensor &t) {
             PADDLE_ENFORCE_LT(i, self.size());
             self[i].ShareDataWith(t);
             self[i].set_lod(t.lod());
           })
      .def("append",
           [](LoDTensorArray &self, const LoDTensor &t) {
             self.emplace_back();
             self.back().ShareDataWith(t);
             self.back().set_lod(t.lod());
           },
           py::arg("tensor"), "Append a LoDTensor to LoDTensorArray.");
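  // Because LoDTensorArray is declared opaque with PYBIND11_MAKE_OPAQUE near
  // the top of this file, it is not auto-converted to a Python list:
  // __getitem__ returns a reference into the array, and append() shares the
  // tensor's data into a new slot of the underlying C++ vector.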

  m.def("IsInplace",
        [](std::string op) -> bool { return operators::IsInplace(op); });

  m.def("op_support_gpu", OpSupportGPU);
#ifdef PADDLE_WITH_CUDA
  m.def("get_cuda_device_count", platform::GetCUDADeviceCount);

#ifndef _WIN32
  m.def("nvprof_init", platform::CudaProfilerInit);
  m.def("nvprof_start", platform::CudaProfilerStart);
  m.def("nvprof_stop", platform::CudaProfilerStop);
#endif
#endif

  py::enum_<platform::ProfilerState>(m, "ProfilerState", py::arithmetic())
      .value("kDisabled", platform::ProfilerState::kDisabled)
      .value("kCPU", platform::ProfilerState::kCPU)
      .value("kCUDA", platform::ProfilerState::kCUDA)
      .value("kAll", platform::ProfilerState::kAll)
      .export_values();

  py::enum_<platform::EventSortingKey>(m, "EventSortingKey", py::arithmetic())
      .value("kDefault", platform::EventSortingKey::kDefault)
      .value("kCalls", platform::EventSortingKey::kCalls)
      .value("kTotal", platform::EventSortingKey::kTotal)
      .value("kMin", platform::EventSortingKey::kMin)
      .value("kMax", platform::EventSortingKey::kMax)
      .value("kAve", platform::EventSortingKey::kAve)
      .export_values();

  m.def("enable_profiler", platform::EnableProfiler);
  m.def("disable_profiler", platform::DisableProfiler);
  m.def("is_profiler_enabled", platform::IsProfileEnabled);
  m.def("reset_profiler", platform::ResetProfiler);
  m.def("get_pass", [](const std::string &pass_type) {
    auto pass = framework::ir::PassRegistry::Instance().Get(pass_type);
    return std::shared_ptr<framework::ir::Pass>(std::move(pass));
  });

  py::class_<ir::Pass, std::shared_ptr<ir::Pass>> pass(m, "Pass");
  pass.def(py::init())
      .def("has", &ir::Pass::Has)
      .def("set_not_owned",
           [](ir::Pass &self, const std::string &attr_name, ProgramDesc &attr) {
             self.SetNotOwned<ProgramDesc>(attr_name, &attr);
           })
      .def(
          "set",
          [](ir::Pass &self, const std::string &name, const std::string &attr) {
            self.Set<std::string>(name, new std::string(attr));
          })
      .def("set", [](ir::Pass &self, const std::string &name,
                     int val) { self.Set<const int>(name, new int(val)); })
      .def("type", &ir::Pass::Type)
      .def("apply", [](ir::Pass &self, std::shared_ptr<ir::Graph> graph) {
        std::unique_ptr<ir::Graph> origin_graph(graph.get());
        auto optim_graph = self.Apply(std::move(origin_graph));
        optim_graph.release();
      });

  py::class_<ir::PassBuilder, std::shared_ptr<ir::PassBuilder>> pb(
      m, "PassBuilder");
  pb.def(py::init())
      .def("append_pass",
           [](ir::PassBuilder &self,
              const std::string &pass_type) -> std::shared_ptr<ir::Pass> {
             return self.AppendPass(pass_type);
           })
      .def("all_passes", [](ir::PassBuilder &self) { return self.AllPasses(); })
      .def("insert_pass",
           [](ir::PassBuilder &self, size_t idx, const std::string &pass_type) {
             return self.InsertPass(idx, pass_type);
           })
      .def("remove_pass",
           [](ir::PassBuilder &self, size_t idx) { self.RemovePass(idx); });

  // -- python binds for parallel executor.

  py::class_<ParallelExecutor> pe(m, "ParallelExecutor");
  py::class_<ExecutionStrategy> exec_strategy(pe, "ExecutionStrategy", R"DOC(
    ExecutionStrategy allows the user to more precisely control how to run
    the program in ParallelExecutor by setting its properties.

    Examples:
        .. code-block:: python

          exec_strategy = fluid.ExecutionStrategy()
          exec_strategy.num_threads = 4

          train_exe = fluid.ParallelExecutor(use_cuda=True,
                                             loss_name=loss.name,
                                             exec_strategy=exec_strategy)

          train_loss, = train_exe.run([loss.name], feed=feed_dict)

        )DOC");

  exec_strategy.def(py::init())
      .def_property(
          "num_threads",
          [](const ExecutionStrategy &self) { return self.num_threads_; },
          [](ExecutionStrategy &self, size_t num_threads) {
            self.num_threads_ = num_threads;
          },
          R"DOC(The type is INT, num_threads represents the size of the thread pool
            used to run the operators of the current program in ParallelExecutor.
            If :math:`num\_threads=1`, all the operators will execute one by one,
            but the order may differ between iterations.
            If it is not set, it will be set in ParallelExecutor according to the
            device type and device count: for GPU, :math:`num\_threads=device\_count*4`; for CPU,
            :math:`num\_threads=CPU\_NUM*4`. The explanation of :math:`CPU\_NUM` is in ParallelExecutor;
            if it is not set, ParallelExecutor will get the cpu count by calling
            `multiprocessing.cpu_count()`. Default 0.)DOC")
      .def_property(
          "use_cuda",
          [](const ExecutionStrategy &self) { return self.use_cuda_; },
          [](ExecutionStrategy &self, bool use_cuda) {
            self.use_cuda_ = use_cuda;
          })  // FIXME(chengduo): No doc is added for 'use_cuda' because it may
      // confuse users: ParallelExecutor also has a parameter named 'use_cuda',
      // and in the current implementation ParallelExecutor's 'use_cuda'
      // overrides ExecutionStrategy's 'use_cuda'.
      .def_property(
          "allow_op_delay",
          [](const ExecutionStrategy &self) { return self.allow_op_delay_; },
          [](ExecutionStrategy &self, bool allow_op_delay) {
            self.allow_op_delay_ = allow_op_delay;
          },
          R"DOC(The type is BOOL, allow_op_delay represents whether to delay running the
                communication operators, which may make the execution faster.
                Note that in some models, allow_op_delay may cause the program to hang. Default False.)DOC")
      .def_property(
          "num_iteration_per_drop_scope",
          [](const ExecutionStrategy &self) {
            return self.num_iteration_per_drop_scope_;
          },
          [](ExecutionStrategy &self, size_t num_iteration_per_drop_scope) {
            self.num_iteration_per_drop_scope_ = num_iteration_per_drop_scope;
          },
          R"DOC(The type is INT, num_iteration_per_drop_scope indicates how
                many iterations to wait before cleaning up the temp variables which
                are generated during execution. It may make the execution faster,
                because the temp variables' shapes may stay the same between two iterations. Default 100.

                NOTES:
                    1. If you fetch data when calling the 'run', the ParallelExecutor
                       will clean up the temp variables at the end of the current iteration.
                    2. In some NLP models, it may cause the GPU memory to be insufficient;
                       in this case, you should reduce `num_iteration_per_drop_scope`.
              )DOC")
      .def_property("_dry_run",
                    [](const ExecutionStrategy &self) { return self.dry_run_; },
                    [](ExecutionStrategy &self, bool dry_run) {
                      self.dry_run_ = dry_run;
                    });

  exec_strategy.def_property(
      "use_experimental_executor",
      [](const ExecutionStrategy &self) {
        return self.type_ == ExecutionStrategy::kExperimental;
      },
      [](ExecutionStrategy &self, bool experimental) {
        self.type_ = experimental ? ExecutionStrategy::kExperimental
                                  : ExecutionStrategy::kDefault;
      });
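  // Hedged sketch: opting into the experimental executor via the property bound
  // above; setting it back to False restores the default executor type.
  //
  //   import paddle.fluid as fluid
  //   exec_strategy = fluid.ExecutionStrategy()
  //   exec_strategy.use_experimental_executor = True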

  py::class_<BuildStrategy> build_strategy(pe, "BuildStrategy", R"DOC(
    BuildStrategy allows the user to more precisely control how to
    build the SSA Graph in ParallelExecutor by setting its properties.

    Examples:
        .. code-block:: python

          build_strategy = fluid.BuildStrategy()
          build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce

          train_exe = fluid.ParallelExecutor(use_cuda=True,
                                             loss_name=loss.name,
                                             build_strategy=build_strategy)

          train_loss, = train_exe.run([loss.name], feed=feed_dict)
)DOC");

  py::enum_<BuildStrategy::ReduceStrategy>(build_strategy, "ReduceStrategy")
      .value("Reduce", BuildStrategy::ReduceStrategy::kReduce)
      .value("AllReduce", BuildStrategy::ReduceStrategy::kAllReduce);
  py::enum_<BuildStrategy::GradientScaleStrategy>(build_strategy,
                                                  "GradientScaleStrategy")
      .value("CoeffNumDevice",
             BuildStrategy::GradientScaleStrategy::kCoeffNumDevice)
      .value("One", BuildStrategy::GradientScaleStrategy::kOne)
      .value("Customized", BuildStrategy::GradientScaleStrategy::kCustomized);

  build_strategy.def(py::init())
      .def_property(
          "reduce_strategy",
          [](const BuildStrategy &self) { return self.reduce_; },
          [](BuildStrategy &self, BuildStrategy::ReduceStrategy strategy) {
            PADDLE_ENFORCE(!self.IsFinalized(), "BuildStrategy is finalized.");
            self.reduce_ = strategy;
          },
          R"DOC(The type is STR, there are two reduce strategies in ParallelExecutor,
                  'AllReduce' and 'Reduce'. If you want that all the parameters'
                  optimization are done on all devices independently, you should choose 'AllReduce';
                  if you choose 'Reduce', all the parameters' optimization will be evenly distributed
                  to different devices, and then broadcast the optimized parameter to other devices.
                  In some models, `Reduce` is faster. Default 'AllReduce'. )DOC")
      .def_property(
          "gradient_scale_strategy",
          [](const BuildStrategy &self) { return self.gradient_scale_; },
          [](BuildStrategy &self,
             BuildStrategy::GradientScaleStrategy strategy) {
            PADDLE_ENFORCE(!self.IsFinalized(), "BuildStrategy is finalized.");
            self.gradient_scale_ = strategy;
          },
          R"DOC(The type is STR, there are three ways of defining :math:`loss@grad` in
                   ParallelExecutor, 'CoeffNumDevice', 'One' and 'Customized'. By default,
                   ParallelExecutor sets the :math:`loss@grad` according to the number of devices.
                   If you want to customize :math:`loss@grad`, you can choose 'Customized'.
                   Default 'CoeffNumDevice'.)DOC")
      .def_property(
          "debug_graphviz_path",
          [](const BuildStrategy &self) { return self.debug_graphviz_path_; },
          [](BuildStrategy &self, const std::string &path) {
            PADDLE_ENFORCE(!self.IsFinalized(), "BuildStrategy is finalized.");
            self.debug_graphviz_path_ = path;
          },
          R"DOC(The type is STR, debug_graphviz_path indicate the path that
                    writing the SSA Graph to file in the form of graphviz, you.
                    It is useful for debugging. Default "")DOC")
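      // Hedged sketch: dump the SSA Graph for offline inspection; the output
      // path is arbitrary and rendering it with graphviz's `dot` tool is optional.
      //
      //   import paddle.fluid as fluid
      //   build_strategy = fluid.BuildStrategy()
      //   build_strategy.debug_graphviz_path = "./ssa_graph"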
      .def_property(
          "enable_sequential_execution",
          [](const BuildStrategy &self) {
            return self.enable_sequential_execution_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE(!self.IsFinalized(), "BuildStrategy is finalized.");
            self.enable_sequential_execution_ = b;
          },
          R"DOC(The type is BOOL. If set True, the execution order of ops would be the same as what is in the program. Default False.)DOC")
      .def_property(
          "remove_unnecessary_lock",
          [](const BuildStrategy &self) {
            return self.remove_unnecessary_lock_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE(!self.IsFinalized(), "BuildStrategy is finalized.");
            self.remove_unnecessary_lock_ = b;
          },
          R"DOC(The type is BOOL. If set True, some locks in GPU ops would be released and ParallelExecutor would run faster. Default True.)DOC")
      .def_property(
          "num_trainers",
          [](const BuildStrategy &self) { return self.num_trainers_; },
          [](BuildStrategy &self, int num_trainers) {
            self.num_trainers_ = num_trainers;
          })
      .def_property(
          "trainers_endpoints",
          [](const BuildStrategy &self) { return self.trainers_endpoints_; },
          [](BuildStrategy &self,
             const std::vector<std::string> &trainers_endpoints) {
            self.trainers_endpoints_ = trainers_endpoints;
          })
      .def_property("trainer_id",
                    [](const BuildStrategy &self) { return self.trainer_id_; },
                    [](BuildStrategy &self, int trainer_id) {
                      self.trainer_id_ = trainer_id;
                    })
      .def_property(
          "fuse_elewise_add_act_ops",
          [](const BuildStrategy &self) {
            return self.fuse_elewise_add_act_ops_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE(!self.IsFinalized(), "BuildStrategy is finalized.");
            self.fuse_elewise_add_act_ops_ = b;
          },
          R"DOC(The type is BOOL, fuse_elewise_add_act_ops indicate whether
                     to fuse elementwise_add_op and activation_op,
                     it may make the execution faster. Default False)DOC")
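      // Hedged sketch: turning on the elementwise_add + activation fusion; note
      // that the property must be set before the BuildStrategy is finalized.
      //
      //   import paddle.fluid as fluid
      //   build_strategy = fluid.BuildStrategy()
      //   build_strategy.fuse_elewise_add_act_ops = True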
      .def_property(
          "fuse_relu_depthwise_conv",
          [](const BuildStrategy &self) {
            return self.fuse_relu_depthwise_conv_;
          },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE(!self.IsFinalized(), "BuildStrategy is finalized.");
            self.fuse_relu_depthwise_conv_ = b;
          },
          R"DOC(The type is BOOL, fuse_relu_depthwise_conv indicate whether
                      to fuse relu and depthwise_conv2d,
                      it will save GPU memory and may make the execution faster.
                      This options is only available in GPU devices.
                      Default False)DOC")
      .def_property(
          "sync_batch_norm",
          [](const BuildStrategy &self) { return self.sync_batch_norm_; },
          [](BuildStrategy &self, bool b) {
            PADDLE_ENFORCE(!self.IsFinalized(), "BuildStrategy is finalized.");
            self.sync_batch_norm_ = b;
          },
          R"DOC(The type is BOOL, sync_batch_norm indicates whether to use
                synchronous batch normalization which synchronizes the mean
                and variance through multi-devices in training phase.

                Current implementation doesn't support FP16 training and CPU.
                And only synchronous on one machine, not all machines.

                Default False)DOC")
      .def_property(
          "memory_optimize",
          [](const BuildStrategy &self) { return self.memory_optimize_; },
          [](BuildStrategy &self, bool b) { self.memory_optimize_ = b; })
      .def_property(
          "is_distribution",
          [](const BuildStrategy &self) { return self.is_distribution_; },
          [](BuildStrategy &self, bool b) { self.is_distribution_ = b; })
      .def_property(
          "enable_inplace",
          [](const BuildStrategy &self) { return self.enable_inplace_; },
          [](BuildStrategy &self, bool b) { self.enable_inplace_ = b; })
      .def_property(
          "fuse_all_reduce_ops",
          [](const BuildStrategy &self) { return self.fuse_all_reduce_ops_; },
          [](BuildStrategy &self, bool b) { self.fuse_all_reduce_ops_ = b; })
      .def("_finalize_strategy_and_create_passes",
           [](BuildStrategy &self) -> std::shared_ptr<ir::PassBuilder> {
             return self.CreatePassesFromStrategy(true);
           },
           R"DOC(Allow user to customized passes. Normally model-specific
                optimization passes should be defined in this way. BuildStrategy
                cannot be updated after being finalized.)DOC");

  pe.def(py::init<const std::vector<platform::Place> &,
                  const std::vector<std::string> &, const std::string &,
                  Scope *, std::vector<Scope *> &, const ExecutionStrategy &,
                  const BuildStrategy &, ir::Graph *>())
      // NOTE: even though we return a vec<Scope*>* to Python with the reference
      // policy, we still cannot get local_scope from this vector, since the
      // elements of vec<Scope*> would be freed by the Python GC. We can only
      // return Scope* one by one and mark them as references.
      .def("local_scopes",
           [](ParallelExecutor &self) -> std::vector<Scope *> * {
             return &self.GetLocalScopes();
           },
           py::return_value_policy::reference)
      .def("feed_tensors_into_local_scopes",
           &ParallelExecutor::FeedTensorsIntoLocalScopes)
      .def("feed_and_split_tensor_into_local_scopes",
           &ParallelExecutor::FeedAndSplitTensorIntoLocalScopes)
      .def("run", [](ParallelExecutor &self,
                     const std::vector<std::string> &fetch_tensors,
                     const std::string &fetched_var_name) {
        pybind11::gil_scoped_release release;
        self.Run(fetch_tensors, fetched_var_name);
      });

  BindRecordIOWriter(&m);
  BindAsyncExecutor(&m);
  BindGraph(&m);
  BindNode(&m);
  BindInferenceApi(&m);
}
}  // namespace pybind
}  // namespace paddle