/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <Python.h>
#include <algorithm>
#include <map>
#include <mutex>  // NOLINT // for call_once
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/channel.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/init.h"
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/parallel_executor.h"
#include "paddle/fluid/framework/prune.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/pybind/const_value.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/fluid/pybind/protobuf.h"
#include "paddle/fluid/pybind/pybind.h"  // NOLINT
#include "paddle/fluid/pybind/recordio.h"
#include "paddle/fluid/pybind/tensor_py.h"

#include "paddle/fluid/string/to_string.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/operators/nccl/nccl_gpu_common.h"
#include "paddle/fluid/platform/cuda_profiler.h"
#include "paddle/fluid/platform/gpu_info.h"
#endif

// Disable pybind11's automatic conversion of LoDTensorArray to a Python list;
// the type is exposed as an opaque object that is passed by reference instead.
PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray);

namespace paddle {
namespace pybind {
bool IsCompiledWithCUDA() {
#ifndef PADDLE_WITH_CUDA
  return false;
#else
  return true;
#endif
}

PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

  // We use the paddle::framework namespace in this function. Since the
  // using-directive is function-local, it does not pollute the enclosing
  // namespace.
  using namespace paddle::framework;  // NOLINT

  BindException(&m);

  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("get_dims",
           [](const Tensor &self) { return vectorize(self.dims()); })
      .def("set_dims",
           [](Tensor &self, const std::vector<int64_t> &dim) {
             self.Resize(make_ddim(dim));
           })
      .def("set_layout",
           [](Tensor &self, const std::string &layout) {
             self.set_layout(StringToDataLayout(layout));
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CUDAPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CUDAPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("set", PyCPUTensorSetFromArray<float>)
      .def("set", PyCPUTensorSetFromArray<int>)
      .def("set", PyCPUTensorSetFromArray<double>)
      .def("set", PyCPUTensorSetFromArray<int64_t>)
      .def("set", PyCPUTensorSetFromArray<bool>)
      .def("set", PyCPUTensorSetFromArray<uint16_t>)
      .def("set", PyCPUTensorSetFromArray<uint8_t>)
#ifdef PADDLE_WITH_CUDA
      .def("set", PyCUDATensorSetFromArray<float>)
      .def("set", PyCUDATensorSetFromArray<int>)
      .def("set", PyCUDATensorSetFromArray<double>)
      .def("set", PyCUDATensorSetFromArray<int64_t>)
      .def("set", PyCUDATensorSetFromArray<bool>)
      .def("set", PyCUDATensorSetFromArray<uint16_t>)
      .def("set", PyCUDATensorSetFromArray<uint8_t>)
      .def("set", PyCUDAPinnedTensorSetFromArray<float>)
      .def("set", PyCUDAPinnedTensorSetFromArray<int>)
      .def("set", PyCUDAPinnedTensorSetFromArray<double>)
      .def("set", PyCUDAPinnedTensorSetFromArray<int64_t>)
      .def("set", PyCUDAPinnedTensorSetFromArray<bool>)
      .def("set", PyCUDAPinnedTensorSetFromArray<uint16_t>)
      .def("set", PyCUDAPinnedTensorSetFromArray<uint8_t>)
#endif
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
      .def("set_float_element", TensorSetElement<float>)
      .def("get_float_element", TensorGetElement<float>)
      .def("set_double_element", TensorSetElement<double>)
      .def("get_double_element", TensorGetElement<double>)
      .def("dtype", [](Tensor &self) { return ToDataType(self.type()); });

  py::class_<LoDTensor, Tensor>(m, "LoDTensor")
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("__init__",
           [](LoDTensor &instance, const std::vector<std::vector<size_t>>
                                       &recursive_sequence_lengths) {
             LoD new_lod;
             new_lod.reserve(recursive_sequence_lengths.size());
             std::copy(recursive_sequence_lengths.begin(),
                       recursive_sequence_lengths.end(),
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE(
                 CheckLoD(new_offset_lod, -1),
                 "the provided recursive_sequence_lengths info is invalid");
             new (&instance) LoDTensor(new_offset_lod);
           })
      .def("__init__", [](LoDTensor &instance) { new (&instance) LoDTensor(); })
      // We implement offset-based LoD in C++, while the Python API uses the
      // length-based representation. set_lod was therefore renamed to
      // set_recursive_sequence_lengths to avoid misuse.
      // The discussion is here:
      // https://github.com/PaddlePaddle/Paddle/issues/10855
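      // For example (illustrative): the length-based LoD [[2, 3]] describes
      // two sequences of lengths 2 and 3; ConvertToOffsetBasedLoD turns it
      // into the offset-based form [[0, 2, 5]].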
      .def("set_lod",
           [](LoDTensor &self, const std::vector<std::vector<size_t>> &lod) {
             // the input lod is offset-based level-of-detail info
             LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             PADDLE_ENFORCE(CheckLoD(new_lod, vectorize(self.dims()).front()),
                            "the provided lod info is invalid");
             self.set_lod(new_lod);
           })
      .def("set_recursive_sequence_lengths",
           [](LoDTensor &self, const std::vector<std::vector<size_t>>
                                   &recursive_sequence_lengths) {
             // the input recursive_sequence_lengths is length-based
             // level-of-detail info
             LoD new_lod;
             new_lod.reserve(recursive_sequence_lengths.size());
             std::copy(recursive_sequence_lengths.begin(),
                       recursive_sequence_lengths.end(),
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE(
                 CheckLoD(new_offset_lod, vectorize(self.dims()).front()),
                 "the provided recursive_sequence_lengths info is invalid");
             self.set_lod(new_offset_lod);
           })
      .def("lod",
           [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
             // output the offset-based lod info
             LoD lod = self.lod();
             std::vector<std::vector<size_t>> new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return new_lod;
           })
      // See the comments above set_lod.
      .def("recursive_sequence_lengths",
           [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
             // output the length-based lod info
             LoD lod = ConvertToLengthBasedLoD(self.lod());
             std::vector<std::vector<size_t>> new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return new_lod;
           })
      .def("has_valid_recursive_sequence_lengths", [](LoDTensor &self) -> bool {
        // Check that the lod info is valid and match the outermost
        // dimension of the LoDTensor data
        return CheckLoD(self.lod(), vectorize(self.dims()).front());
      });
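
  // Illustrative Python-side round trip for the LoDTensor binding above (a
  // sketch; assumes t's outermost dimension is 5 so the lengths sum matches):
  //   t.set_recursive_sequence_lengths([[2, 3]])
  //   assert t.has_valid_recursive_sequence_lengths()
  //   assert t.recursive_sequence_lengths() == [[2, 3]]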

  py::class_<SelectedRows>(m, "SelectedRows")
      .def("__init__",
           [](SelectedRows &instance) { new (&instance) SelectedRows(); })
      .def("__init__",
           [](SelectedRows &instance, const std::vector<int64_t> rows,
              const int64_t &height) {
             new (&instance) SelectedRows(rows, height);
           })
      .def("get_tensor",
           [](SelectedRows &self) { return self.mutable_value(); },
           py::return_value_policy::reference)
      .def("set_height", &SelectedRows::set_height)
      .def("height", &SelectedRows::height)
      .def("set_rows",
           [](SelectedRows &self, std::vector<int64_t> rows) {
#ifndef PADDLE_WITH_CUDA
             self.set_rows(rows);
#else
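             // Vector<int64_t> here is framework::Vector, the mixed
             // host/device vector used in CUDA builds, so the plain
             // std::vector is copied into it first.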
             Vector<int64_t> new_rows(rows);
             self.set_rows(new_rows);
#endif
           })
      .def("rows", [](SelectedRows &self) {
#ifndef PADDLE_WITH_CUDA
        return self.rows();
#else
        auto rows = self.rows();
        std::vector<int64_t> new_rows;
        new_rows.reserve(rows.size());
        std::copy(rows.begin(), rows.end(), std::back_inserter(new_rows));
        return new_rows;
#endif
      });

  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.

All parameters, weights, and gradients are variables in Paddle.
)DOC")
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
      .def("set_int",
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
      .def("is_float", [](const Variable &var) { return var.IsType<float>(); })
      .def("set_float",
           [](Variable &var, float val) -> void {
             *var.GetMutable<float>() = val;
           })
      .def("get_float",
           [](const Variable &var) -> float { return var.Get<float>(); })
      .def("get_tensor",
           [](Variable &self) -> LoDTensor * {
             return self.GetMutable<LoDTensor>();
           },
           py::return_value_policy::reference)
      .def("get_lod_rank_table",
           [](Variable &self) { return self.GetMutable<LoDRankTable>(); },
           py::return_value_policy::reference)
      .def("get_selected_rows",
           [](Variable &self) -> SelectedRows * {
             return self.GetMutable<SelectedRows>();
           },
           py::return_value_policy::reference)
      .def("get_lod_tensor_array",
           [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
           py::return_value_policy::reference)
#ifdef PADDLE_WITH_CUDA
      .def("get_communicator",
           [](Variable &self) -> platform::Communicator * {
             return self.GetMutable<platform::Communicator>();
           },
           py::return_value_policy::reference)
#endif
      .def("get_reader",
           [](Variable &self) -> framework::ReaderHolder * {
             PADDLE_ENFORCE(self.IsType<framework::ReaderHolder>());
             return self.GetMutable<framework::ReaderHolder>();
           },
           py::return_value_policy::reference);

  py::class_<framework::ReaderHolder>(m, "Reader", "")
      .def("reset", &framework::ReaderHolder::ReInit);

  py::class_<Scope>(m, "Scope", "")
      .def("var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.Var(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids);

  //! @note: Be careful! PyBind will return std::string as a unicode object,
  //! not a Python str. If you want a str object, cast it on the Python side.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    std::vector<py::bytes> ret_values;
    for (auto &iter : OpInfoMap::Instance().map()) {
      auto &info = iter.second;
      if (info.HasOpProtoAndChecker()) {
        std::string str;
        PADDLE_ENFORCE(
            info.Proto().SerializeToString(&str),
            "Serialize OpProto Error. This could be a bug of Paddle.");
        ret_values.emplace_back(str);
      }
    }
    return ret_values;
  });
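
  // get_grad_op_desc below returns a (grad_op_descs, grad_to_var) pair: the
  // gradient OpDescs generated for op_desc, plus a map from each gradient
  // variable name to the forward variable it was created for.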
  m.def(
      "get_grad_op_desc", [](const OpDesc &op_desc,
                             const std::unordered_set<std::string> &no_grad_set,
                             const std::vector<BlockDesc *> &grad_sub_block) {
        std::unordered_map<std::string, std::string> grad_to_var;
        std::vector<std::unique_ptr<OpDesc>> grad_op_descs =
            framework::OpInfoMap::Instance()
                .Get(op_desc.Type())
                .GradOpMaker()(op_desc, no_grad_set, &grad_to_var,
                               grad_sub_block);
        std::vector<OpDesc *> grad_op_desc_ptrs(grad_op_descs.size());
        std::transform(grad_op_descs.begin(), grad_op_descs.end(),
                       grad_op_desc_ptrs.begin(),
                       [](std::unique_ptr<OpDesc> &p) { return p.release(); });
        return std::make_pair(grad_op_desc_ptrs, grad_to_var);
      });
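
  // Each target passed to prune below is a [block_idx, op_idx] pair; the
  // referenced operator is marked as a target, and framework::Prune keeps
  // only the parts of the program needed to reach the targets.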
  m.def("prune", [](const ProgramDesc &origin,
                    const std::vector<std::array<size_t, 2>> &targets) {
    ProgramDesc prog_with_targets(origin);
    for (const auto &t : targets) {
      prog_with_targets.MutableBlock(t[0])->Op(t[1])->SetIsTarget(true);
    }
    proto::ProgramDesc pruned_desc;
    Prune(*prog_with_targets.Proto(), &pruned_desc);
    return new ProgramDesc(pruned_desc);
  });
  m.def("inference_optimize", [](ProgramDesc &origin) {
    proto::ProgramDesc pruned_desc;
    InferenceOptimize(*(origin.Proto()), &pruned_desc);
    return new ProgramDesc(pruned_desc);
  });
  m.def("empty_var_name", []() { return framework::kEmptyVarName; });
  m.def("grad_var_suffix", []() { return framework::kGradVarSuffix; });
  m.def_submodule(
       "var_names",
       "The module returns the special predefined variable names in Paddle")
      .def("empty", []() { return kEmptyVarName; })
      .def("temp", []() { return kTempVarName; });
  // clang-format off
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::CUDAPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifndef PADDLE_WITH_CUDA
                    PADDLE_THROW("CUDAPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  })
      .def_static("create",
                  [](paddle::platform::CUDAPinnedPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifndef PADDLE_WITH_CUDA
                    PADDLE_THROW(
                        "CUDAPinnedPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDAPinnedDeviceContext(place);
#endif
                  });
// clang-format on
#ifdef PADDLE_WITH_CUDA
  py::class_<platform::Communicator>(m, "Communicator").def(py::init<>());
#endif
  py::class_<platform::CUDAPlace>(m, "CUDAPlace")
      .def(py::init<int>())
      .def("__str__", string::to_string<const platform::CUDAPlace &>);

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace")
      .def(py::init<>())
      .def("__str__", string::to_string<const platform::CPUPlace &>);

  py::class_<paddle::platform::CUDAPinnedPlace>(m, "CUDAPinnedPlace")
      .def(py::init<>())
      .def("__str__", string::to_string<const platform::CUDAPinnedPlace &>);

  py::class_<platform::Place>(m, "Place")
      .def(py::init<>())
      .def("set_place",
           [](platform::Place &self, const platform::CPUPlace &cpu_place) {
             self = cpu_place;
           })
      .def("set_place",
           [](platform::Place &self, const platform::CUDAPlace &gpu_place) {
             self = gpu_place;
           })
      .def("set_place", [](platform::Place &self,
                           const platform::CUDAPinnedPlace &cuda_pinned_place) {
        self = cuda_pinned_place;
      });

  py::class_<OperatorBase>(m, "Operator")
      .def_static("create",
                  [](py::bytes protobin) {
                    proto::OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    return OpRegistry::CreateOp(desc);
                  })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CPUPlace &place) { self.Run(scope, place); })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CUDAPlace &place) { self.Run(scope, place); })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CUDAPinnedPlace &place) {
             self.Run(scope, place);
           })
      .def("type",
           [](const OperatorBase &op) -> std::string { return op.Type(); })
      .def("outputs",
           [](const OperatorBase &op)
               -> std::map<std::string, std::vector<std::string>> {
                 return op.Outputs();
               })
      .def("output_vars",
           [](const OperatorBase &op) { return op.OutputVars(true); })
      .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
      .def("input_vars", [](const OperatorBase &op) { return op.InputVars(); })
      .def("__str__", &OperatorBase::DebugString)
      .def("no_intermediate_outputs",
           [](const OperatorBase &op) { return op.OutputVars(false); })
      .def("support_gpu", &OperatorBase::SupportGPU);

  py::class_<framework::Executor>(m, "Executor")
      .def(py::init<const platform::Place &>())
#ifdef PADDLE_WITH_DISTRIBUTE
      .def("complete", &Executor::Complete)
#endif
      .def("run",
           (void (Executor::*)(const ProgramDesc &, Scope *, int, bool, bool)) &
               Executor::Run);

  m.def("init_gflags", framework::InitGflags);
  m.def("init_glog", framework::InitGLOG);
  m.def("init_devices",
        [](bool init_p2p) { framework::InitDevices(init_p2p); });

  m.def("is_compiled_with_cuda", IsCompiledWithCUDA);
#ifdef PADDLE_WITH_CUDA
  m.def("is_float16_supported", [](const platform::CUDAPlace &place) -> bool {
    // Only GPUs with Compute Capability >= 53 support float16
    return platform::GetCUDAComputeCapability(place.device) >= 53;
  });
#endif

  m.def("set_feed_variable", framework::SetFeedVariable);
  m.def("get_fetch_variable", framework::GetFetchVariable);

  BindProgramDesc(&m);
  BindBlockDesc(&m);
  BindVarDsec(&m);
  BindOpDesc(&m);
  BindConstValue(&m);

  py::class_<framework::LoDRankTable>(m, "LodRankTable")
      .def("items", [](framework::LoDRankTable &table) {
        std::vector<std::pair<size_t, size_t>> res;
        for (auto &item : table.items()) {
          res.push_back({item.index, item.length});
        }
        return res;
      });

  py::class_<LoDTensorArray>(m, "LoDTensorArray")
      .def("__getitem__",
           [](LoDTensorArray &self, size_t i) { return &self.at(i); },
           py::return_value_policy::reference)
      .def("__len__", [](LoDTensorArray &self) { return self.size(); })
      .def("__setitem__",
           [](LoDTensorArray &self, size_t i, const LoDTensor &t) {
             PADDLE_ENFORCE_LT(i, self.size());
             self[i].ShareDataWith(t);
             self[i].set_lod(t.lod());
           })
      .def("append", [](LoDTensorArray &self, const LoDTensor &t) {
        self.emplace_back();
        self.back().ShareDataWith(t);
        self.back().set_lod(t.lod());
      });
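
  // Illustrative Python-side usage (a sketch; no constructor is bound here,
  // so the array is typically obtained from a Variable):
  //   arr = scope.var("my_array").get_lod_tensor_array()
  //   arr.append(t)      # shares t's data and LoD
  //   assert len(arr) == 1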

  m.def("IsInplace",
        [](std::string op) -> bool { return operators::IsInplace(op); });

  m.def("op_support_gpu", OpSupportGPU);
#ifdef PADDLE_WITH_CUDA
  m.def("get_cuda_device_count", platform::GetCUDADeviceCount);

  m.def("nvprof_init", platform::CudaProfilerInit);
  m.def("nvprof_start", platform::CudaProfilerStart);
  m.def("nvprof_stop", platform::CudaProfilerStop);
#endif

  py::enum_<platform::ProfilerState>(m, "ProfilerState", py::arithmetic())
      .value("kDisabled", platform::ProfilerState::kDisabled)
      .value("kCPU", platform::ProfilerState::kCPU)
      .value("kCUDA", platform::ProfilerState::kCUDA)
      .value("kAll", platform::ProfilerState::kAll)
      .export_values();

  py::enum_<platform::EventSortingKey>(m, "EventSortingKey", py::arithmetic())
      .value("kDefault", platform::EventSortingKey::kDefault)
      .value("kCalls", platform::EventSortingKey::kCalls)
      .value("kTotal", platform::EventSortingKey::kTotal)
      .value("kMin", platform::EventSortingKey::kMin)
      .value("kMax", platform::EventSortingKey::kMax)
      .value("kAve", platform::EventSortingKey::kAve)
      .export_values();

  m.def("enable_profiler", platform::EnableProfiler);
  m.def("disable_profiler", platform::DisableProfiler);
  m.def("is_profiler_enabled", platform::IsProfileEnabled);
  m.def("reset_profiler", platform::ResetProfiler);

  // -- python binds for parallel executor.
  py::class_<ParallelExecutor> pe(m, "ParallelExecutor");
  py::class_<ExecutionStrategy>(pe, "ExecutionStrategy")
      .def(py::init())
      .def_property(
          "num_threads",
          [](const ExecutionStrategy &self) { return self.num_threads_; },
          [](ExecutionStrategy &self, size_t num_threads) {
            self.num_threads_ = num_threads;
          })
      .def_property(
          "use_cuda",
          [](const ExecutionStrategy &self) { return self.use_cuda_; },
          [](ExecutionStrategy &self, bool use_cuda) {
            self.use_cuda_ = use_cuda;
          })
      .def_property(
          "allow_op_delay",
          [](const ExecutionStrategy &self) { return self.allow_op_delay_; },
          [](ExecutionStrategy &self, bool allow_op_delay) {
            self.allow_op_delay_ = allow_op_delay;
          })
      .def_property(
          "num_iteration_per_drop_scope",
          [](const ExecutionStrategy &self) {
            return self.num_iteration_per_drop_scope_;
          },
          [](ExecutionStrategy &self, size_t num_iteration_per_drop_scope) {
            self.num_iteration_per_drop_scope_ = num_iteration_per_drop_scope;
          });
  py::class_<BuildStrategy> build_strategy(pe, "BuildStrategy");

  py::enum_<BuildStrategy::ReduceStrategy>(build_strategy, "ReduceStrategy")
      .value("Reduce", BuildStrategy::ReduceStrategy::kReduce)
      .value("AllReduce", BuildStrategy::ReduceStrategy::kAllReduce);
  py::enum_<BuildStrategy::GradientScaleStrategy>(build_strategy,
                                                  "GradientScaleStrategy")
      .value("CoeffNumDevice",
             BuildStrategy::GradientScaleStrategy::kCoeffNumDevice)
      .value("One", BuildStrategy::GradientScaleStrategy::kOne)
      .value("Customized", BuildStrategy::GradientScaleStrategy::kCustomized);

  build_strategy.def(py::init())
      .def_property(
          "reduce_strategy",
          [](const BuildStrategy &self) { return self.reduce_; },
          [](BuildStrategy &self, BuildStrategy::ReduceStrategy strategy) {
            self.reduce_ = strategy;
          })
      .def_property(
          "gradient_scale_strategy",
          [](const BuildStrategy &self) { return self.gradient_scale_; },
          [](BuildStrategy &self,
             BuildStrategy::GradientScaleStrategy strategy) {
            self.gradient_scale_ = strategy;
          })
      .def_property(
          "debug_graphviz_path",
          [](const BuildStrategy &self) { return self.debug_graphviz_path_; },
          [](BuildStrategy &self, const std::string &path) {
            self.debug_graphviz_path_ = path;
          });
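
  // Illustrative Python-side configuration (a sketch; the strategy classes
  // are nested under ParallelExecutor):
  //   strategy = core.ParallelExecutor.ExecutionStrategy()
  //   strategy.num_threads = 4
  //   strategy.allow_op_delay = False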

  pe.def(py::init<const std::vector<platform::Place> &,
                  const std::unordered_set<std::string> &,
                  const std::unordered_set<std::string> &, const ProgramDesc &,
                  const std::string &, Scope *, std::vector<Scope *> &,
                  const ExecutionStrategy &, const BuildStrategy &, size_t,
                  size_t>())
      .def("bcast_params", &ParallelExecutor::BCastParamsToGPUs)
      // NOTE: even though we return a vector<Scope *> * to Python with the
      // reference policy, we still cannot hand out a local scope from that
      // vector, since the extracted vector<Scope *> elements would be freed
      // by the Python GC. We can only return Scope * pointers one by one,
      // each marked as a reference.
      .def("local_scopes",
           [](ParallelExecutor &self) -> std::vector<Scope *> * {
             return &self.GetLocalScopes();
           },
           py::return_value_policy::reference)
      .def("feed_tensors_into_local_scopes",
           &ParallelExecutor::FeedTensorsIntoLocalScopes)
      .def("feed_and_split_tensor_into_local_scopes",
           &ParallelExecutor::FeedAndSplitTensorIntoLocalScopes)
      .def("run", &ParallelExecutor::Run);

  BindRecordIOWriter(&m);
  return m.ptr();
}
}  // namespace pybind
}  // namespace paddle