/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <Python.h>
#include <algorithm>
#include <map>
#include <memory>
#include <mutex>  // NOLINT // for call_once
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/channel.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/parallel_executor.h"
#include "paddle/fluid/framework/prune.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/pybind/const_value.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/fluid/pybind/protobuf.h"
#include "paddle/fluid/pybind/pybind.h"  // NOLINT
#include "paddle/fluid/pybind/recordio.h"
#include "paddle/fluid/pybind/tensor_py.h"

#include "paddle/fluid/string/to_string.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/operators/nccl/nccl_gpu_common.h"
#include "paddle/fluid/platform/cuda_profiler.h"
#include "paddle/fluid/platform/gpu_info.h"
#endif

// disable auto conversion to list in Python
PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray);

namespace paddle {
namespace pybind {
bool IsCompiledWithCUDA() {
#ifndef PADDLE_WITH_CUDA
  return false;
#else
  return true;
#endif
}

PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

  // using framework in this function. Since it is inside a function, it will
  // not cause namespace pollution.
  using namespace paddle::framework;  // NOLINT

  BindException(&m);

  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("get_dims",
           [](const Tensor &self) { return vectorize(self.dims()); })
      .def("set_dims",
           [](Tensor &self, const std::vector<int64_t> &dim) {
             self.Resize(make_ddim(dim));
           })
      .def("set_layout",
           [](Tensor &self, const std::string &layout) {
             self.set_layout(StringToDataLayout(layout));
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CUDAPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CUDAPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("set", PyCPUTensorSetFromArray<float>)
      .def("set", PyCPUTensorSetFromArray<int>)
      .def("set", PyCPUTensorSetFromArray<double>)
      .def("set", PyCPUTensorSetFromArray<int64_t>)
      .def("set", PyCPUTensorSetFromArray<bool>)
      .def("set", PyCPUTensorSetFromArray<uint16_t>)
      .def("set", PyCPUTensorSetFromArray<uint8_t>)
#ifdef PADDLE_WITH_CUDA
      .def("set", PyCUDATensorSetFromArray<float>)
      .def("set", PyCUDATensorSetFromArray<int>)
      .def("set", PyCUDATensorSetFromArray<double>)
      .def("set", PyCUDATensorSetFromArray<int64_t>)
      .def("set", PyCUDATensorSetFromArray<bool>)
      .def("set", PyCUDATensorSetFromArray<uint16_t>)
      .def("set", PyCUDATensorSetFromArray<uint8_t>)
      .def("set", PyCUDAPinnedTensorSetFromArray<float>)
      .def("set", PyCUDAPinnedTensorSetFromArray<int>)
      .def("set", PyCUDAPinnedTensorSetFromArray<double>)
      .def("set", PyCUDAPinnedTensorSetFromArray<int64_t>)
      .def("set", PyCUDAPinnedTensorSetFromArray<bool>)
      .def("set", PyCUDAPinnedTensorSetFromArray<uint16_t>)
      .def("set", PyCUDAPinnedTensorSetFromArray<uint8_t>)
#endif
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
      .def("set_float_element", TensorSetElement<float>)
      .def("get_float_element", TensorGetElement<float>)
      .def("set_double_element", TensorSetElement<double>)
      .def("get_double_element", TensorGetElement<double>)
      .def("dtype", [](Tensor &self) { return ToDataType(self.type()); });

  py::class_<LoDTensor, Tensor>(m, "LoDTensor")
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("__init__",
           [](LoDTensor &instance, const std::vector<std::vector<size_t>>
                                       &recursive_sequence_lengths) {
             LoD new_lod;
             new_lod.reserve(recursive_sequence_lengths.size());
             std::copy(recursive_sequence_lengths.begin(),
                       recursive_sequence_lengths.end(),
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE(
                 CheckLoD(new_offset_lod, -1),
                 "the provided recursive_sequence_lengths info is invalid");
             new (&instance) LoDTensor(new_offset_lod);
           })
      .def("__init__", [](LoDTensor &instance) { new (&instance) LoDTensor(); })
      // We implement offset-based LoD in C++, while the Python API uses
      // length-based LoD. So set_lod was renamed to
      // set_recursive_sequence_lengths to avoid misuse.
      // The discussion is here:
      // https://github.com/PaddlePaddle/Paddle/issues/10855
      .def("set_lod",
           [](LoDTensor &self, const std::vector<std::vector<size_t>> &lod) {
             // the input lod is offset-based level-of-detail info
             LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             PADDLE_ENFORCE(CheckLoD(new_lod, vectorize(self.dims()).front()),
                            "the provided lod info is invalid");
             self.set_lod(new_lod);
           })
      .def("set_recursive_sequence_lengths",
           [](LoDTensor &self, const std::vector<std::vector<size_t>>
                                   &recursive_sequence_lengths) {
             // the input recursive_sequence_lengths is length-based
             // level-of-detail info
             LoD new_lod;
             new_lod.reserve(recursive_sequence_lengths.size());
             std::copy(recursive_sequence_lengths.begin(),
                       recursive_sequence_lengths.end(),
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE(
                 CheckLoD(new_offset_lod, vectorize(self.dims()).front()),
                 "the provided recursive_sequence_lengths info is invalid");
             self.set_lod(new_offset_lod);
           })
      .def("lod",
           [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
             // output the offset-based lod info
             LoD lod = self.lod();
             std::vector<std::vector<size_t>> new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return new_lod;
           })
      // See the comments for set_lod above.
      .def("recursive_sequence_lengths",
           [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
             // output the length-based lod info
             LoD lod = ConvertToLengthBasedLoD(self.lod());
             std::vector<std::vector<size_t>> new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return new_lod;
           })
      .def("has_valid_recursive_sequence_lengths", [](LoDTensor &self) -> bool {
        // Check that the lod info is valid and matches the outermost
        // dimension of the LoDTensor data
        return CheckLoD(self.lod(), vectorize(self.dims()).front());
      });
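  // A minimal sketch of the two LoD views (hypothetical values): two
  // sequences of lengths 2 and 3 stacked into one LoDTensor of 5 rows give
  //   recursive_sequence_lengths() -> [[2, 3]]     (length-based)
  //   lod()                        -> [[0, 2, 5]]  (offset-based)
  // From Python one would call, e.g.,
  //   t.set_recursive_sequence_lengths([[2, 3]])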

  py::class_<SelectedRows>(m, "SelectedRows")
      .def("__init__",
           [](SelectedRows &instance) { new (&instance) SelectedRows(); })
      .def("__init__",
           [](SelectedRows &instance, const std::vector<int64_t> rows,
              const int64_t &height) {
             new (&instance) SelectedRows(rows, height);
           })
      .def("get_tensor",
           [](SelectedRows &self) { return self.mutable_value(); },
           py::return_value_policy::reference)
      .def("set_height", &SelectedRows::set_height)
      .def("height", &SelectedRows::height)
      .def("set_rows",
           [](SelectedRows &self, std::vector<int64_t> rows) {
#ifndef PADDLE_WITH_CUDA
             self.set_rows(rows);
#else
             // On CUDA builds the rows live in a framework::Vector, which
             // mirrors its data between host and device.
             Vector<int64_t> new_rows(rows);
             self.set_rows(new_rows);
#endif
           })
      .def("rows", [](SelectedRows &self) {
#ifndef PADDLE_WITH_CUDA
        return self.rows();
#else
        // Copy out of the device-mirrored Vector into a plain std::vector.
        auto rows = self.rows();
        std::vector<int64_t> new_rows;
        new_rows.reserve(rows.size());
        std::copy(rows.begin(), rows.end(), std::back_inserter(new_rows));
        return new_rows;
#endif
      });
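  // Rough Python-side sketch (names as bound above): a SelectedRows pairs a
  // list of row indices with a dense value tensor.
  //   sr = core.SelectedRows([0, 4, 7], 10)  # rows, height
  //   value = sr.get_tensor()                # the dense rows live here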
  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.

All parameters, weights, and gradients are variables in Paddle.
)DOC")
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
      .def("set_int",
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
      .def("is_float", [](const Variable &var) { return var.IsType<float>(); })
      .def("set_float",
           [](Variable &var, float val) -> void {
             *var.GetMutable<float>() = val;
           })
      .def("get_float",
           [](const Variable &var) -> float { return var.Get<float>(); })
      .def("get_tensor",
           [](Variable &self) -> LoDTensor * {
             return self.GetMutable<LoDTensor>();
           },
           py::return_value_policy::reference)
      .def("get_lod_rank_table",
           [](Variable &self) { return self.GetMutable<LoDRankTable>(); },
           py::return_value_policy::reference)
      .def("get_selected_rows",
           [](Variable &self) -> SelectedRows * {
             return self.GetMutable<SelectedRows>();
           },
           py::return_value_policy::reference)
      .def("get_lod_tensor_array",
           [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
           py::return_value_policy::reference)
#ifdef PADDLE_WITH_CUDA
      .def("get_communicator",
           [](Variable &self) -> platform::Communicator * {
             return self.GetMutable<platform::Communicator>();
           },
           py::return_value_policy::reference)
#endif
      .def("get_reader",
           [](Variable &self) -> framework::ReaderHolder * {
             PADDLE_ENFORCE(self.IsType<framework::ReaderHolder>());
             return self.GetMutable<framework::ReaderHolder>();
           },
           py::return_value_policy::reference);
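  // Python-side sketch (hypothetical variable name): a Variable is a typed
  // holder; the get_* accessors create the payload on first use.
  //   v = scope.var("fc_0.w_0")
  //   t = v.get_tensor()  # the LoDTensor held by the variable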

  py::class_<framework::ReaderHolder>(m, "Reader", "")
      .def("reset", &framework::ReaderHolder::ResetAll);

  using LoDTensorBlockingQueue =
      ::paddle::operators::reader::LoDTensorBlockingQueue;
  using LoDTensorBlockingQueueHolder =
      ::paddle::operators::reader::LoDTensorBlockingQueueHolder;
  py::class_<LoDTensorBlockingQueue, std::shared_ptr<LoDTensorBlockingQueue>>(
      m, "LoDTensorBlockingQueue", "")
      .def("push",
           [](LoDTensorBlockingQueue &self,
              const std::vector<framework::LoDTensor> &lod_tensor_vec) {
             // Release the GIL so other Python threads keep running while
             // Push blocks waiting for free capacity.
             pybind11::gil_scoped_release release;
             return self.Push(lod_tensor_vec);
           })
      .def("size", &LoDTensorBlockingQueue::Size)
      .def("capacity", &LoDTensorBlockingQueue::Cap)
      .def("push_eof", &LoDTensorBlockingQueue::Close)
      .def("is_eof", &LoDTensorBlockingQueue::IsClosed);

  m.def("init_lod_tensor_blocking_queue",
        [](Variable &var, size_t capacity,
           const std::vector<std::vector<int64_t>> &shapes)
            -> std::shared_ptr<LoDTensorBlockingQueue> {
              std::vector<DDim> dims(shapes.size());
              std::transform(shapes.begin(), shapes.end(), dims.begin(),
                             [](const std::vector<int64_t> &shape) {
                               return make_ddim(shape);
                             });
              auto *holder = var.GetMutable<LoDTensorBlockingQueueHolder>();
              holder->InitOnce(capacity, dims);
              return holder->GetQueue();
            },
        py::return_value_policy::copy);
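  // Rough Python-side sketch (hypothetical variable and shapes):
  //   queue = core.init_lod_tensor_blocking_queue(var, 10, [[-1, 784]])
  //   queue.push([lod_tensor])  # blocks while the queue is at capacity
  //   queue.push_eof()          # close the queue; readers see EOF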

  py::class_<Scope>(m, "Scope", "")
      .def("var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.Var(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids);

  //! @note: Be careful! PyBind will return std::string as a unicode object,
  //! not a Python str. If you want a str object, you should cast it in Python.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    std::vector<py::bytes> ret_values;
    for (auto &iter : OpInfoMap::Instance().map()) {
      auto &info = iter.second;
      if (info.HasOpProtoAndChecker()) {
        std::string str;
        PADDLE_ENFORCE(
            info.Proto().SerializeToString(&str),
            "Serialize OpProto Error. This could be a bug of Paddle.");
        ret_values.emplace_back(str);
      }
    }
    return ret_values;
  });
  m.def(
      "get_grad_op_desc", [](const OpDesc &op_desc,
                             const std::unordered_set<std::string> &no_grad_set,
                             const std::vector<BlockDesc *> &grad_sub_block) {
        std::unordered_map<std::string, std::string> grad_to_var;
        std::vector<std::unique_ptr<OpDesc>> grad_op_descs =
            framework::OpInfoMap::Instance()
                .Get(op_desc.Type())
                .GradOpMaker()(op_desc, no_grad_set, &grad_to_var,
                               grad_sub_block);
        std::vector<OpDesc *> grad_op_desc_ptrs(grad_op_descs.size());
        std::transform(grad_op_descs.begin(), grad_op_descs.end(),
                       grad_op_desc_ptrs.begin(),
                       [](std::unique_ptr<OpDesc> &p) { return p.release(); });
        return std::make_pair(grad_op_desc_ptrs, grad_to_var);
      });
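  // Sketch of the contract: given a forward OpDesc, this returns the
  // generated gradient op descs plus the map from generated gradient
  // variable names to the forward variables they belong to. From Python
  // (hypothetical desc objects):
  //   grad_ops, grad_to_var = core.get_grad_op_desc(fwd_op_desc, set(), [])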
  m.def("prune", [](const ProgramDesc &origin,
                    const std::vector<std::array<size_t, 2>> &targets) {
    ProgramDesc prog_with_targets(origin);
    // Each target is a {block_idx, op_idx} pair marking an op as a target.
    for (const auto &t : targets) {
      prog_with_targets.MutableBlock(t[0])->Op(t[1])->SetIsTarget(true);
    }
    proto::ProgramDesc pruned_desc;
    Prune(*prog_with_targets.Proto(), &pruned_desc);
    return new ProgramDesc(pruned_desc);
  });
  m.def("inference_optimize", [](ProgramDesc &origin) {
    proto::ProgramDesc pruned_desc;
    InferenceOptimize(*(origin.Proto()), &pruned_desc);
    return new ProgramDesc(pruned_desc);
  });
  m.def("empty_var_name", []() { return framework::kEmptyVarName; });
  m.def("grad_var_suffix", []() { return framework::kGradVarSuffix; });
  m.def_submodule(
       "var_names",
       "This module returns the special predefined variable names in Paddle")
      .def("empty", []() { return kEmptyVarName; })
      .def("temp", []() { return kTempVarName; });
  // clang-format off
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::CUDAPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifndef PADDLE_WITH_CUDA
                    PADDLE_THROW("CUDAPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  })
      .def_static("create",
                  [](paddle::platform::CUDAPinnedPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifndef PADDLE_WITH_CUDA
                    PADDLE_THROW(
                        "CUDAPinnedPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDAPinnedDeviceContext(place);
#endif
                  });
// clang-format on
#ifdef PADDLE_WITH_CUDA
  py::class_<platform::Communicator>(m, "Communicator").def(py::init<>());
#endif
  py::class_<platform::CUDAPlace>(m, "CUDAPlace")
      .def(py::init<int>())
      .def("__str__", string::to_string<const platform::CUDAPlace &>);

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace")
      .def(py::init<>())
      .def("__str__", string::to_string<const platform::CPUPlace &>);

  py::class_<paddle::platform::CUDAPinnedPlace>(m, "CUDAPinnedPlace")
      .def(py::init<>())
      .def("__str__", string::to_string<const platform::CUDAPinnedPlace &>);

  py::class_<platform::Place>(m, "Place")
      .def(py::init<>())
      .def("set_place",
           [](platform::Place &self, const platform::CPUPlace &cpu_place) {
             self = cpu_place;
           })
      .def("set_place",
           [](platform::Place &self, const platform::CUDAPlace &gpu_place) {
             self = gpu_place;
           })
      .def("set_place", [](platform::Place &self,
                           const platform::CUDAPinnedPlace &cuda_pinned_place) {
        self = cuda_pinned_place;
      });

  py::class_<OperatorBase>(m, "Operator")
      .def_static("create",
                  [](py::bytes protobin) {
                    proto::OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    return OpRegistry::CreateOp(desc);
                  })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CPUPlace &place) { self.Run(scope, place); })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CUDAPlace &place) { self.Run(scope, place); })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CUDAPinnedPlace &place) {
             self.Run(scope, place);
           })
      .def("type",
           [](const OperatorBase &op) -> std::string { return op.Type(); })
      .def("outputs",
           [](const OperatorBase &op)
               -> std::map<std::string, std::vector<std::string>> {
                 return op.Outputs();
               })
      .def("output_vars",
           [](const OperatorBase &op) { return op.OutputVars(true); })
      .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
      .def("input_vars", [](const OperatorBase &op) { return op.InputVars(); })
      .def("__str__", &OperatorBase::DebugString)
      .def("no_intermediate_outputs",
           [](const OperatorBase &op) { return op.OutputVars(false); })
      .def("support_gpu", &OperatorBase::SupportGPU);

  py::class_<framework::Executor>(m, "Executor")
      .def(py::init<const platform::Place &>())
#ifdef PADDLE_WITH_DISTRIBUTE
      .def("begin_pass", &Executor::BeginPass)
      .def("end_pass", &Executor::EndPass)
#endif
      .def("run", [](Executor &self, const ProgramDesc &prog, Scope *scope,
                     int block_id, bool create_local_scope, bool create_vars) {
        // Release the GIL so other Python threads (e.g. data feeding) can
        // run while the executor executes the program.
        pybind11::gil_scoped_release release;
        self.Run(prog, scope, block_id, create_local_scope, create_vars);
      });
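  // Rough Python-side sketch (hypothetical program/scope objects):
  //   exe = core.Executor(core.CPUPlace())
  //   exe.run(program.desc, scope, 0, True, True)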

  m.def("init_gflags", framework::InitGflags);
  m.def("init_glog", framework::InitGLOG);
  m.def("init_devices",
        [](bool init_p2p) { framework::InitDevices(init_p2p); });

  m.def("is_compiled_with_cuda", IsCompiledWithCUDA);
#ifdef PADDLE_WITH_CUDA
  m.def("is_float16_supported", [](const platform::CUDAPlace &place) -> bool {
    // Only GPUs with Compute Capability >= 53 support float16
    return platform::GetCUDAComputeCapability(place.device) >= 53;
  });
#endif

  m.def("set_feed_variable", framework::SetFeedVariable);
  m.def("get_fetch_variable", framework::GetFetchVariable);

  BindProgramDesc(&m);
  BindBlockDesc(&m);
  BindVarDsec(&m);
  BindOpDesc(&m);
  BindConstValue(&m);

  py::class_<framework::LoDRankTable>(m, "LodRankTable")
      .def("items", [](framework::LoDRankTable &table) {
        std::vector<std::pair<size_t, size_t>> res;
        for (auto &item : table.items()) {
          res.push_back({item.index, item.length});
        }
        return res;
      });

  py::class_<LoDTensorArray>(m, "LoDTensorArray")
      .def("__init__",
           [](LoDTensorArray &instance) { new (&instance) LoDTensorArray(); })
      .def("__getitem__",
           [](LoDTensorArray &self, size_t i) { return &self.at(i); },
           py::return_value_policy::reference)
      .def("__len__", [](LoDTensorArray &self) { return self.size(); })
      .def("__setitem__",
           [](LoDTensorArray &self, size_t i, const LoDTensor &t) {
             PADDLE_ENFORCE_LT(i, self.size());
             self[i].ShareDataWith(t);
             self[i].set_lod(t.lod());
           })
      .def("append", [](LoDTensorArray &self, const LoDTensor &t) {
        self.emplace_back();
        self.back().ShareDataWith(t);
        self.back().set_lod(t.lod());
      });

  m.def("IsInplace",
        [](std::string op) -> bool { return operators::IsInplace(op); });

  m.def("op_support_gpu", OpSupportGPU);
#ifdef PADDLE_WITH_CUDA
  m.def("get_cuda_device_count", platform::GetCUDADeviceCount);

  m.def("nvprof_init", platform::CudaProfilerInit);
  m.def("nvprof_start", platform::CudaProfilerStart);
  m.def("nvprof_stop", platform::CudaProfilerStop);
#endif

  py::enum_<platform::ProfilerState>(m, "ProfilerState", py::arithmetic())
      .value("kDisabled", platform::ProfilerState::kDisabled)
      .value("kCPU", platform::ProfilerState::kCPU)
      .value("kCUDA", platform::ProfilerState::kCUDA)
      .value("kAll", platform::ProfilerState::kAll)
      .export_values();

  py::enum_<platform::EventSortingKey>(m, "EventSortingKey", py::arithmetic())
      .value("kDefault", platform::EventSortingKey::kDefault)
      .value("kCalls", platform::EventSortingKey::kCalls)
      .value("kTotal", platform::EventSortingKey::kTotal)
      .value("kMin", platform::EventSortingKey::kMin)
      .value("kMax", platform::EventSortingKey::kMax)
      .value("kAve", platform::EventSortingKey::kAve)
      .export_values();

  m.def("enable_profiler", platform::EnableProfiler);
  m.def("disable_profiler", platform::DisableProfiler);
  m.def("is_profiler_enabled", platform::IsProfileEnabled);
  m.def("reset_profiler", platform::ResetProfiler);

  // -- python binds for parallel executor.
  py::class_<ParallelExecutor> pe(m, "ParallelExecutor");
  py::class_<ExecutionStrategy>(pe, "ExecutionStrategy")
      .def(py::init())
      .def_property(
          "num_threads",
          [](const ExecutionStrategy &self) { return self.num_threads_; },
          [](ExecutionStrategy &self, size_t num_threads) {
            self.num_threads_ = num_threads;
          })
      .def_property(
          "use_cuda",
          [](const ExecutionStrategy &self) { return self.use_cuda_; },
          [](ExecutionStrategy &self, bool use_cuda) {
            self.use_cuda_ = use_cuda;
          })
      .def_property(
          "allow_op_delay",
          [](const ExecutionStrategy &self) { return self.allow_op_delay_; },
          [](ExecutionStrategy &self, bool allow_op_delay) {
            self.allow_op_delay_ = allow_op_delay;
          })
      .def_property(
          "num_iteration_per_drop_scope",
          [](const ExecutionStrategy &self) {
            return self.num_iteration_per_drop_scope_;
          },
          [](ExecutionStrategy &self, size_t num_iteration_per_drop_scope) {
            self.num_iteration_per_drop_scope_ = num_iteration_per_drop_scope;
          });
  py::class_<BuildStrategy> build_strategy(pe, "BuildStrategy");

  py::enum_<BuildStrategy::ReduceStrategy>(build_strategy, "ReduceStrategy")
      .value("Reduce", BuildStrategy::ReduceStrategy::kReduce)
      .value("AllReduce", BuildStrategy::ReduceStrategy::kAllReduce);
  py::enum_<BuildStrategy::GradientScaleStrategy>(build_strategy,
                                                  "GradientScaleStrategy")
      .value("CoeffNumDevice",
             BuildStrategy::GradientScaleStrategy::kCoeffNumDevice)
      .value("One", BuildStrategy::GradientScaleStrategy::kOne)
      .value("Customized", BuildStrategy::GradientScaleStrategy::kCustomized);

  build_strategy.def(py::init())
      .def_property(
          "reduce_strategy",
          [](const BuildStrategy &self) { return self.reduce_; },
          [](BuildStrategy &self, BuildStrategy::ReduceStrategy strategy) {
            self.reduce_ = strategy;
          })
      .def_property(
          "gradient_scale_strategy",
          [](const BuildStrategy &self) { return self.gradient_scale_; },
          [](BuildStrategy &self,
             BuildStrategy::GradientScaleStrategy strategy) {
            self.gradient_scale_ = strategy;
          })
      .def_property(
          "debug_graphviz_path",
          [](const BuildStrategy &self) { return self.debug_graphviz_path_; },
          [](BuildStrategy &self, const std::string &path) {
            self.debug_graphviz_path_ = path;
          })
      .def_property(
          "enable_data_balance",
          [](const BuildStrategy &self) { return self.enable_data_balance_; },
          [](BuildStrategy &self, bool b) { self.enable_data_balance_ = b; });
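  // Rough Python-side sketch of configuring the strategies (names as bound
  // above; both classes hang off ParallelExecutor):
  //   exec_strategy = core.ParallelExecutor.ExecutionStrategy()
  //   exec_strategy.num_threads = 4
  //   build_strategy = core.ParallelExecutor.BuildStrategy()
  //   build_strategy.reduce_strategy = \
  //       core.ParallelExecutor.BuildStrategy.ReduceStrategy.AllReduce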
  pe.def(py::init<const std::vector<platform::Place> &,
                  const std::unordered_set<std::string> &,
                  const std::unordered_set<std::string> &, const ProgramDesc &,
                  const std::string &, Scope *, std::vector<Scope *> &,
                  const ExecutionStrategy &, const BuildStrategy &, size_t,
                  size_t>())
      .def("bcast_params", &ParallelExecutor::BCastParamsToGPUs)
      // NOTE: even though we return a vector<Scope *> * to Python with the
      // reference policy, we still cannot take local scopes out of that
      // vector, because the elements would be freed by the Python GC. We can
      // only return each Scope * one by one and mark it as a reference.
      .def("local_scopes",
           [](ParallelExecutor &self) -> std::vector<Scope *> * {
             return &self.GetLocalScopes();
           },
           py::return_value_policy::reference)
      .def("feed_tensors_into_local_scopes",
           &ParallelExecutor::FeedTensorsIntoLocalScopes)
      .def("feed_and_split_tensor_into_local_scopes",
           &ParallelExecutor::FeedAndSplitTensorIntoLocalScopes)
      .def("run", [](ParallelExecutor &self,
                     const std::vector<std::string> &fetch_tensors,
                     const std::string &fetched_var_name) {
        // Release the GIL while the parallel executor runs the graph.
        pybind11::gil_scoped_release release;
        self.Run(fetch_tensors, fetched_var_name);
      });

  BindRecordIOWriter(&m);
  return m.ptr();
}
}  // namespace pybind
}  // namespace paddle