/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <Python.h>
#include <algorithm>
#include <map>
#include <memory>
#include <mutex>  // NOLINT // for call_once
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/channel.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/parallel_executor.h"
#include "paddle/fluid/framework/prune.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/pybind/const_value.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/fluid/pybind/protobuf.h"
#include "paddle/fluid/pybind/pybind.h"  // NOLINT
#include "paddle/fluid/pybind/recordio.h"
#include "paddle/fluid/pybind/tensor_py.h"

#include "paddle/fluid/string/to_string.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/operators/nccl/nccl_gpu_common.h"
#include "paddle/fluid/platform/cuda_profiler.h"
#include "paddle/fluid/platform/gpu_info.h"
#endif

// disable auto conversion to list in Python
PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray);

namespace paddle {
namespace pybind {
bool IsCompiledWithCUDA() {
#ifndef PADDLE_WITH_CUDA
  return false;
#else
  return true;
#endif
}

bool IsCompiledWithDIST() {
#ifdef PADDLE_WITH_DISTRIBUTE
  return true;
#else
  return false;
#endif
}

PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

  // Pull paddle::framework into this function's scope only; since the
  // using-directive is function-local, it does not pollute the global
  // namespace.
  using namespace paddle::framework;  // NOLINT

  BindException(&m);

  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("_get_dims",
           [](const Tensor &self) { return vectorize(self.dims()); })
      .def("_set_dims",
           [](Tensor &self, const std::vector<int64_t> &dim) {
             self.Resize(make_ddim(dim));
           })
      .def("_set_layout",
           [](Tensor &self, const std::string &layout) {
             self.set_layout(StringToDataLayout(layout));
           })
      .def("_alloc_float",
           [](Tensor &self, paddle::platform::CUDAPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("_alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("_alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("_alloc_int",
           [](Tensor &self, paddle::platform::CUDAPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("_alloc_int",
           [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("_alloc_float",
           [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("set", PyCPUTensorSetFromArray<float>)
      .def("set", PyCPUTensorSetFromArray<int>)
      .def("set", PyCPUTensorSetFromArray<double>)
      .def("set", PyCPUTensorSetFromArray<int64_t>)
      .def("set", PyCPUTensorSetFromArray<bool>)
      .def("set", PyCPUTensorSetFromArray<uint16_t>)
      .def("set", PyCPUTensorSetFromArray<uint8_t>)
#ifdef PADDLE_WITH_CUDA
      .def("set", PyCUDATensorSetFromArray<float>)
      .def("set", PyCUDATensorSetFromArray<int>)
      .def("set", PyCUDATensorSetFromArray<double>)
      .def("set", PyCUDATensorSetFromArray<int64_t>)
      .def("set", PyCUDATensorSetFromArray<bool>)
      .def("set", PyCUDATensorSetFromArray<uint16_t>)
      .def("set", PyCUDATensorSetFromArray<uint8_t>)
      .def("set", PyCUDAPinnedTensorSetFromArray<float>)
      .def("set", PyCUDAPinnedTensorSetFromArray<int>)
      .def("set", PyCUDAPinnedTensorSetFromArray<double>)
      .def("set", PyCUDAPinnedTensorSetFromArray<int64_t>)
      .def("set", PyCUDAPinnedTensorSetFromArray<bool>)
      .def("set", PyCUDAPinnedTensorSetFromArray<uint16_t>)
      .def("set", PyCUDAPinnedTensorSetFromArray<uint8_t>)
#endif
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
      .def("_set_float_element", TensorSetElement<float>)
      .def("_get_float_element", TensorGetElement<float>)
      .def("_set_double_element", TensorSetElement<double>)
      .def("_get_double_element", TensorGetElement<double>)
      .def("_dtype", [](Tensor &self) { return ToDataType(self.type()); });
  py::class_<LoDTensor, Tensor>(m, "LoDTensor")
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("__init__",
           [](LoDTensor &instance, const std::vector<std::vector<size_t>>
                                       &recursive_sequence_lengths) {
             LoD new_lod;
             new_lod.reserve(recursive_sequence_lengths.size());
             std::copy(recursive_sequence_lengths.begin(),
                       recursive_sequence_lengths.end(),
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE(
                 CheckLoD(new_offset_lod, -1),
                 "the provided recursive_sequence_lengths info is invalid");
             new (&instance) LoDTensor(new_offset_lod);
           })
      .def("__init__", [](LoDTensor &instance) { new (&instance) LoDTensor(); })
      // We implement offset-based LoD in C++, while the Python API uses the
      // length-based representation, so set_lod was renamed to
      // set_recursive_sequence_lengths to avoid misuse.
      // The discussion is here:
      // https://github.com/PaddlePaddle/Paddle/issues/10855
      .def("set_lod",
           [](LoDTensor &self, const std::vector<std::vector<size_t>> &lod) {
             // the input lod is offset-based level-of-detail info
             LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             PADDLE_ENFORCE(CheckLoD(new_lod, vectorize(self.dims()).front()),
                            "the provided lod info is invalid");
             self.set_lod(new_lod);
           })
      .def("set_recursive_sequence_lengths",
           [](LoDTensor &self, const std::vector<std::vector<size_t>>
                                   &recursive_sequence_lengths) {
             // the input recursive_sequence_lengths is length-based
             // level-of-detail info
             LoD new_lod;
             new_lod.reserve(recursive_sequence_lengths.size());
             std::copy(recursive_sequence_lengths.begin(),
                       recursive_sequence_lengths.end(),
                       std::back_inserter(new_lod));
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE(
                 CheckLoD(new_offset_lod, vectorize(self.dims()).front()),
                 "the provided recursive_sequence_lengths info is invalid");
             self.set_lod(new_offset_lod);
           })
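      // Illustrative example of the two representations (added for clarity,
      // not from the original source): two sequences of lengths 2 and 3 give
      // recursive_sequence_lengths [[2, 3]], which corresponds to the
      // offset-based lod [[0, 2, 5]].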
      .def("lod",
           [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
             // output the offset-based lod info
             LoD lod = self.lod();
             std::vector<std::vector<size_t>> new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return new_lod;
           })
      // See the comments above set_lod.
      .def("recursive_sequence_lengths",
           [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
             // output the length-based lod info
             LoD lod = ConvertToLengthBasedLoD(self.lod());
             std::vector<std::vector<size_t>> new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             return new_lod;
           })
      .def("has_valid_recursive_sequence_lengths", [](LoDTensor &self) -> bool {
        // Check that the lod info is valid and match the outermost
        // dimension of the LoDTensor data
        return CheckLoD(self.lod(), vectorize(self.dims()).front());
      });

  py::class_<SelectedRows>(m, "SelectedRows")
      .def("__init__",
           [](SelectedRows &instance) { new (&instance) SelectedRows(); })
      .def("__init__",
           [](SelectedRows &instance, const std::vector<int64_t> rows,
              const int64_t &height) {
             new (&instance) SelectedRows(rows, height);
           })
      .def("get_tensor",
           [](SelectedRows &self) { return self.mutable_value(); },
           py::return_value_policy::reference)
      .def("set_height", &SelectedRows::set_height)
      .def("height", &SelectedRows::height)
      .def("set_rows",
           [](SelectedRows &self, std::vector<int64_t> rows) {
#ifndef PADDLE_WITH_CUDA
             self.set_rows(rows);
#else
             Vector<int64_t> new_rows(rows);
             self.set_rows(new_rows);
#endif
           })
      .def("rows", [](SelectedRows &self) {
        auto rows = self.rows();
        std::vector<int64_t> new_rows;
        new_rows.reserve(rows.size());
        std::copy(rows.begin(), rows.end(), std::back_inserter(new_rows));
        return new_rows;
      });
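
  // Illustrative note (added for clarity, not from the original source): a
  // SelectedRows with height 100 and rows [3, 7] is a sparse slice of a
  // 100-row tensor; row i of its value tensor holds the data of row rows[i].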

  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.

All parameters, weights, and gradients are variables in Paddle.
)DOC")
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
      .def("set_int",
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
      .def("is_float", [](const Variable &var) { return var.IsType<float>(); })
      .def("set_float",
           [](Variable &var, float val) -> void {
             *var.GetMutable<float>() = val;
           })
      .def("get_float",
           [](const Variable &var) -> float { return var.Get<float>(); })
      .def("get_tensor",
           [](Variable &self) -> LoDTensor * {
             return self.GetMutable<LoDTensor>();
           },
           py::return_value_policy::reference)
      .def("get_lod_rank_table",
           [](Variable &self) { return self.GetMutable<LoDRankTable>(); },
           py::return_value_policy::reference)
      .def("get_selected_rows",
           [](Variable &self) -> SelectedRows * {
             return self.GetMutable<SelectedRows>();
           },
           py::return_value_policy::reference)
      .def("get_lod_tensor_array",
           [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
           py::return_value_policy::reference)
#ifdef PADDLE_WITH_CUDA
      .def("get_communicator",
           [](Variable &self) -> platform::Communicator * {
             return self.GetMutable<platform::Communicator>();
           },
           py::return_value_policy::reference)
#endif
      .def("get_reader",
           [](Variable &self) -> framework::ReaderHolder * {
             PADDLE_ENFORCE(self.IsType<framework::ReaderHolder>());
             return self.GetMutable<framework::ReaderHolder>();
           },
           py::return_value_policy::reference);

  py::class_<framework::ReaderHolder>(m, "Reader", "")
      .def("reset", &framework::ReaderHolder::ResetAll);

  using LoDTensorBlockingQueue =
      ::paddle::operators::reader::LoDTensorBlockingQueue;
  using LoDTensorBlockingQueueHolder =
      ::paddle::operators::reader::LoDTensorBlockingQueueHolder;
  py::class_<LoDTensorBlockingQueue, std::shared_ptr<LoDTensorBlockingQueue>>(
      m, "LoDTensorBlockingQueue", "")
      .def("push",
           [](LoDTensorBlockingQueue &self,
              const std::vector<framework::LoDTensor> &lod_tensor_vec) {
             pybind11::gil_scoped_release release;
             return self.Push(lod_tensor_vec);
           })
      .def("size", &LoDTensorBlockingQueue::Size)
      .def("capacity", &LoDTensorBlockingQueue::Cap)
      .def("close", &LoDTensorBlockingQueue::Close)
      .def("is_closed", &LoDTensorBlockingQueue::IsClosed);

  m.def("init_lod_tensor_blocking_queue",
        [](Variable &var, size_t capacity,
           const std::vector<std::vector<int64_t>> &shapes)
            -> std::shared_ptr<LoDTensorBlockingQueue> {
              std::vector<DDim> dims(shapes.size());
              std::transform(shapes.begin(), shapes.end(), dims.begin(),
                             [](const std::vector<int64_t> &shape) {
                               return make_ddim(shape);
                             });
              auto *holder = var.GetMutable<LoDTensorBlockingQueueHolder>();
              holder->InitOnce(capacity, dims);
              return holder->GetQueue();
            },
        py::return_value_policy::copy);
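
  // Illustrative Python-side usage (a sketch, not from the original source):
  //   queue = core.init_lod_tensor_blocking_queue(var, 2, [[-1, 784]])
  //   queue.push([lod_tensor])  # blocks once 2 items are already enqueued
  //   queue.close()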

  py::class_<Scope>(m, "Scope", "")
      .def("var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.Var(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids);

  //! @note: Be careful! PyBind will return std::string as a unicode object,
  //! not a Python str. If you want a str object, you should cast it in Python.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    std::vector<py::bytes> ret_values;
    for (auto &iter : OpInfoMap::Instance().map()) {
      auto &info = iter.second;
      if (info.HasOpProtoAndChecker()) {
        std::string str;
        PADDLE_ENFORCE(
            info.Proto().SerializeToString(&str),
            "Serialize OpProto Error. This could be a bug of Paddle.");
        ret_values.emplace_back(str);
      }
    }
    return ret_values;
  });
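
  // Illustrative Python-side decoding of the serialized protos above (a
  // sketch, not from the original source; assumes the generated framework_pb2
  // protobuf module is available):
  //   for pbstr in core.get_all_op_protos():
  //       op_proto = framework_pb2.OpProto.FromString(bytes(pbstr))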
  m.def(
      "get_grad_op_desc", [](const OpDesc &op_desc,
                             const std::unordered_set<std::string> &no_grad_set,
                             const std::vector<BlockDesc *> &grad_sub_block) {
        std::unordered_map<std::string, std::string> grad_to_var;
        std::vector<std::unique_ptr<OpDesc>> grad_op_descs =
            framework::OpInfoMap::Instance()
                .Get(op_desc.Type())
                .GradOpMaker()(op_desc, no_grad_set, &grad_to_var,
                               grad_sub_block);
        std::vector<OpDesc *> grad_op_desc_ptrs(grad_op_descs.size());
        std::transform(grad_op_descs.begin(), grad_op_descs.end(),
                       grad_op_desc_ptrs.begin(),
                       [](std::unique_ptr<OpDesc> &p) { return p.release(); });
        return std::make_pair(grad_op_desc_ptrs, grad_to_var);
      });
  m.def("prune", [](const ProgramDesc &origin,
                    const std::vector<std::array<size_t, 2>> &targets) {
    ProgramDesc prog_with_targets(origin);
    for (const auto &t : targets) {
      prog_with_targets.MutableBlock(t[0])->Op(t[1])->SetIsTarget(true);
    }
    proto::ProgramDesc pruned_desc;
    Prune(*prog_with_targets.Proto(), &pruned_desc);
    return new ProgramDesc(pruned_desc);
  });
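
  // Illustrative note (added for clarity, not from the original source): each
  // target is a (block_index, op_index) pair, so prune(prog, [[0, 3]]) marks
  // op 3 of block 0 as a target and removes the ops it does not depend on.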
  m.def("inference_optimize", [](ProgramDesc &origin) {
    proto::ProgramDesc pruned_desc;
    InferenceOptimize(*(origin.Proto()), &pruned_desc);
    return new ProgramDesc(pruned_desc);
  });
  m.def("empty_var_name", []() { return framework::kEmptyVarName; });
  m.def("grad_var_suffix", []() { return framework::kGradVarSuffix; });
  m.def_submodule(
       "var_names",
       "The module will return special predefined variable names in Paddle")
      .def("empty", []() { return kEmptyVarName; })
      .def("temp", []() { return kTempVarName; });
  // clang-format off
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::CUDAPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifndef PADDLE_WITH_CUDA
                    PADDLE_THROW("CUDAPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  })
      .def_static("create",
                  [](paddle::platform::CUDAPinnedPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifndef PADDLE_WITH_CUDA
                    PADDLE_THROW(
                        "CUDAPinnedPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDAPinnedDeviceContext(place);
#endif
                  });
// clang-format on
#ifdef PADDLE_WITH_CUDA
  py::class_<platform::Communicator>(m, "Communicator").def(py::init<>());
#endif
  py::class_<platform::CUDAPlace>(m, "CUDAPlace")
      .def(py::init<int>())
      .def("__str__", string::to_string<const platform::CUDAPlace &>);

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace")
      .def(py::init<>())
      .def("__str__", string::to_string<const platform::CPUPlace &>);

  py::class_<paddle::platform::CUDAPinnedPlace>(m, "CUDAPinnedPlace")
      .def(py::init<>())
      .def("__str__", string::to_string<const platform::CUDAPinnedPlace &>);

  py::class_<platform::Place>(m, "Place")
      .def(py::init<>())
      .def("set_place",
           [](platform::Place &self, const platform::CPUPlace &cpu_place) {
             self = cpu_place;
           })
      .def("set_place",
           [](platform::Place &self, const platform::CUDAPlace &gpu_place) {
             self = gpu_place;
           })
      .def("set_place", [](platform::Place &self,
                           const platform::CUDAPinnedPlace &cuda_pinned_place) {
        self = cuda_pinned_place;
      });

  py::class_<OperatorBase>(m, "Operator")
      .def_static("create",
                  [](py::bytes protobin) {
                    proto::OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    return OpRegistry::CreateOp(desc);
                  })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CPUPlace &place) { self.Run(scope, place); })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CUDAPlace &place) { self.Run(scope, place); })
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::CUDAPinnedPlace &place) {
             self.Run(scope, place);
           })
      .def("type",
           [](const OperatorBase &op) -> std::string { return op.Type(); })
      .def("outputs",
           [](const OperatorBase &op)
               -> std::map<std::string, std::vector<std::string>> {
                 return op.Outputs();
               })
      .def("output_vars",
           [](const OperatorBase &op) { return op.OutputVars(true); })
      .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
      .def("input_vars", [](const OperatorBase &op) { return op.InputVars(); })
      .def("__str__", &OperatorBase::DebugString)
      .def("no_intermediate_outputs",
           [](const OperatorBase &op) { return op.OutputVars(false); })
      .def("support_gpu", &OperatorBase::SupportGPU);

  py::class_<framework::Executor>(m, "Executor")
      .def(py::init<const platform::Place &>())
      .def("close", &Executor::Close)
      .def("run", [](Executor &self, const ProgramDesc &prog, Scope *scope,
                     int block_id, bool create_local_scope, bool create_vars) {
        pybind11::gil_scoped_release release;
        self.Run(prog, scope, block_id, create_local_scope, create_vars);
      });

  m.def("init_gflags", framework::InitGflags);
  m.def("init_glog", framework::InitGLOG);
  m.def("init_devices",
        [](bool init_p2p) { framework::InitDevices(init_p2p); });

  m.def("is_compiled_with_cuda", IsCompiledWithCUDA);
  m.def("is_compiled_with_dist", IsCompiledWithDIST);
#ifdef PADDLE_WITH_CUDA
  m.def("is_float16_supported", [](const platform::CUDAPlace &place) -> bool {
    // Only GPUs with Compute Capability >= 53 support float16
    return platform::GetCUDAComputeCapability(place.device) >= 53;
  });
#endif

  m.def("set_feed_variable", framework::SetFeedVariable);
  m.def("get_fetch_variable", framework::GetFetchVariable);

  BindProgramDesc(&m);
  BindBlockDesc(&m);
  BindVarDsec(&m);
  BindOpDesc(&m);
  BindConstValue(&m);

  py::class_<framework::LoDRankTable>(m, "LodRankTable")
      .def("items", [](framework::LoDRankTable &table) {
        std::vector<std::pair<size_t, size_t>> res;
        for (auto &item : table.items()) {
          res.push_back({item.index, item.length});
        }
        return res;
      });

  py::class_<LoDTensorArray>(m, "LoDTensorArray")
      .def("__init__",
           [](LoDTensorArray &instance) { new (&instance) LoDTensorArray(); })
      .def("__getitem__",
           [](LoDTensorArray &self, size_t i) { return &self.at(i); },
           py::return_value_policy::reference)
      .def("__len__", [](LoDTensorArray &self) { return self.size(); })
      .def("__setitem__",
           [](LoDTensorArray &self, size_t i, const LoDTensor &t) {
             PADDLE_ENFORCE_LT(i, self.size());
             self[i].ShareDataWith(t);
             self[i].set_lod(t.lod());
           })
      .def("append", [](LoDTensorArray &self, const LoDTensor &t) {
        self.emplace_back();
        self.back().ShareDataWith(t);
        self.back().set_lod(t.lod());
      });

  m.def("IsInplace",
        [](std::string op) -> bool { return operators::IsInplace(op); });

  m.def("op_support_gpu", OpSupportGPU);
#ifdef PADDLE_WITH_CUDA
  m.def("get_cuda_device_count", platform::GetCUDADeviceCount);

  m.def("nvprof_init", platform::CudaProfilerInit);
  m.def("nvprof_start", platform::CudaProfilerStart);
  m.def("nvprof_stop", platform::CudaProfilerStop);
#endif

  py::enum_<platform::ProfilerState>(m, "ProfilerState", py::arithmetic())
      .value("kDisabled", platform::ProfilerState::kDisabled)
      .value("kCPU", platform::ProfilerState::kCPU)
      .value("kCUDA", platform::ProfilerState::kCUDA)
      .value("kAll", platform::ProfilerState::kAll)
      .export_values();

  py::enum_<platform::EventSortingKey>(m, "EventSortingKey", py::arithmetic())
      .value("kDefault", platform::EventSortingKey::kDefault)
      .value("kCalls", platform::EventSortingKey::kCalls)
      .value("kTotal", platform::EventSortingKey::kTotal)
      .value("kMin", platform::EventSortingKey::kMin)
      .value("kMax", platform::EventSortingKey::kMax)
      .value("kAve", platform::EventSortingKey::kAve)
      .export_values();

  m.def("enable_profiler", platform::EnableProfiler);
  m.def("disable_profiler", platform::DisableProfiler);
  m.def("is_profiler_enabled", platform::IsProfileEnabled);
  m.def("reset_profiler", platform::ResetProfiler);

  // -- python binds for parallel executor.
  py::class_<ParallelExecutor> pe(m, "ParallelExecutor");
  py::class_<ExecutionStrategy>(pe, "ExecutionStrategy")
      .def(py::init())
      .def_property(
          "num_threads",
          [](const ExecutionStrategy &self) { return self.num_threads_; },
          [](ExecutionStrategy &self, size_t num_threads) {
            self.num_threads_ = num_threads;
          })
      .def_property(
          "use_cuda",
          [](const ExecutionStrategy &self) { return self.use_cuda_; },
          [](ExecutionStrategy &self, bool use_cuda) {
            self.use_cuda_ = use_cuda;
          })
      .def_property(
          "allow_op_delay",
          [](const ExecutionStrategy &self) { return self.allow_op_delay_; },
          [](ExecutionStrategy &self, bool allow_op_delay) {
            self.allow_op_delay_ = allow_op_delay;
          })
      .def_property(
          "num_iteration_per_drop_scope",
          [](const ExecutionStrategy &self) {
            return self.num_iteration_per_drop_scope_;
          },
          [](ExecutionStrategy &self, size_t num_iteration_per_drop_scope) {
            self.num_iteration_per_drop_scope_ = num_iteration_per_drop_scope;
          });
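
  // Illustrative Python-side configuration (a sketch, not from the original
  // source):
  //   exec_strategy = core.ParallelExecutor.ExecutionStrategy()
  //   exec_strategy.num_threads = 4
  //   exec_strategy.use_cuda = True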
  py::class_<BuildStrategy> build_strategy(pe, "BuildStrategy");

  py::enum_<BuildStrategy::ReduceStrategy>(build_strategy, "ReduceStrategy")
      .value("Reduce", BuildStrategy::ReduceStrategy::kReduce)
      .value("AllReduce", BuildStrategy::ReduceStrategy::kAllReduce);
  py::enum_<BuildStrategy::GradientScaleStrategy>(build_strategy,
                                                  "GradientScaleStrategy")
      .value("CoeffNumDevice",
             BuildStrategy::GradientScaleStrategy::kCoeffNumDevice)
      .value("One", BuildStrategy::GradientScaleStrategy::kOne)
      .value("Customized", BuildStrategy::GradientScaleStrategy::kCustomized);

  build_strategy.def(py::init())
      .def_property(
          "reduce_strategy",
          [](const BuildStrategy &self) { return self.reduce_; },
          [](BuildStrategy &self, BuildStrategy::ReduceStrategy strategy) {
            self.reduce_ = strategy;
          })
      .def_property(
          "gradient_scale_strategy",
          [](const BuildStrategy &self) { return self.gradient_scale_; },
          [](BuildStrategy &self,
             BuildStrategy::GradientScaleStrategy strategy) {
            self.gradient_scale_ = strategy;
          })
      .def_property(
          "debug_graphviz_path",
          [](const BuildStrategy &self) { return self.debug_graphviz_path_; },
          [](BuildStrategy &self, const std::string &path) {
            self.debug_graphviz_path_ = path;
          })
      .def_property(
          "enable_data_balance",
          [](const BuildStrategy &self) { return self.enable_data_balance_; },
          [](BuildStrategy &self, bool b) { self.enable_data_balance_ = b; });
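
  // Illustrative Python-side configuration (a sketch, not from the original
  // source; the attribute path follows the nested bindings above):
  //   bs = core.ParallelExecutor.BuildStrategy()
  //   bs.reduce_strategy = \
  //       core.ParallelExecutor.BuildStrategy.ReduceStrategy.AllReduce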

  pe.def(py::init<const std::vector<platform::Place> &,
                  const std::unordered_set<std::string> &,
                  const std::unordered_set<std::string> &, const ProgramDesc &,
                  const std::string &, Scope *, std::vector<Scope *> &,
                  const ExecutionStrategy &, const BuildStrategy &, size_t,
                  size_t>())
      .def("bcast_params", &ParallelExecutor::BCastParamsToDevices)
      // NOTE: even though we return a vector<Scope*>* to Python with the
      // reference policy, we still cannot fetch local scopes through this
      // vector, because its Scope* elements would be freed by the Python GC.
      // We can only return each Scope* individually and mark it as a
      // reference.
      .def("local_scopes",
           [](ParallelExecutor &self) -> std::vector<Scope *> * {
             return &self.GetLocalScopes();
           },
           py::return_value_policy::reference)
      .def("feed_tensors_into_local_scopes",
           &ParallelExecutor::FeedTensorsIntoLocalScopes)
      .def("feed_and_split_tensor_into_local_scopes",
           &ParallelExecutor::FeedAndSplitTensorIntoLocalScopes)
      .def("run", [](ParallelExecutor &self,
                     const std::vector<std::string> &fetch_tensors,
                     const std::string &fetched_var_name) {
        pybind11::gil_scoped_release release;
        self.Run(fetch_tensors, fetched_var_name);
      });

  BindRecordIOWriter(&m);
  return m.ptr();
}
}  // namespace pybind
}  // namespace paddle