/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/pybind/protobuf.h"

#include <atomic>

#include "paddle/framework/backward.h"
#include "paddle/framework/lod_tensor.h"
#include "paddle/operators/cond_op.h"
#include "paddle/operators/net_op.h"
#include "paddle/operators/recurrent_op.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"
#include "paddle/pybind/pybind.h"
#include "paddle/pybind/tensor_py.h"
#include "paddle/string/to_string.h"

namespace paddle {
namespace pybind {

static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> generator;
  return generator.fetch_add(1);
}

Q
qijun 已提交
35 36 37 38 39 40 41 42
bool IsCompileGPU() {
#ifdef PADDLE_ONLY_CPU
  return false;
#else
  return true;
#endif
}

43
PYBIND11_PLUGIN(core) {
Y
Yu Yang 已提交
44
  py::module m("core", "C++ core of PaddlePaddle");
45

46 47 48 49
  // using framework in this function. Since it is inside a function, it will
  // not cause namespace pollution.
  using namespace paddle::framework;  // NOLINT

50 51 52
  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
Y
Yu Yang 已提交
53
      .def("get_dims",
54
           [](const Tensor &self) { return vectorize(self.dims()); })
Y
Yu Yang 已提交
55
      .def("set_dims",
Q
qijun 已提交
56
           [](Tensor &self, const std::vector<int64_t> &dim) {
57
             self.Resize(make_ddim(dim));
Y
Yu Yang 已提交
58 59
           })
      .def("alloc_float",
Y
Yu Yang 已提交
60
           [](Tensor &self, paddle::platform::GPUPlace &place) {
Q
qijun 已提交
61
             self.mutable_data<float>(place);
Y
Yu Yang 已提交
62
           })
Q
qijun 已提交
63
      .def("alloc_float",
Y
Yu Yang 已提交
64
           [](Tensor &self, paddle::platform::CPUPlace &place) {
Q
qijun 已提交
65
             self.mutable_data<float>(place);
Y
Yu Yang 已提交
66 67
           })
      .def("alloc_int",
Y
Yu Yang 已提交
68
           [](Tensor &self, paddle::platform::CPUPlace &place) {
Q
qijun 已提交
69
             self.mutable_data<int>(place);
Y
Yu Yang 已提交
70
           })
Q
qijun 已提交
71
      .def("alloc_int",
Y
Yu Yang 已提交
72
           [](Tensor &self, paddle::platform::GPUPlace &place) {
Q
qijun 已提交
73
             self.mutable_data<int>(place);
Q
qijun 已提交
74
           })
Y
Yu Yang 已提交
75 76
      .def("set", PyCPUTensorSetFromArray<float>)
      .def("set", PyCPUTensorSetFromArray<int>)
Q
qijun 已提交
77
#ifndef PADDLE_ONLY_CPU
Y
Yu Yang 已提交
78 79
      .def("set", PyCUDATensorSetFromArray<float>)
      .def("set", PyCUDATensorSetFromArray<int>)
Q
qijun 已提交
80
#endif
81
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
Y
Yu Yang 已提交
82
      .def("set_float_element",
83
           [](Tensor &self, size_t offset, float f) {
Y
Yu Yang 已提交
84
             // TODO(yuyang18): Only support GPU now.
Y
Yu Yang 已提交
85 86
             self.data<float>()[offset] = f;
           })
87
      .def("get_float_element", [](Tensor &self, size_t offset) -> float {
Y
Yu Yang 已提交
88
        // TODO(yuyang18): Only support GPU now.
Y
Yu Yang 已提交
89 90
        return self.data<float>()[offset];
      });
Y
Yu Yang 已提交
91

92
  py::class_<LoDTensor, Tensor>(m, "LoDTensor")
93 94
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
95 96 97
      .def(
          "__init__",
          [](LoDTensor &instance, const std::vector<std::vector<size_t>> &lod) {
98
#ifdef PADDLE_ONLY_CPU
99
            new (&instance) LoDTensor(lod);
100
#else
Y
Yu Yang 已提交
101
             LoD new_lod;
102 103
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
104
             new (&instance) LoDTensor(new_lod);
105
#endif
106
          })
D
dangqingqing 已提交
107
      .def("set_lod",
108
           [](LoDTensor &self, const std::vector<std::vector<size_t>> &lod) {
109
#ifdef PADDLE_ONLY_CPU
D
dangqingqing 已提交
110
             self.set_lod(lod);
111
#else
Y
Yu Yang 已提交
112
             LoD new_lod;
113 114 115 116
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             self.set_lod(new_lod);
#endif
D
dangqingqing 已提交
117
           })
118 119
      .def("lod", [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
#ifdef PADDLE_ONLY_CPU
D
dangqingqing 已提交
120
        return self.lod();
121 122 123 124 125
#else
           auto lod = self.lod();
           std::vector<std::vector<size_t>> new_lod;
           new_lod.reserve(lod.size());
           std::transform(lod.begin(), lod.end(), std::back_inserter(new_lod),
Y
Yu Yang 已提交
126
               [](Vector<size_t> item) ->
127 128 129 130 131 132 133 134
                   std::vector<size_t> {
                 std::vector<size_t> v;
                 v.reserve(item.size());
                 std::copy(item.begin(), item.end(), std::back_inserter(v));
                 return v;
               });
           return new_lod;
#endif
D
dangqingqing 已提交
135 136
      });

137
  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.
138 139 140

All parameter, weight, gradient are variables in Paddle.
)DOC")
141
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
142
      .def("set_int",
143 144
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
Y
Yu Yang 已提交
145
      .def("get_tensor",
146 147
           [](Variable &self) -> LoDTensor * {
             return self.GetMutable<LoDTensor>();
D
dangqingqing 已提交
148 149
           },
           py::return_value_policy::reference)
Y
Yan Chunwei 已提交
150
      .def("get_net",
D
dongzhihong 已提交
151 152
           [](Variable &self) -> operators::NetOp * {
             return self.GetMutable<operators::NetOp>();
Y
Yan Chunwei 已提交
153
           },
Y
Yu Yang 已提交
154
           py::return_value_policy::reference);
155

156
  py::class_<Scope>(m, "Scope", "")
Y
Yu Yang 已提交
157
      .def("new_var",
158
           [](Scope &self, const std::string &name) -> Variable * {
Y
Yu Yang 已提交
159 160
             return self.NewVar(name);
           },
161
           py::return_value_policy::reference)
162
      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
Y
Yu Yang 已提交
163
      .def(py::init<>())
164 165
      .def("new_scope",
           [](Scope &self) -> Scope * { return &self.NewScope(); },
166
           py::return_value_policy::reference)
167
      .def("drop_kids", &Scope::DropKids);
168

Y
Yu Yang 已提交
169 170
  //! @note: Be careful! PyBind will return std::string as an unicode, not
  //! Python str. If you want a str object, you should cast them in Python.
Y
Yu Yang 已提交
171 172
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    std::vector<py::bytes> ret_values;
Y
Yu Yang 已提交
173 174 175 176

    OpInfoMap::Instance().IterAllInfo([&ret_values](const std::string &type,
                                                    const OpInfo &info) {
      if (!info.HasOpProtoAndChecker()) return;
Y
Yu Yang 已提交
177
      std::string str;
Y
Yu Yang 已提交
178
      PADDLE_ENFORCE(info.Proto().SerializeToString(&str),
Y
Yu Yang 已提交
179
                     "Serialize OpProto Error. This could be a bug of Paddle.");
Y
Yu Yang 已提交
180 181
      ret_values.emplace_back(str);
    });
Y
Yu Yang 已提交
182 183
    return ret_values;
  });
184 185 186
  m.def_submodule(
       "var_names",
       "The module will return special predefined variable name in Paddle")
Y
Yi Wang 已提交
187 188
      .def("empty", []() { return kEmptyVarName; })
      .def("temp", []() { return kTempVarName; });
Q
qijun 已提交
189
  // clang-format off
Y
Yu Yang 已提交
190
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
Q
qijun 已提交
191 192
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
Q
qijun 已提交
193
                      -> paddle::platform::DeviceContext* {
Q
qijun 已提交
194 195 196 197 198
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
Q
qijun 已提交
199
#ifdef PADDLE_ONLY_CPU
Q
qijun 已提交
200
                    PADDLE_THROW("GPUPlace is not supported in CPU device.");
Q
qijun 已提交
201
#else
Q
qijun 已提交
202
                    return new paddle::platform::CUDADeviceContext(place);
Q
qijun 已提交
203
#endif
Q
qijun 已提交
204
                  });
Q
qijun 已提交
205
  // clang-format on
Q
qijun 已提交
206

207 208 209
  py::class_<platform::GPUPlace>(m, "GPUPlace")
      .def(py::init<int>())
      .def("__str__", string::to_string<const platform::GPUPlace &>);
Q
qijun 已提交
210

211 212 213
  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace")
      .def(py::init<>())
      .def("__str__", string::to_string<const platform::CPUPlace &>);
Y
Yu Yang 已提交
214

Y
Yu Yang 已提交
215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239
  py::class_<OperatorBase>(m, "Operator")
      .def_static("create",
                  [](py::bytes protobin) {
                    OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    return OpRegistry::CreateOp(desc);
                  })
      .def("backward",
           [](const OperatorBase &forwardOp,
              const std::unordered_set<std::string> &no_grad_vars) {
             return Backward(forwardOp, no_grad_vars).release();
           })
      .def("infer_shape", &OperatorBase::InferShape)
      .def("run", &OperatorBase::Run)
      .def("type",
           [](const OperatorBase &op) -> std::string { return op.Type(); })
      .def("outputs",
           [](const OperatorBase &op)
               -> std::map<std::string, std::vector<std::string>> {
                 return op.Outputs();
               })
Q
qijun 已提交
240 241
      .def("output_vars",
           [](const OperatorBase &op) { return op.OutputVars(true); })
Y
Yu Yang 已提交
242
      .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
Q
qijun 已提交
243
      .def("input_vars", [](const OperatorBase &op) { return op.InputVars(); })
Y
Yu Yang 已提交
244 245 246 247
      .def("__str__", &OperatorBase::DebugString)
      .def("no_intermediate_outputs",
           [](const OperatorBase &op) { return op.OutputVars(false); })
      .def("support_gpu", &OperatorBase::SupportGPU);
Y
Yu Yang 已提交
248

Y
Yu Yang 已提交
249 250 251 252 253 254 255
  py::class_<operators::NetOp, OperatorBase>(m, "Net")
      .def_static("create",
                  []() -> operators::NetOp * {
                    auto *retv = new operators::NetOp;
                    retv->SetType("plain_net");
                    return retv;
                  })
256 257 258 259
      .def("append_op",
           [](operators::NetOp &self, const OperatorBase &op) {
             self.AppendOp(op);
           })
D
dongzhihong 已提交
260 261 262 263
      .def("complete_add_op", &operators::NetOp::CompleteAddOp)
      .def("complete_add_op", [](std::shared_ptr<operators::NetOp> &self) {
        self->CompleteAddOp();
      });
Y
Yan Chunwei 已提交
264

Y
Yan Chunwei 已提交
265
  // recurrent_op
Y
Yu Yang 已提交
266 267 268 269 270 271 272 273 274 275 276 277 278
  py::class_<operators::RecurrentOp, OperatorBase>(m, "RecurrentOp")
      .def_static(
          "create",
          [](py::bytes protobin) -> operators::RecurrentOp * {
            OpDesc desc;
            PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                           "Cannot parse user input to OpDesc");
            PADDLE_ENFORCE(desc.IsInitialized(),
                           "User OpDesc is not initialized, reason %s",
                           desc.InitializationErrorString());
            auto rnn_op = OpRegistry::CreateOp(desc);
            return static_cast<operators::RecurrentOp *>(rnn_op.release());
          })
279 280 281
      .def("set_stepnet",
           [](operators::RecurrentOp &self, const operators::NetOp &net)
               -> void { self.set_stepnet(net.Clone()); });
Y
Yan Chunwei 已提交
282

Z
cond op  
zchen0211 已提交
283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304
  // cond_op
  py::class_<operators::CondOp, OperatorBase>(m, "CondOp")
      .def_static("create",
                  [](py::bytes protobin) -> operators::CondOp * {
                    OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    auto cond_op = OpRegistry::CreateOp(desc);
                    return static_cast<operators::CondOp *>(cond_op.release());
                  })
      .def("set_truenet",
           [](operators::CondOp &self, const operators::NetOp &net) -> void {
             self.set_truenet(net.Clone());
           })
      .def("set_falsenet",
           [](operators::CondOp &self, const operators::NetOp &net) -> void {
             self.set_falsenet(net.Clone());
           });

305 306
  m.def("unique_integer", UniqueIntegerGenerator);

Q
qijun 已提交
307 308
  m.def("is_compile_gpu", IsCompileGPU);

F
fengjiayi 已提交
309 310 311 312
  BindProgramDesc(m);
  BindBlockDesc(m);
  BindVarDsec(m);
  BindOpDesc(m);
Y
Yu Yang 已提交
313

314
  return m.ptr();
L
Luo Tao 已提交
315
}
}  // namespace pybind
}  // namespace paddle