pybind.cc 12.1 KB
Newer Older
1 2 3 4 5 6
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

7
http://www.apache.org/licenses/LICENSE-2.0
8 9 10 11 12 13 14

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

15
#include "paddle/pybind/protobuf.h"
16

Q
Qiao Longfei 已提交
17
#include "paddle/framework/backward.h"
D
dangqingqing 已提交
18
#include "paddle/framework/lod_tensor.h"
Z
zchen0211 已提交
19
#include "paddle/operators/cond_op.h"
Y
Yan Chunwei 已提交
20
#include "paddle/operators/net_op.h"
Y
Yan Chunwei 已提交
21
#include "paddle/operators/recurrent_op.h"
Q
qijun 已提交
22
#include "paddle/platform/enforce.h"
Q
qijun 已提交
23
#include "paddle/platform/place.h"
L
Luo Tao 已提交
24
#include "paddle/pybind/pybind.h"
25
#include "paddle/pybind/tensor_py.h"
26
#include "paddle/string/to_string.h"
27

28
namespace paddle {
29
namespace pybind {
30 31 32 33 34
// Hands out process-wide unique ids: every call yields the next integer,
// starting from zero. Thread-safe via an atomic counter.
static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> counter{0};
  return counter++;
}

Q
qijun 已提交
35 36 37 38 39 40 41 42
// Reports whether this binary was built with GPU support
// (i.e. PADDLE_ONLY_CPU was NOT defined at compile time).
bool IsCompileGPU() {
#ifndef PADDLE_ONLY_CPU
  return true;
#else
  return false;
#endif
}

43
// Entry point of the `core` Python extension module. Exposes PaddlePaddle's
// C++ primitives to Python via pybind11: Tensor/LoDTensor buffers, Variable,
// Scope, OperatorBase (plus Net/Recurrent/Cond operators), device places and
// device contexts, and a handful of module-level helper functions.
PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

  // using framework in this function. Since it is inside a function, it will
  // not cause namespace pollution.
  using namespace paddle::framework;  // NOLINT

  // Tensor: raw n-d array. Exposed with the Python buffer protocol so numpy
  // can view it without copying.
  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("get_dims",
           [](const Tensor &self) { return vectorize(self.dims()); })
      .def("set_dims",
           [](Tensor &self, const std::vector<int64_t> &dim) {
             self.Resize(make_ddim(dim));
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("set", PyCPUTensorSetFromArray<float>)
      .def("set", PyCPUTensorSetFromArray<int>)
#ifndef PADDLE_ONLY_CPU
      .def("set", PyCUDATensorSetFromArray<float>)
      .def("set", PyCUDATensorSetFromArray<int>)
#endif
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
      .def("set_float_element",
           [](Tensor &self, size_t offset, float f) {
             // TODO(yuyang18): Only support GPU now.
             self.data<float>()[offset] = f;
           })
      .def("get_float_element", [](Tensor &self, size_t offset) -> float {
        // TODO(yuyang18): Only support GPU now.
        return self.data<float>()[offset];
      });

  // LoDTensor: Tensor plus level-of-detail (sequence) information. On GPU
  // builds LoD levels are framework::Vector, so each level is copied to/from
  // plain std::vector at the Python boundary.
  py::class_<LoDTensor, Tensor>(m, "LoDTensor")
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def(
          "__init__",
          [](LoDTensor &instance, const std::vector<std::vector<size_t>> &lod) {
#ifdef PADDLE_ONLY_CPU
            new (&instance) LoDTensor(lod);
#else
            LoD new_lod;
            new_lod.reserve(lod.size());
            std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
            new (&instance) LoDTensor(new_lod);
#endif
          })
      .def("set_lod",
           [](LoDTensor &self, const std::vector<std::vector<size_t>> &lod) {
#ifdef PADDLE_ONLY_CPU
             self.set_lod(lod);
#else
             LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             self.set_lod(new_lod);
#endif
           })
      .def("lod", [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
#ifdef PADDLE_ONLY_CPU
        return self.lod();
#else
        // Convert each Vector<size_t> level into a std::vector<size_t> so
        // pybind11 can return a plain list-of-lists to Python.
        auto lod = self.lod();
        std::vector<std::vector<size_t>> new_lod;
        new_lod.reserve(lod.size());
        std::transform(lod.begin(), lod.end(), std::back_inserter(new_lod),
                       [](Vector<size_t> item) -> std::vector<size_t> {
                         std::vector<size_t> v;
                         v.reserve(item.size());
                         std::copy(item.begin(), item.end(),
                                   std::back_inserter(v));
                         return v;
                       });
        return new_lod;
#endif
      });

  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.

All parameter, weight, gradient are variables in Paddle.
)DOC")
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
      .def("set_int",
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
      .def("get_tensor",
           [](Variable &self) -> LoDTensor * {
             return self.GetMutable<LoDTensor>();
           },
           py::return_value_policy::reference)
      .def("get_net",
           [](Variable &self) -> operators::NetOp * {
             return self.GetMutable<operators::NetOp>();
           },
           py::return_value_policy::reference);

  // Scope: hierarchical variable container. Variables and child scopes are
  // owned by the scope, so references (not copies) are returned to Python.
  py::class_<Scope>(m, "Scope", "")
      .def("new_var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.NewVar(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids);

  //! @note: Be careful! PyBind will return std::string as an unicode, not
  //! Python str. If you want a str object, you should cast them in Python.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    std::vector<py::bytes> ret_values;

    OpInfoMap::Instance().IterAllInfo([&ret_values](const std::string &type,
                                                    const OpInfo &info) {
      if (!info.HasOpProtoAndChecker()) return;
      std::string str;
      PADDLE_ENFORCE(info.Proto().SerializeToString(&str),
                     "Serialize OpProto Error. This could be a bug of Paddle.");
      ret_values.emplace_back(str);
    });
    return ret_values;
  });
  m.def_submodule(
       "var_names",
       "The module will return special predefined variable name in Paddle")
      .def("empty", []() { return kEmptyVarName; })
      .def("temp", []() { return kTempVarName; });
  // clang-format off
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifdef PADDLE_ONLY_CPU
                    PADDLE_THROW("GPUPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  });
  // clang-format on

  py::class_<platform::GPUPlace>(m, "GPUPlace")
      .def(py::init<int>())
      .def("__str__", string::to_string<const platform::GPUPlace &>);

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace")
      .def(py::init<>())
      .def("__str__", string::to_string<const platform::CPUPlace &>);

  // OperatorBase: created from a serialized OpDesc protobuf handed over from
  // the Python side as bytes.
  py::class_<OperatorBase>(m, "Operator")
      .def_static("create",
                  [](py::bytes protobin) {
                    OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    return OpRegistry::CreateOp(desc);
                  })
      .def("backward",
           [](const OperatorBase &forwardOp,
              const std::unordered_set<std::string> &no_grad_vars) {
             return Backward(forwardOp, no_grad_vars).release();
           })
      .def("infer_shape", &OperatorBase::InferShape)
      .def("run",
           [](OperatorBase &self, const Scope &scope,
              const platform::DeviceContext &dev_ctx) {
             self.Run(scope, dev_ctx);
             // Block until the device finishes so results are visible to
             // Python immediately after run() returns.
             dev_ctx.Wait();
           })
      .def("type",
           [](const OperatorBase &op) -> std::string { return op.Type(); })
      .def("outputs",
           [](const OperatorBase &op)
               -> std::map<std::string, std::vector<std::string>> {
                 return op.Outputs();
               })
      .def("output_vars",
           [](const OperatorBase &op) { return op.OutputVars(true); })
      .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
      .def("input_vars", [](const OperatorBase &op) { return op.InputVars(); })
      .def("__str__", &OperatorBase::DebugString)
      .def("no_intermediate_outputs",
           [](const OperatorBase &op) { return op.OutputVars(false); })
      .def("support_gpu", &OperatorBase::SupportGPU);

  py::class_<operators::NetOp, OperatorBase>(m, "Net")
      .def_static("create",
                  []() -> operators::NetOp * {
                    auto *retv = new operators::NetOp;
                    retv->SetType("plain_net");
                    return retv;
                  })
      .def("append_op", [](operators::NetOp &self,
                           const OperatorBase &op) { self.AppendOp(op); })
      .def("complete_add_op", &operators::NetOp::CompleteAddOp)
      .def("complete_add_op", [](std::shared_ptr<operators::NetOp> &self) {
        self->CompleteAddOp();
      });

  // recurrent_op
  py::class_<operators::RecurrentOp, OperatorBase>(m, "RecurrentOp")
      .def_static(
          "create",
          [](py::bytes protobin) -> operators::RecurrentOp * {
            OpDesc desc;
            PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                           "Cannot parse user input to OpDesc");
            PADDLE_ENFORCE(desc.IsInitialized(),
                           "User OpDesc is not initialized, reason %s",
                           desc.InitializationErrorString());
            auto rnn_op = OpRegistry::CreateOp(desc);
            return static_cast<operators::RecurrentOp *>(rnn_op.release());
          })
      .def("set_stepnet", [](operators::RecurrentOp &self,
                             const operators::NetOp &net) -> void {
        self.set_stepnet(net.Clone());
      });

  // cond_op
  py::class_<operators::CondOp, OperatorBase>(m, "CondOp")
      .def_static("create",
                  [](py::bytes protobin) -> operators::CondOp * {
                    OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    auto cond_op = OpRegistry::CreateOp(desc);
                    return static_cast<operators::CondOp *>(cond_op.release());
                  })
      .def("set_truenet",
           [](operators::CondOp &self, const operators::NetOp &net) -> void {
             self.set_truenet(net.Clone());
           })
      .def("set_falsenet",
           [](operators::CondOp &self, const operators::NetOp &net) -> void {
             self.set_falsenet(net.Clone());
           });

  m.def("unique_integer", UniqueIntegerGenerator);

  m.def("is_compile_gpu", IsCompileGPU);

  BindProgramDesc(m);
  BindBlockDesc(m);
  BindVarDsec(m);
  BindOpDesc(m);

  return m.ptr();
}
319
}  // namespace pybind
320
}  // namespace paddle