/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/pybind/protobuf.h"

#include "paddle/framework/backward.h"
#include "paddle/framework/lod_tensor.h"
#include "paddle/operators/cond_op.h"
#include "paddle/operators/net_op.h"
#include "paddle/operators/recurrent_op.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"
#include "paddle/pybind/pybind.h"
#include "paddle/pybind/tensor_py.h"
#include "paddle/string/to_string.h"

namespace paddle {
namespace framework {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using LoD = framework::LoD;

static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> generator;
  return generator.fetch_add(1);
}

bool IsCompileGPU() {
#ifdef PADDLE_ONLY_CPU
  return false;
#else
  return true;
#endif
}

PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("get_dims",
           [](const Tensor &self) { return vectorize(self.dims()); })
      .def("set_dims",
           [](Tensor &self, const std::vector<int64_t> &dim) {
             self.Resize(make_ddim(dim));
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("set", PyCPUTensorSetFromArray<float>)
      .def("set", PyCPUTensorSetFromArray<int>)
#ifndef PADDLE_ONLY_CPU
      .def("set", PyCUDATensorSetFromArray<float>)
      .def("set", PyCUDATensorSetFromArray<int>)
#endif
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
      .def("set_float_element",
           [](Tensor &self, size_t offset, float f) {
             // TODO(yuyang18): Only support CPU now.
             self.data<float>()[offset] = f;
           })
      .def("get_float_element", [](Tensor &self, size_t offset) -> float {
        // TODO(yuyang18): Only support CPU now.
        return self.data<float>()[offset];
      });
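  // A rough Python-side sketch of the Tensor binding above (a sketch only, not
  // authoritative; the tensor is assumed to come from a Variable, see the
  // Scope/Variable bindings below, and `arr` is a hypothetical float32 ndarray):
  //   t = scope.new_var("x").get_tensor()
  //   t.set_dims([2, 3])
  //   t.alloc_float(core.CPUPlace())
  //   t.set(arr, core.CPUPlace())   # dispatches to PyCPUTensorSetFromArray<float>
  //   print(t.shape())              # -> [2, 3]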

  py::class_<LoDTensor, Tensor>(m, "LoDTensor")
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def(
          "__init__",
          [](LoDTensor &instance, const std::vector<std::vector<size_t>> &lod) {
#ifdef PADDLE_ONLY_CPU
            new (&instance) LoDTensor(lod);
#else
             paddle::framework::LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             new (&instance) LoDTensor(new_lod);
#endif
          })
      .def("set_lod",
           [](LoDTensor &self, const std::vector<std::vector<size_t>> &lod) {
#ifdef PADDLE_ONLY_CPU
             self.set_lod(lod);
#else
             paddle::framework::LoD new_lod;
             new_lod.reserve(lod.size());
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             self.set_lod(new_lod);
#endif
           })
      .def("lod", [](LoDTensor &self) -> std::vector<std::vector<size_t>> {
#ifdef PADDLE_ONLY_CPU
        return self.lod();
#else
           auto lod = self.lod();
           std::vector<std::vector<size_t>> new_lod;
           new_lod.reserve(lod.size());
           std::transform(lod.begin(), lod.end(), std::back_inserter(new_lod),
               [](paddle::framework::Vector<size_t> item) ->
                   std::vector<size_t> {
                 std::vector<size_t> v;
                 v.reserve(item.size());
                 std::copy(item.begin(), item.end(), std::back_inserter(v));
                 return v;
               });
           return new_lod;
#endif
      });
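  // A minimal sketch of the LoDTensor binding above, assuming the Python-side
  // LoD is a list of per-level offset lists:
  //   lt = core.LoDTensor([[0, 2, 5]])
  //   lt.set_lod([[0, 2, 5]])
  //   assert lt.lod() == [[0, 2, 5]]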

  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.

All parameters, weights and gradients are variables in Paddle.
)DOC")
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
      .def("set_int",
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
      .def("get_tensor",
           [](Variable &self) -> LoDTensor * {
             return self.GetMutable<LoDTensor>();
           },
           py::return_value_policy::reference)
      .def("get_net",
           [](Variable &self) -> operators::NetOp * {
             return self.GetMutable<operators::NetOp>();
           },
           py::return_value_policy::reference);

  py::class_<Scope>(m, "Scope", "")
      .def("new_var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.NewVar(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope",
           [](Scope &self) -> Scope * { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids);
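  // A short usage sketch for the Scope and Variable bindings above (names as
  // exposed in this file; a nullptr from FindVar maps to None on the Python side):
  //   scope = core.Scope()
  //   var = scope.new_var("fc.w")
  //   tensor = var.get_tensor()
  //   child = scope.new_scope()
  //   assert scope.find_var("unknown") is None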

  //! @note: Be careful! PyBind will return std::string as a unicode object,
  //! not a Python str. If you want a str object, you should cast it in Python.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    std::vector<py::bytes> ret_values;

    OpInfoMap::Instance().IterAllInfo([&ret_values](const std::string &type,
                                                    const OpInfo &info) {
      if (!info.HasOpProtoAndChecker()) return;
      std::string str;
      PADDLE_ENFORCE(info.Proto().SerializeToString(&str),
                     "Serialize OpProto Error. This could be a bug of Paddle.");
      ret_values.emplace_back(str);
    });
    return ret_values;
  });
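  // Each element returned by get_all_op_protos is a Python bytes object holding
  // a serialized OpProto; a plausible way to consume it, assuming the generated
  // framework_pb2 module is importable on the Python side:
  //   protostrs = core.get_all_op_protos()
  //   op_proto = framework_pb2.OpProto.FromString(bytes(protostrs[0]))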
  m.def_submodule(
       "var_names",
       "The module will return special predefined variable names in Paddle")
      .def("empty", []() { return kEmptyVarName; })
      .def("temp", []() { return kTempVarName; });
  // clang-format off
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifdef PADDLE_ONLY_CPU
                    PADDLE_THROW("GPUPlace is not supported in a CPU-only build.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  });
  // clang-format on

  py::class_<platform::GPUPlace>(m, "GPUPlace")
      .def(py::init<int>())
      .def("__str__", string::to_string<const platform::GPUPlace &>);

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace")
      .def(py::init<>())
      .def("__str__", string::to_string<const platform::CPUPlace &>);

  py::class_<OperatorBase>(m, "Operator")
      .def_static("create",
                  [](py::bytes protobin) {
                    OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    return OpRegistry::CreateOp(desc);
                  })
      .def("backward",
           [](const OperatorBase &forwardOp,
              const std::unordered_set<std::string> &no_grad_vars) {
             return Backward(forwardOp, no_grad_vars).release();
           })
      .def("infer_shape", &OperatorBase::InferShape)
      .def("run", &OperatorBase::Run)
      .def("type",
           [](const OperatorBase &op) -> std::string { return op.Type(); })
      .def("outputs",
           [](const OperatorBase &op)
               -> std::map<std::string, std::vector<std::string>> {
                 return op.Outputs();
               })
      .def("output_vars",
           [](const OperatorBase &op) { return op.OutputVars(true); })
      .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
      .def("input_vars", [](const OperatorBase &op) { return op.InputVars(); })
      .def("__str__", &OperatorBase::DebugString)
      .def("no_intermediate_outputs",
           [](const OperatorBase &op) { return op.OutputVars(false); })
      .def("support_gpu", &OperatorBase::SupportGPU);

  py::class_<operators::NetOp, OperatorBase>(m, "Net")
      .def_static("create",
                  []() -> operators::NetOp * {
                    auto *retv = new operators::NetOp;
                    retv->SetType("plain_net");
                    return retv;
                  })
      .def("append_op",
           [](operators::NetOp &self, const OperatorBase &op) {
             self.AppendOp(op);
           })
      .def("complete_add_op", &operators::NetOp::CompleteAddOp)
      .def("complete_add_op", [](std::shared_ptr<operators::NetOp> &self) {
        self->CompleteAddOp();
      });
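  // A rough Python-side sketch of the Net binding above (`some_op` is any
  // Operator created elsewhere; this is a sketch, not API documentation):
  //   net = core.Net.create()
  //   net.append_op(some_op)
  //   net.complete_add_op()   # finalize the net's inputs and outputs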

  // recurrent_op
  py::class_<operators::RecurrentOp, OperatorBase>(m, "RecurrentOp")
      .def_static(
          "create",
          [](py::bytes protobin) -> operators::RecurrentOp * {
            OpDesc desc;
            PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                           "Cannot parse user input to OpDesc");
            PADDLE_ENFORCE(desc.IsInitialized(),
                           "User OpDesc is not initialized, reason %s",
                           desc.InitializationErrorString());
            auto rnn_op = OpRegistry::CreateOp(desc);
            return static_cast<operators::RecurrentOp *>(rnn_op.release());
          })
      .def("set_stepnet",
           [](operators::RecurrentOp &self, const operators::NetOp &net)
               -> void { self.set_stepnet(net.Clone()); });

  // cond_op
  py::class_<operators::CondOp, OperatorBase>(m, "CondOp")
      .def_static("create",
                  [](py::bytes protobin) -> operators::CondOp * {
                    OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    auto cond_op = OpRegistry::CreateOp(desc);
                    return static_cast<operators::CondOp *>(cond_op.release());
                  })
      .def("set_truenet",
           [](operators::CondOp &self, const operators::NetOp &net) -> void {
             self.set_truenet(net.Clone());
           })
      .def("set_falsenet",
           [](operators::CondOp &self, const operators::NetOp &net) -> void {
             self.set_falsenet(net.Clone());
           });

  m.def("unique_integer", UniqueIntegerGenerator);

  m.def("is_compile_gpu", IsCompileGPU);

  bind_program_desc(m);
  bind_block_desc(m);
  bind_var_dses(m);
  bind_op_desc(m);

  return m.ptr();
}
}  // namespace framework
}  // namespace paddle