pybind.cc 8.5 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

L
Luo Tao 已提交
15
#include <Python.h>

#include <atomic>
#include <fstream>
#include <string>
#include <unordered_set>
#include <vector>

#include "paddle/framework/backward.h"
#include "paddle/framework/net.h"
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"
#include "paddle/framework/scope.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"
#include "paddle/pybind/tensor_bind.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

31 32 33
namespace py = pybind11;
namespace pd = paddle::framework;

Y
Yu Yang 已提交
34
USE_OP(add_two);
Q
Qiao Longfei 已提交
35
USE_OP(onehot_cross_entropy);
Y
Yu Yang 已提交
36
USE_OP_WITHOUT_KERNEL(fc);
Q
Qiao Longfei 已提交
37
USE_OP(sgd);
Q
qijun 已提交
38
USE_OP(mul);
L
liaogang 已提交
39
USE_OP(mean);
Q
qijun 已提交
40 41 42
USE_OP(sigmoid);
USE_OP(softmax);
USE_OP(rowwise_add);
Y
Yan Chunwei 已提交
43
USE_OP_WITHOUT_KERNEL(recurrent_op);
Y
Yu Yang 已提交
44

Y
Yu Yang 已提交
45 46 47 48
// Adds the common operator interface (infer_shape / run / type / outputs /
// __str__) onto an already-declared pybind11 class. Used below for both the
// "Operator" and "Net" bindings. ClassType is a py::class_<T, ...>;
// ClassType::type is the wrapped C++ operator type.
template <typename ClassType>
void ExposeOperator(ClassType& m) {
  m.def("infer_shape", &ClassType::type::InferShape)
      .def("run", &ClassType::type::Run)
      .def("type",
           [](const typename ClassType::type& op) -> std::string {
             return op.type_;
           })
      .def("outputs",
           [](const typename ClassType::type& op) -> std::vector<std::string> {
             return op.outputs_;
           })
      .def("__str__", &ClassType::type::DebugString);
}
Y
Yu Yang 已提交
59

60 61 62 63 64
// Returns a process-wide unique integer, starting at 0 and increasing by one
// per call. Thread-safe: the counter is a std::atomic, so concurrent callers
// never observe the same value. (Requires <atomic>, added to the includes.)
static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> counter{0};
  return counter++;  // atomic post-increment == fetch_add(1)
}

Q
qijun 已提交
65 66 67 68 69 70 71 72
// Reports whether this binary was built with GPU support, i.e. whether
// the PADDLE_ONLY_CPU macro was left undefined at compile time.
bool IsCompileGPU() {
#ifndef PADDLE_ONLY_CPU
  return true;
#else
  return false;
#endif
}

73
/**
 * Defines the `core` extension module: the Python-facing surface of
 * PaddlePaddle's C++ framework. It binds Tensor, Variable, Scope,
 * Operator, Net, device contexts/places, and a couple of utility
 * functions (unique_integer, is_compile_gpu).
 */
PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

  // --- Tensor ---------------------------------------------------------
  // Exposes the buffer protocol so numpy can view tensor memory directly.
  py::class_<pd::Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer([](pd::Tensor& self) -> py::buffer_info {
        return paddle::pybind::CastToPyBuffer(self);
      })
      .def("get_dims",
           [](const pd::Tensor& self) { return pd::vectorize(self.dims()); })
      .def("set_dims",
           [](pd::Tensor& self, const std::vector<int>& dim) {
             self.Resize(pd::make_ddim(dim));
           })
      // alloc_* overloads allocate typed storage on the given place.
      .def("alloc_float",
           [](pd::Tensor& self, paddle::platform::GPUPlace& place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](pd::Tensor& self, paddle::platform::CPUPlace& place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](pd::Tensor& self, paddle::platform::CPUPlace& place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](pd::Tensor& self, paddle::platform::GPUPlace& place) {
             self.mutable_data<int>(place);
           })
      .def("set", paddle::pybind::PyCPUTensorSetFromArray<float>)
      .def("set", paddle::pybind::PyCPUTensorSetFromArray<int>)
#ifndef PADDLE_ONLY_CPU
      .def("set", paddle::pybind::PyCUDATensorSetFromArray<float>)
      .def("set", paddle::pybind::PyCUDATensorSetFromArray<int>)
#endif
      .def("shape", [](pd::Tensor& self) { return pd::vectorize(self.dims()); })
      .def("set_float_element",
           [](pd::Tensor& self, size_t offset, float f) {
             // NOTE(review): raw host-pointer indexing — valid for CPU
             // tensors; original TODO(yuyang18) said "Only support GPU
             // now", which looks inverted — verify.
             self.data<float>()[offset] = f;
           })
      .def("get_float_element", [](pd::Tensor& self, size_t offset) -> float {
        // NOTE(review): raw host-pointer indexing — see set_float_element.
        return self.data<float>()[offset];
      });

  // --- Variable -------------------------------------------------------
  py::class_<pd::Variable>(m, "Variable", R"DOC(Variable Class.

All parameter, weight, gradient are variables in Paddle.
)DOC")
      .def("is_int", [](const pd::Variable& var) { return var.IsType<int>(); })
      .def("set_int",
           [](pd::Variable& var, int val) -> void {
             *var.GetMutable<int>() = val;
           })
      .def("get_int",
           [](const pd::Variable& var) -> int { return var.Get<int>(); })
      // The Variable keeps ownership; Python only borrows the pointer
      // (py::return_value_policy::reference).
      .def("get_tensor",
           [](pd::Variable& self) -> pd::Tensor* {
             return self.GetMutable<pd::Tensor>();
           },
           py::return_value_policy::reference)
      .def("get_net",
           [](pd::Variable& self) -> pd::NetOp* {
             return self.GetMutable<pd::NetOp>();
           },
           py::return_value_policy::reference);

  // --- Scope ----------------------------------------------------------
  py::class_<pd::Scope>(m, "Scope", "")
      .def("new_var",
           [](pd::Scope& self, const std::string& name) -> pd::Variable* {
             return self.NewVar(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &pd::Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope",
           [](pd::Scope& self) -> pd::Scope* { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &pd::Scope::DropKids);

  //! @note: Be careful! PyBind will return std::string as an unicode, not
  //! Python str. If you want a str object, you should cast them in Python.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    auto& protos = pd::OpRegistry::protos();
    std::vector<py::bytes> ret_values;
    for (auto it = protos.begin(); it != protos.end(); ++it) {
      PADDLE_ENFORCE(it->second.IsInitialized(),
                     "OpProto must all be initialized");
      std::string str;
      // Serialize to bytes (not unicode) so Python can parse the protobuf.
      PADDLE_ENFORCE(it->second.SerializeToString(&str),
                     "Serialize OpProto Error. This could be a bug of Paddle.");
      ret_values.push_back(py::bytes(str));
    }
    return ret_values;
  });
  m.def_submodule(
       "var_names",
       "The module will return special predefined variable name in Paddle")
      .def("empty", pd::OperatorBase::EMPTY_VAR_NAME)
      .def("temp", pd::OperatorBase::TMP_VAR_NAME);

  // --- DeviceContext / Places -----------------------------------------
  // clang-format off
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifdef PADDLE_ONLY_CPU
                    PADDLE_THROW("GPUPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  });
  // clang-format on

  py::class_<paddle::platform::GPUPlace>(m, "GPUPlace").def(py::init<int>());

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace").def(py::init<>());

  // --- Operator -------------------------------------------------------
  py::class_<pd::OperatorBase, std::shared_ptr<pd::OperatorBase>> operator_base(
      m, "Operator");

  // Deserializes a user-supplied OpDesc protobuf and creates the operator.
  operator_base.def_static("create", [](py::bytes protobin) {
    pd::OpDesc desc;
    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                   "Cannot parse user input to OpDesc");
    PADDLE_ENFORCE(desc.IsInitialized(),
                   "User OpDesc is not initialized, reason %s",
                   desc.InitializationErrorString());
    return pd::OpRegistry::CreateOp(desc);
  });

  operator_base.def("backward",
                    [](const pd::OperatorBase& forwardOp,
                       const std::unordered_set<std::string>& no_grad_vars) {
                      return pd::Backward(forwardOp, no_grad_vars);
                    });

  ExposeOperator(operator_base);

  // --- Net ------------------------------------------------------------
  py::class_<pd::NetOp, std::shared_ptr<pd::NetOp>> net(m, "Net");

  net.def_static("create",
                 []() -> std::shared_ptr<pd::NetOp> {
                   auto retv = std::make_shared<pd::NetOp>();
                   retv->type_ = "plain_net";
                   return retv;
                 })
      .def("add_op", &pd::NetOp::AddOp)
      // Overload: adding a sub-Net upcasts it to OperatorBase first.
      .def("add_op",
           [](pd::NetOp& self, const std::shared_ptr<pd::NetOp>& net) -> void {
             self.AddOp(std::static_pointer_cast<pd::OperatorBase>(net));
           })
      .def("complete_add_op", &pd::NetOp::CompleteAddOp)
      .def("complete_add_op",
           [](std::shared_ptr<pd::NetOp>& self) { self->CompleteAddOp(); });
  ExposeOperator(net);

  // --- Utilities ------------------------------------------------------
  m.def("unique_integer", UniqueIntegerGenerator);

  m.def("is_compile_gpu", IsCompileGPU);

  return m.ptr();
}