pybind.cc
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <Python.h>
#include <atomic>
#include <fstream>
#include <vector>

#include "paddle/framework/backward.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/net_op.h"
#include "paddle/operators/recurrent_op.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"
#include "paddle/pybind/tensor_py.h"
#include "paddle/string/to_string.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

namespace py = pybind11;

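// USE_OP / USE_NO_KERNEL_OP / USE_CPU_ONLY_OP reference each operator's
// registration so the linker keeps it in this Python extension module.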
USE_OP(add_two);
USE_OP(onehot_cross_entropy);
USE_OP(sgd);
USE_OP(mul);
USE_OP(mean);
USE_OP(sigmoid);
USE_OP(softmax);
USE_OP(rowwise_add);
USE_OP(fill_zeros_like);
USE_NO_KERNEL_OP(recurrent);
USE_OP(gaussian_random);
USE_OP(uniform_random);
USE_OP(lookup_table);
USE_OP(scale);
USE_NO_KERNEL_OP(identity);
USE_OP(minus);
USE_OP(cos_sim);
USE_CPU_ONLY_OP(gather);
USE_CPU_ONLY_OP(scatter);
USE_OP(transpose);

namespace paddle {
namespace framework {

using Tensor = framework::Tensor;

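// Returns a process-wide unique, monotonically increasing integer; the
// std::atomic counter makes it safe to call from multiple threads.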
static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> generator;
  return generator.fetch_add(1);
}

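// True when this binary was built with GPU support, i.e. when
// PADDLE_ONLY_CPU is not defined.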
bool IsCompileGPU() {
#ifdef PADDLE_ONLY_CPU
  return false;
#else
  return true;
#endif
}

PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

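  // Tensor is exposed through the Python buffer protocol, so numpy can view
  // its memory directly (see CastToPyBuffer).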
  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("get_dims",
           [](const Tensor &self) { return vectorize(self.dims()); })
      .def("set_dims",
           [](Tensor &self, const std::vector<int64_t> &dim) {
             self.Resize(make_ddim(dim));
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("set", PyCPUTensorSetFromArray<float>)
      .def("set", PyCPUTensorSetFromArray<int>)
#ifndef PADDLE_ONLY_CPU
      .def("set", PyCUDATensorSetFromArray<float>)
      .def("set", PyCUDATensorSetFromArray<int>)
#endif
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
      .def("set_float_element",
           [](Tensor &self, size_t offset, float f) {
             // TODO(yuyang18): Only support CPU now.
             self.data<float>()[offset] = f;
           })
      .def("get_float_element", [](Tensor &self, size_t offset) -> float {
        // TODO(yuyang18): Only support CPU now.
        return self.data<float>()[offset];
      });

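  // Variable is a type-erased container; the bindings below expose the int,
  // Tensor, and NetOp views needed by the Python side.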
  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.

All parameters, weights, and gradients are variables in Paddle.
)DOC")
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
      .def("set_int",
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
      .def("get_tensor",
           [](Variable &self) -> Tensor * { return self.GetMutable<Tensor>(); },
           py::return_value_policy::reference)
      .def("get_net",
           [](Variable &self) -> operators::NetOp * {
             return self.GetMutable<operators::NetOp>();
           },
           py::return_value_policy::reference);

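  // Scope owns its variables and child scopes, so the raw pointers handed to
  // Python use return_value_policy::reference and must not be freed there.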
  py::class_<Scope>(m, "Scope", "")
      .def("new_var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.NewVar(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope",
           [](Scope &self) -> Scope * { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids);

  //! @note: Be careful! PyBind returns std::string as a unicode object, not a
  //! Python str. If you want a str object, cast it in Python.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    std::vector<py::bytes> ret_values;

    OpInfoMap::Instance().IterAllInfo([&ret_values](const std::string &type,
                                                    const OpInfo &info) {
      if (!info.HasOpProtoAndChecker()) return;
      std::string str;
      PADDLE_ENFORCE(info.Proto().SerializeToString(&str),
                     "Serialize OpProto Error. This could be a bug of Paddle.");
      ret_values.emplace_back(str);
    });
    return ret_values;
  });
  m.def_submodule(
       "var_names",
       "The module will return special predefined variable name in Paddle")
      .def("empty", []() { return kEmptyVarName; })
      .def("temp", []() { return kTempVarName; });
  // clang-format off
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifdef PADDLE_ONLY_CPU
                    PADDLE_THROW("GPUPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  });
  // clang-format on

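  // Places describe where tensor memory lives; GPUPlace is constructed with a
  // device id, CPUPlace takes no arguments.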
  py::class_<platform::GPUPlace>(m, "GPUPlace")
      .def(py::init<int>())
      .def("__str__", string::to_string<const platform::GPUPlace &>);

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace")
      .def(py::init<>())
      .def("__str__", string::to_string<const platform::CPUPlace &>);

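  // Generic operator binding: "create" parses an OpDesc protobuf sent from
  // Python as bytes, and "backward" builds the gradient operators of a
  // forward op, skipping the variables listed in no_grad_vars.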
  py::class_<OperatorBase>(m, "Operator")
      .def_static("create",
                  [](py::bytes protobin) {
                    OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    return OpRegistry::CreateOp(desc);
                  })
      .def("backward",
           [](const OperatorBase &forwardOp,
              const std::unordered_set<std::string> &no_grad_vars) {
             return Backward(forwardOp, no_grad_vars).release();
           })
      .def("infer_shape", &OperatorBase::InferShape)
      .def("run", &OperatorBase::Run)
      .def("type",
           [](const OperatorBase &op) -> std::string { return op.Type(); })
      .def("outputs",
           [](const OperatorBase &op)
               -> std::map<std::string, std::vector<std::string>> {
                 return op.Outputs();
               })
      .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
      .def("__str__", &OperatorBase::DebugString)
      .def("no_intermediate_outputs",
           [](const OperatorBase &op) { return op.OutputVars(false); })
      .def("support_gpu", &OperatorBase::SupportGPU);

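  // NetOp is an operator that runs its appended child operators in sequence;
  // complete_add_op finalizes the net once all operators have been appended.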
  py::class_<operators::NetOp, OperatorBase>(m, "Net")
      .def_static("create",
                  []() -> operators::NetOp * {
                    auto *retv = new operators::NetOp;
                    retv->SetType("plain_net");
                    return retv;
                  })
      .def("append_op",
           [](operators::NetOp &self, const OperatorBase &op) {
             self.AppendOp(op);
           })
      .def("complete_add_op", &operators::NetOp::CompleteAddOp)
      .def("complete_add_op", [](std::shared_ptr<operators::NetOp> &self) {
        self->CompleteAddOp();
      });

  // RecurrentOp is created from a serialized OpDesc like any other operator;
  // its step network is attached afterwards via set_stepnet.
  py::class_<operators::RecurrentOp, OperatorBase>(m, "RecurrentOp")
      .def_static(
          "create",
          [](py::bytes protobin) -> operators::RecurrentOp * {
            OpDesc desc;
            PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                           "Cannot parse user input to OpDesc");
            PADDLE_ENFORCE(desc.IsInitialized(),
                           "User OpDesc is not initialized, reason %s",
                           desc.InitializationErrorString());
            auto rnn_op = OpRegistry::CreateOp(desc);
            return static_cast<operators::RecurrentOp *>(rnn_op.release());
          })
      .def("set_stepnet",
           [](operators::RecurrentOp &self, const operators::NetOp &net)
               -> void { self.set_stepnet(net.Clone()); });

  m.def("unique_integer", UniqueIntegerGenerator);

  m.def("is_compile_gpu", IsCompileGPU);

  return m.ptr();
}
}  // namespace framework
}  // namespace paddle