/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <Python.h>
#include <atomic>
#include <fstream>
#include <vector>

#include "paddle/framework/backward.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/net_op.h"
#include "paddle/operators/recurrent_op.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"
#include "paddle/pybind/tensor_py.h"
#include "paddle/string/to_string.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"

namespace py = pybind11;

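// USE_OP / USE_NO_KERNEL_OP / USE_CPU_ONLY_OP reference each operator's
// registration symbol so the linker keeps it in this extension module; every
// operator listed below becomes creatable from the Python side.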
USE_OP(add);
USE_OP(onehot_cross_entropy);
USE_OP(sgd);
USE_OP(mul);
USE_OP(mean);
USE_OP(sigmoid);
USE_OP(softmax);
USE_OP(rowwise_add);
USE_OP(fill_zeros_like);
USE_NO_KERNEL_OP(recurrent);
USE_OP(gaussian_random);
USE_OP(uniform_random);
USE_OP(lookup_table);
USE_OP(scale);
USE_NO_KERNEL_OP(identity);
USE_OP(minus);
USE_OP(cos_sim);
USE_CPU_ONLY_OP(gather);
USE_CPU_ONLY_OP(scatter);
USE_OP(top_k);
USE_OP(squared_l2_distance);
USE_OP(sum);

namespace paddle {
namespace framework {

using Tensor = framework::Tensor;

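// Thread-safe counter behind core.unique_integer(); each call returns the
// next integer, which the Python side can use, e.g., to build unique names.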
static size_t UniqueIntegerGenerator() {
  static std::atomic<size_t> generator;
  return generator.fetch_add(1);
}

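// Whether this binary was built with GPU support (PADDLE_ONLY_CPU undefined);
// exposed to Python as core.is_compile_gpu().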
bool IsCompileGPU() {
#ifdef PADDLE_ONLY_CPU
  return false;
#else
  return true;
#endif
}

PYBIND11_PLUGIN(core) {
  py::module m("core", "C++ core of PaddlePaddle");

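  // Tensor bindings: buffer protocol for numpy interop (CastToPyBuffer),
  // shape get/set, typed allocation on CPU/GPU places, and scalar element
  // access for float tensors.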
  py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
      .def_buffer(
          [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
      .def("get_dims",
           [](const Tensor &self) { return vectorize(self.dims()); })
      .def("set_dims",
           [](Tensor &self, const std::vector<int64_t> &dim) {
             self.Resize(make_ddim(dim));
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("alloc_int",
           [](Tensor &self, paddle::platform::GPUPlace &place) {
             self.mutable_data<int>(place);
           })
      .def("set", PyCPUTensorSetFromArray<float>)
      .def("set", PyCPUTensorSetFromArray<int>)
#ifndef PADDLE_ONLY_CPU
      .def("set", PyCUDATensorSetFromArray<float>)
      .def("set", PyCUDATensorSetFromArray<int>)
#endif
      .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
      .def("set_float_element",
           [](Tensor &self, size_t offset, float f) {
             // TODO(yuyang18): Only support CPU now.
             self.data<float>()[offset] = f;
           })
      .def("get_float_element", [](Tensor &self, size_t offset) -> float {
        // TODO(yuyang18): Only support CPU now.
        return self.data<float>()[offset];
      });

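  // A Variable is a type-erased value holder; the bindings below expose typed
  // accessors for ints, Tensors and NetOps, returning references rather than
  // copies.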
  py::class_<Variable>(m, "Variable", R"DOC(Variable Class.

All parameters, weights and gradients are variables in Paddle.
)DOC")
      .def("is_int", [](const Variable &var) { return var.IsType<int>(); })
      .def("set_int",
           [](Variable &var, int val) -> void { *var.GetMutable<int>() = val; })
      .def("get_int", [](const Variable &var) -> int { return var.Get<int>(); })
      .def("get_tensor",
           [](Variable &self) -> Tensor * { return self.GetMutable<Tensor>(); },
           py::return_value_policy::reference)
      .def("get_net",
           [](Variable &self) -> operators::NetOp * {
             return self.GetMutable<operators::NetOp>();
           },
           py::return_value_policy::reference);

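  // A Scope maps names to Variables and owns a tree of child scopes:
  // new_var/find_var create and look up variables, new_scope creates a child,
  // and drop_kids releases all children.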
  py::class_<Scope>(m, "Scope", "")
      .def("new_var",
           [](Scope &self, const std::string &name) -> Variable * {
             return self.NewVar(name);
           },
           py::return_value_policy::reference)
      .def("find_var", &Scope::FindVar, py::return_value_policy::reference)
      .def(py::init<>())
      .def("new_scope",
           [](Scope &self) -> Scope * { return &self.NewScope(); },
           py::return_value_policy::reference)
      .def("drop_kids", &Scope::DropKids);

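  // get_all_op_protos returns every registered operator's OpProto serialized
  // to bytes; the Python layer can parse these to generate operator wrappers.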
  //! @note: Be careful! PyBind will return std::string as a unicode, not a
  //! Python str. If you want a str object, you should cast it in Python.
  m.def("get_all_op_protos", []() -> std::vector<py::bytes> {
    std::vector<py::bytes> ret_values;

    OpInfoMap::Instance().IterAllInfo([&ret_values](const std::string &type,
                                                    const OpInfo &info) {
      if (!info.HasOpProtoAndChecker()) return;
      std::string str;
      PADDLE_ENFORCE(info.Proto().SerializeToString(&str),
                     "Serialize OpProto Error. This could be a bug of Paddle.");
      ret_values.emplace_back(str);
    });
    return ret_values;
  });
  m.def_submodule(
       "var_names",
       "The module returns special predefined variable names in Paddle")
      .def("empty", []() { return kEmptyVarName; })
      .def("temp", []() { return kTempVarName; });
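  // DeviceContext factory: "create" builds a CPU or CUDA device context for
  // the given place; the GPU overload throws in CPU-only builds.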
  // clang-format off
  py::class_<paddle::platform::DeviceContext>(m, "DeviceContext")
      .def_static("create",
                  [](paddle::platform::CPUPlace& place)
                      -> paddle::platform::DeviceContext* {
                    return new paddle::platform::CPUDeviceContext();
                  })
      .def_static("create",
                  [](paddle::platform::GPUPlace& place)
                      -> paddle::platform::DeviceContext* {
#ifdef PADDLE_ONLY_CPU
                    PADDLE_THROW("GPUPlace is not supported in CPU device.");
#else
                    return new paddle::platform::CUDADeviceContext(place);
#endif
                  });
  // clang-format on

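  // Device placement descriptors: GPUPlace is constructed with a device id,
  // CPUPlace takes no arguments; both are printable for debugging.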
  py::class_<platform::GPUPlace>(m, "GPUPlace")
      .def(py::init<int>())
      .def("__str__", string::to_string<const platform::GPUPlace &>);

  py::class_<paddle::platform::CPUPlace>(m, "CPUPlace")
      .def(py::init<>())
      .def("__str__", string::to_string<const platform::CPUPlace &>);

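  // Operator bindings: an operator is created from a serialized OpDesc
  // protobuf; Python can run it, infer shapes, build its backward pass, and
  // inspect its inputs and outputs.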
  py::class_<OperatorBase>(m, "Operator")
      .def_static("create",
                  [](py::bytes protobin) {
                    OpDesc desc;
                    PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                   "Cannot parse user input to OpDesc");
                    PADDLE_ENFORCE(desc.IsInitialized(),
                                   "User OpDesc is not initialized, reason %s",
                                   desc.InitializationErrorString());
                    return OpRegistry::CreateOp(desc);
                  })
      .def("backward",
           [](const OperatorBase &forwardOp,
              const std::unordered_set<std::string> &no_grad_vars) {
             return Backward(forwardOp, no_grad_vars).release();
           })
      .def("infer_shape", &OperatorBase::InferShape)
      .def("run", &OperatorBase::Run)
      .def("type",
           [](const OperatorBase &op) -> std::string { return op.Type(); })
      .def("outputs",
           [](const OperatorBase &op)
               -> std::map<std::string, std::vector<std::string>> {
                 return op.Outputs();
               })
      .def("output_vars",
           [](const OperatorBase &op) { return op.OutputVars(true); })
      .def("inputs", [](const OperatorBase &op) { return op.Inputs(); })
      .def("input_vars", [](const OperatorBase &op) { return op.InputVars(); })
      .def("__str__", &OperatorBase::DebugString)
      .def("no_intermediate_outputs",
           [](const OperatorBase &op) { return op.OutputVars(false); })
      .def("support_gpu", &OperatorBase::SupportGPU);

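  // Net is a NetOp: a container operator that owns an ordered list of
  // operators; complete_add_op finalizes its inputs/outputs after ops are
  // appended.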
  py::class_<operators::NetOp, OperatorBase>(m, "Net")
      .def_static("create",
                  []() -> operators::NetOp * {
                    auto *retv = new operators::NetOp;
                    retv->SetType("plain_net");
                    return retv;
                  })
      .def("append_op",
           [](operators::NetOp &self, const OperatorBase &op) {
             self.AppendOp(op);
           })
      .def("complete_add_op", &operators::NetOp::CompleteAddOp)
      .def("complete_add_op", [](std::shared_ptr<operators::NetOp> &self) {
        self->CompleteAddOp();
      });

  // recurrent_op
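  // Created from a serialized OpDesc like a plain Operator, but returned as
  // the concrete RecurrentOp type so Python can attach a step net via
  // set_stepnet.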
  py::class_<operators::RecurrentOp, OperatorBase>(m, "RecurrentOp")
      .def_static(
          "create",
          [](py::bytes protobin) -> operators::RecurrentOp * {
            OpDesc desc;
            PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                           "Cannot parse user input to OpDesc");
            PADDLE_ENFORCE(desc.IsInitialized(),
                           "User OpDesc is not initialized, reason %s",
                           desc.InitializationErrorString());
            auto rnn_op = OpRegistry::CreateOp(desc);
            return static_cast<operators::RecurrentOp *>(rnn_op.release());
          })
      .def("set_stepnet",
           [](operators::RecurrentOp &self, const operators::NetOp &net)
               -> void { self.set_stepnet(net.Clone()); });

  m.def("unique_integer", UniqueIntegerGenerator);

  m.def("is_compile_gpu", IsCompileGPU);

  return m.ptr();
}
}  // namespace framework
}  // namespace paddle