// eager_functions.cc
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// disable numpy compile error

#if defined(_MSC_VER)
#include <BaseTsd.h>
typedef SSIZE_T ssize_t;
#endif

#include <Python.h>

#include <string>
#include <vector>

#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/backward.h"
#include "paddle/fluid/eager/custom_operator/custom_operator_node.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/custom_operator.h"
#include "paddle/fluid/framework/op_meta_info_helper.h"
#include "paddle/fluid/framework/python_headers.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/dynload/dynamic_loader.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/stream/cuda_stream.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/fluid/pybind/tensor_py.h"
#include "paddle/phi/api/ext/op_meta_info.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/api/lib/utils/tensor_utils.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"

namespace paddle {
namespace pybind {

namespace py = ::pybind11;

extern PyTypeObject* p_tensor_type;
extern PyTypeObject* g_multidevicefeedreader_pytype;
extern PyTypeObject* g_orderedmultidevicefeedreader_pytype;

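// Returns the total element count of a numpy array, i.e. the product of its
// dimensions as read from the pybind11 array proxy.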
size_t PyArray_Size_(PyObject* numpy_data) {
  size_t res = 1;
  auto dims = pybind11::detail::array_proxy(numpy_data)->dimensions;
  auto nd = pybind11::detail::array_proxy(numpy_data)->nd;
  while (nd--) {
    res *= (*dims++);
  }
  return res;
}

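// An Allocation backed by a numpy array's buffer. The constructor takes a
// reference on the underlying PyObject; the destructor releases it under the
// GIL, so the numpy data stays alive as long as the allocation does.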
class EagerNumpyAllocation : public phi::Allocation {
 public:
  explicit EagerNumpyAllocation(PyObject* numpy_data, phi::DataType dtype)
      : Allocation(
            static_cast<void*>(pybind11::detail::array_proxy(numpy_data)->data),
            framework::DataTypeSize(dtype) * PyArray_Size_(numpy_data),
            paddle::platform::CPUPlace()),
        arr_(numpy_data) {
    PADDLE_ENFORCE_NOT_NULL(
        arr_,
        platform::errors::InvalidArgument("The underlying PyObject pointer of "
                                          "numpy array cannot be nullptr"));
    PADDLE_ENFORCE_NE(
        arr_,
        Py_None,
        platform::errors::PreconditionNotMet(
            "The underlying PyObject pointer of numpy array cannot be None"));
    Py_INCREF(arr_);
  }
  ~EagerNumpyAllocation() override {
    py::gil_scoped_acquire gil;
    Py_DECREF(arr_);
  }

 private:
  PyObject* arr_;
};

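// Python binding for egr::scale: parses (tensor, float, float, bool, bool)
// from the positional args and returns the scaled tensor.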
static PyObject* eager_api_scale(PyObject* self,
                                 PyObject* args,
                                 PyObject* kwargs) {
  EAGER_TRY
  // TODO(jiabin): Sync Tensor and Variable here when we support
  paddle::experimental::Tensor ret = egr::scale(
      reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor,
      CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 1), 1),
      CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 2), 2),
      CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3),
      CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4));
  return ToPyObject(ret);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

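// Python binding for egr::Backward: runs the backward pass from `tensors`,
// seeded with optional `grad_tensors`, honoring the retain-graph flag.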
static PyObject* eager_api_run_backward(PyObject* self,
                                        PyObject* args,
                                        PyObject* kwargs) {
  EAGER_TRY
  auto tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);
  auto grad_tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
  egr::Backward(tensors,
                grad_tensors,
                CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2));
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

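// Python binding for egr::Grad: computes gradients of `tensors` w.r.t.
// `inputs`, with retain_graph / create_graph / only_inputs / allow_unused
// flags and a `no_grad_vars` list, and returns the resulting tensors.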
static PyObject* eager_api_run_partial_grad(PyObject* self,
                                            PyObject* args,
                                            PyObject* kwargs) {
  EAGER_TRY
  auto tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);
  auto inputs = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
  auto grad_tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 2), 2);
  auto retain_graph = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  auto create_graph = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  auto only_inputs = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 5), 5);
  auto allow_unused = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 6), 6);
  auto no_grad_vars = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 7), 7);

  std::vector<paddle::experimental::Tensor> result = egr::Grad(tensors,
                                                               inputs,
                                                               grad_tensors,
                                                               retain_graph,
                                                               create_graph,
                                                               only_inputs,
                                                               allow_unused,
                                                               no_grad_vars);
  VLOG(1) << " in eager_api_run_partial_grad, after running egr::Grad";
  return ToPyObject(result, true /* return_py_none_if_not_initialize */);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

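// Python binding: copies `src` to `dst` on the given place (optionally
// blocking) and mirrors src's stop-gradient and persistable flags onto dst.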
static PyObject* eager_api_tensor_copy(PyObject* self,
                                       PyObject* args,
                                       PyObject* kwargs) {
  EAGER_TRY
  paddle::experimental::Tensor& src =
      reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor;
  paddle::experimental::Tensor& dst =
      reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 1))->tensor;
  auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 2), 2);
  bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);

  dst = src.copy_to(place, blocking);
  egr::EagerUtils::autograd_meta(&dst)->SetStopGradient(
      egr::EagerUtils::autograd_meta(&(src))->StopGradient());
  egr::EagerUtils::autograd_meta(&dst)->SetPersistable(
      egr::EagerUtils::autograd_meta(&(src))->Persistable());
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

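// Python binding: wraps a list of framework::Tensor objects (e.g. produced by
// a multi-device feed reader) into non-persistable, stop-gradient eager
// Tensors.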
static PyObject* eager_api_read_next_tensor_list(PyObject* self,
                                                 PyObject* args,
                                                 PyObject* kwargs) {
  EAGER_TRY
  auto tensor_base_list =
      CastPyArg2VectorOfTensorBase(PyTuple_GET_ITEM(args, 0), 0);
  std::vector<paddle::experimental::Tensor> tensor_list;
  tensor_list.reserve(tensor_base_list.size());
  auto func = [](framework::Tensor& tensor_base) {
    paddle::experimental::Tensor tensor(
        egr::Controller::Instance().GenerateUniqueName());
    auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
    autograd_meta->SetPersistable(false);
    autograd_meta->SetStopGradient(true);
    tensor.set_impl(std::make_shared<phi::DenseTensor>(tensor_base));
    return tensor;
  };
  for (auto& tensor_base : tensor_base_list) {
    tensor_list.emplace_back(func(tensor_base));
  }
  return ToPyObject(tensor_list);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

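// Builds, once per op_type, the slot map between a custom operator's forward
// and backward definitions and caches it in Controller's CustomEdgesSlotMap.
// The five unordered_maps record, in order:
//   [0] fwd input  index -> grad output index ("<input>@GRAD" name match)
//   [1] fwd output index -> grad input  index ("<output>@GRAD" name match)
//   [2] fwd output index -> grad input  index (plain name match)
//   [3] fwd input  index -> grad input  index (plain name match)
//   [4] fwd attr   index -> grad attr   index (name match)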
static void ConstructFwdAndBwdMap(
    const std::vector<paddle::OpMetaInfo>& vec_map,
    const std::string& op_type) {
  auto& in_out_map = egr::Controller::Instance().GetCustomEdgesSlotMap();
  if (in_out_map.find(op_type) != in_out_map.end()) {
    VLOG(7) << "Found existing CustomEdgesSlotMap, skip >>>> ";
    return;
  } else {
    VLOG(7) << "Construct CustomEdgesSlotMap ";
    auto inputs_names =
        paddle::framework::OpMetaInfoHelper::GetInputs(vec_map[0]);
    auto outputs_names =
        paddle::framework::OpMetaInfoHelper::GetOutputs(vec_map[0]);
    auto attrs_names =
        paddle::framework::OpMetaInfoHelper::GetAttrs(vec_map[0]);
    auto grad_outputs_names =
        paddle::framework::OpMetaInfoHelper::GetOutputs(vec_map[1]);
    auto grad_inputs_names =
        paddle::framework::OpMetaInfoHelper::GetInputs(vec_map[1]);
    auto grad_attrs_names =
        paddle::framework::OpMetaInfoHelper::GetAttrs(vec_map[1]);
    std::vector<std::unordered_map<int, int>> res(5);

    in_out_map.insert({op_type, {res}});
    // Prepare pos map for grad_outputs
    VLOG(7) << "Prepare pos map for grad_outputs";
    PADDLE_ENFORCE_LE(
        grad_outputs_names.size(),
        inputs_names.size(),
        paddle::platform::errors::InvalidArgument(
            "The number of grad outputs should be less than or equal to the "
            "number of forward inputs."));
    for (size_t i = 0; i < grad_outputs_names.size(); i++) {
      size_t end = grad_outputs_names[i].find("@GRAD");
      PADDLE_ENFORCE_NE(
          end,
          std::string::npos,
          paddle::platform::errors::NotFound(
              "All grad outputs should be grad vars, but %s is not a grad "
              "var; please check your op and change it to fit the rule.",
              grad_outputs_names[i]));
      for (size_t j = 0; j < inputs_names.size(); j++) {
        if (grad_outputs_names[i].substr(0, end) == inputs_names[j]) {
          VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                  << " inputs: " << inputs_names[j] << " related to No." << i
                  << " grad_outputs: " << grad_outputs_names[i];
          in_out_map[op_type][0][0][j] = i;
        }
      }
    }
    // Prepare pos map for grad_inputs
    for (size_t i = 0; i < grad_inputs_names.size(); i++) {
      size_t end = grad_inputs_names[i].find("@GRAD");
      if (end != std::string::npos) {
        for (size_t j = 0; j < outputs_names.size(); j++) {
          if (grad_inputs_names[i].substr(0, end) == outputs_names[j]) {
            VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                    << " outputs: " << outputs_names[j] << " related to No."
                    << i << " grad_inputs's grad: " << grad_inputs_names[i];
            in_out_map[op_type][0][1][j] = i;
          }
        }
      } else {
        if (std::find(outputs_names.begin(),
                      outputs_names.end(),
                      grad_inputs_names[i]) != outputs_names.end()) {
          for (size_t j = 0; j < outputs_names.size(); j++) {
            if (grad_inputs_names[i] == outputs_names[j]) {
              VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                      << " outputs: " << outputs_names[j] << " related to No."
                      << i
                      << " grad_inputs fwd outputs: " << grad_inputs_names[i];
              in_out_map[op_type][0][2][j] = i;
            }
          }
        } else {
          for (size_t j = 0; j < inputs_names.size(); j++) {
            if (grad_inputs_names[i] == inputs_names[j]) {
              VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                      << " inputs: " << inputs_names[j] << " related to No."
                      << i
                      << " grad_inputs fwd inputs: " << grad_inputs_names[i];
              in_out_map[op_type][0][3][j] = i;
            }
          }
        }
      }
    }

    // Prepare pos map for grad attrs_
    for (size_t i = 0; i < grad_attrs_names.size(); i++) {
      auto end = std::find(
          attrs_names.begin(), attrs_names.end(), grad_attrs_names[i]);
      PADDLE_ENFORCE_NE(end,
                        attrs_names.end(),
                        paddle::platform::errors::NotFound(
                            "All grad attrs should be one of the forward "
                            "attrs, but %s is not; please check your op and "
                            "change it to fit the rule.",
                            grad_attrs_names[i]));
      for (size_t j = 0; j < attrs_names.size(); j++) {
        if (grad_attrs_names[i] == attrs_names[j]) {
          VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                  << " attrs: " << attrs_names[j] << " related to No." << i
                  << " grad_attrs: " << grad_attrs_names[i];
          in_out_map[op_type][0][4][j] = i;
        }
      }
    }
  }
}

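// Casts attribute values coming from Python to the C++ types declared in
// attrs_names (each entry is formatted "name: type"). Currently it only
// normalizes bool/int to int and bool/int/int64_t to int64_t; everything
// else is passed through unchanged.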
static std::vector<paddle::any> CastAttrsToTargetType(
    const std::vector<paddle::any>& src,
    const std::vector<std::string>& attrs_names) {
  std::vector<paddle::any> res;
  PADDLE_ENFORCE_EQ(src.size(),
                    attrs_names.size(),
                    paddle::platform::errors::InvalidArgument(
                        "Expected attrs and attrs_names to have the same "
                        "size; your custom op declares %s attrs, but %s "
                        "were given.",
                        attrs_names.size(),
                        src.size()));
  for (size_t i = 0; i < src.size(); i++) {
    size_t end = attrs_names[i].find(": ");
    std::string type_name =
        attrs_names[i].substr(end + 2, attrs_names.size() - end - 2);
    if (type_name == "int") {
      if (src[i].type() == typeid(bool)) {
        res.emplace_back(static_cast<int>(paddle::any_cast<bool>(src[i])));
      } else if (src[i].type() == typeid(int)) {
        res.emplace_back(src[i]);
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Attr No. %s should be bool or int32; other types are not "
            "supported for now, but we got %s. Please check your code.",
            i,
            src[i].type().name()));
      }
    } else if (type_name == "int64_t") {
      if (src[i].type() == typeid(bool)) {
        res.emplace_back(static_cast<int64_t>(paddle::any_cast<bool>(src[i])));
      } else if (src[i].type() == typeid(int)) {
        res.emplace_back(static_cast<int64_t>(paddle::any_cast<int>(src[i])));
      } else if (src[i].type() == typeid(int64_t)) {
        res.emplace_back(src[i]);
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Attr No. %s should be bool, int32, or int64_t; other types are "
            "not supported for now, but we got %s. Please check your code.",
            i,
            src[i].type().name()));
      }
    } else {
      res.emplace_back(src[i]);
    }
  }
  return res;
}

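// Python binding: _run_custom_op(ctx, op_type, trace_backward). Runs the
// registered forward kernel of a custom operator; if any input requires grad
// and a backward kernel exists, also wires up a RunCustomOpNode with the slot
// map, tensor wrappers, and attrs needed for the backward pass.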
static PyObject* eager_api_run_custom_op(PyObject* self,
                                         PyObject* args,
                                         PyObject* kwargs) {
  EAGER_TRY
  paddle::CustomOpKernelContext ctx =
      CastPyArg2CustomOpKernelContext(PyTuple_GET_ITEM(args, 0), 0);
  std::string op_type = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 1), 1);
  bool trace_backward = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
  VLOG(7) << "Got args from Python for Custom Op: " << op_type
          << ", trace_backward is: " << trace_backward;
  auto meta_info_map = egr::Controller::Instance().GetOpMetaInfoMap();
  PADDLE_ENFORCE_NE(meta_info_map.find(op_type),
                    meta_info_map.end(),
                    paddle::platform::errors::NotFound(
                        "Can't find %s in the eager OpMetaInfoMap, which "
                        "should be created by LoadOpMetaInfoAndRegisterOp. "
                        "Please make sure you registered your op first and "
                        "try again.",
                        op_type));
  VLOG(7) << "Run Kernel of Custom Op: " << op_type;
  std::vector<paddle::any> res_attrs =
      CastAttrsToTargetType(ctx.Attrs(),
                            paddle::framework::OpMetaInfoHelper::GetAttrs(
                                meta_info_map.at(op_type)[0]));
  ctx.EmplaceBackAttrs(res_attrs);
  const auto& vec_map = meta_info_map.at(op_type);
  (*paddle::framework::OpMetaInfoHelper::GetKernelFn(vec_map[0]))(&ctx);

  VLOG(7) << "Get AutogradMeta for inputs and outputs for Custom Op";
  std::vector<std::vector<egr::AutogradMeta*>> ins_auto_grad_metas;
  std::vector<std::vector<egr::AutogradMeta*>> outs_auto_grad_metas;
  VLOG(7) << "We got slot num of ins is: " << ctx.InputRange().size();
  ins_auto_grad_metas.resize(ctx.InputRange().size());
  VLOG(7) << "We got slot num of outs is: " << ctx.OutputRange().size();
  outs_auto_grad_metas.resize(ctx.OutputRange().size());

  for (size_t i = 0; i < ctx.InputRange().size(); i++) {
    ins_auto_grad_metas[i] =
        egr::EagerUtils::nullable_autograd_meta(ctx.InputsBetween(
            ctx.InputRangeAt(i).first, ctx.InputRangeAt(i).second));
  }
  for (size_t i = 0; i < ctx.OutputRange().size(); i++) {
    outs_auto_grad_metas[i] =
        egr::EagerUtils::unsafe_autograd_meta(ctx.OutputsBetweeen(
            ctx.OutputRangeAt(i).first, ctx.OutputRangeAt(i).second));
  }
  bool require_any_grad = false;
  for (size_t i = 0; i < ins_auto_grad_metas.size(); i++) {
    require_any_grad =
        require_any_grad || egr::EagerUtils::ComputeRequireGrad(
                                trace_backward, &(ins_auto_grad_metas[i]));
  }
  if (require_any_grad && (vec_map.size() > 1)) {
    VLOG(6) << " Construct Grad for Custom Op: " << op_type;
    ConstructFwdAndBwdMap(vec_map, op_type);
    for (size_t i = 0; i < outs_auto_grad_metas.size(); i++) {
      egr::EagerUtils::PassStopGradient(false, &(outs_auto_grad_metas[i]));
    }
    auto grad_node = std::make_shared<egr::RunCustomOpNode>(
        outs_auto_grad_metas.size(), ins_auto_grad_metas.size(), op_type);
    auto slot_map =
        egr::Controller::Instance().GetCustomEdgesSlotMap().at(op_type);
    // Prepare Grad outputs
    size_t no_grad_cnt = 0;
    for (size_t i = 0; i < ins_auto_grad_metas.size(); i++) {
      const std::vector<paddle::experimental::Tensor>& in_tensors =
          ctx.InputsBetween(ctx.InputRangeAt(i).first,
                            ctx.InputRangeAt(i).second);

      if (slot_map[0][0].find(i) != slot_map[0][0].end()) {
        grad_node->SetGradOutMeta(in_tensors, slot_map[0][0][i]);
      } else {
        grad_node->SetGradOutMeta(in_tensors,
                                  ins_auto_grad_metas.size() - 1 - no_grad_cnt);
        no_grad_cnt++;
      }
    }
    // Prepare Grad inputs with grad of fwd outputs
    for (size_t i = 0; i < outs_auto_grad_metas.size(); i++) {
      const std::vector<paddle::experimental::Tensor>& out_tensors =
          ctx.OutputsBetweeen(ctx.OutputRangeAt(i).first,
                              ctx.OutputRangeAt(i).second);

      egr::EagerUtils::SetOutRankWithSlot(&(outs_auto_grad_metas[i]), i);
      egr::EagerUtils::SetHistory(&(outs_auto_grad_metas[i]), grad_node);
      grad_node->SetGradInMeta(out_tensors, i);
      egr::EagerUtils::CheckAndRetainGrad(out_tensors);
    }

    // Prepare Grad inputs with fwd outputs
    for (auto it = slot_map[0][2].begin(); it != slot_map[0][2].end(); it++) {
      VLOG(7) << "Prepare fwd_outs: " << it->first
              << " to grad_inputs: " << it->second;
      grad_node->fwd_outs[it->second] =
          egr::RunCustomOpNode::ConstructTensorWrapper(
              ctx.OutputsBetweeen(ctx.OutputRangeAt(it->first).first,
                                  ctx.OutputRangeAt(it->first).second));
    }

    // Prepare Grad inputs with fwd inputs
    for (auto it = slot_map[0][3].begin(); it != slot_map[0][3].end(); it++) {
      VLOG(7) << "Prepare fwd_ins: " << it->first
              << " to grad_inputs: " << it->second;
      grad_node->fwd_ins[it->second] =
          egr::RunCustomOpNode::ConstructTensorWrapper(
              ctx.InputsBetween(ctx.InputRangeAt(it->first).first,
                                ctx.InputRangeAt(it->first).second));
    }

    auto attrs_names = paddle::framework::OpMetaInfoHelper::GetAttrs(
        meta_info_map.at(op_type)[1]);
    std::vector<paddle::any> attrs(attrs_names.size());
    // Prepare attrs for Grad node
    for (auto it = slot_map[0][4].begin(); it != slot_map[0][4].end(); it++) {
      VLOG(7) << "Prepare fwd attrs: " << it->first
              << " to grad_attrs: " << it->second;
      attrs[it->second] = res_attrs[it->first];
    }
    grad_node->SetAttrs(attrs);
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

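// Python binding: builds a SparseCooTensor from dense indices/elements plus a
// shape, and wraps it in an eager Tensor with autograd metadata.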
static PyObject* eager_api_sparse_coo_tensor(PyObject* self,
                                             PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto non_zero_indices = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 2), 2);
  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  PADDLE_ENFORCE(non_zero_indices.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero indices must be a DenseTensor."));
  PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero elements must be a DenseTensor."));
  auto dense_indices =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_indices.impl());
  auto dense_elements =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
  // TODO(zhangkaihuo): After creating the SparseTensor, call coalesced() to
  // sort and merge duplicate indices.
  std::shared_ptr<phi::SparseCooTensor> coo_tensor =
      std::make_shared<phi::SparseCooTensor>(
          *dense_indices, *dense_elements, phi::make_ddim(dense_shape));
  paddle::experimental::Tensor tensor;
  tensor.set_impl(coo_tensor);
  auto name =
      egr::Controller::Instance().GenerateUniqueName("generated_tensor");
  tensor.set_name(name);
  auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
  autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
  if (!autograd_meta->GetMutableGradNode()) {
    VLOG(3) << "Tensor(" << name
            << ") has no GradNode; adding GradNodeAccumulation for it.";
    autograd_meta->SetGradNode(
        std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
  }
  return ToPyObject(tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

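// Python binding: builds a SparseCsrTensor from dense crows/cols/elements
// plus a shape, and wraps it in an eager Tensor with autograd metadata.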
static PyObject* eager_api_sparse_csr_tensor(PyObject* self,
                                             PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto non_zero_crows = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  auto non_zero_cols = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 2), 2);
  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 3), 3);
  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  PADDLE_ENFORCE(non_zero_crows.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the compressed non-zero rows must be a DenseTensor."));
  PADDLE_ENFORCE(non_zero_cols.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero cols must be a DenseTensor."));
  PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
                 paddle::platform::errors::Fatal(
                     "the non-zero elements must be a DenseTensor."));

  auto dense_crows =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_crows.impl());
  auto dense_cols =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_cols.impl());
  auto dense_elements =
      std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
  std::shared_ptr<phi::SparseCsrTensor> csr_tensor =
      std::make_shared<phi::SparseCsrTensor>(*dense_crows,
                                             *dense_cols,
                                             *dense_elements,
                                             phi::make_ddim(dense_shape));
  paddle::experimental::Tensor tensor;
  tensor.set_impl(csr_tensor);
  auto name =
      egr::Controller::Instance().GenerateUniqueName("generated_tensor");
  tensor.set_name(name);
  auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
  autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
  if (!autograd_meta->GetMutableGradNode()) {
    VLOG(3) << "Tensor(" << name
            << ") has no GradNode; adding GradNodeAccumulation for it.";
    autograd_meta->SetGradNode(
        std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
  }
  return ToPyObject(tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
#if defined(PADDLE_WITH_CUDA)
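// Python binding: async_read(src, dst, index, buffer, offset, count).
// Asynchronously copies rows of the pinned-memory tensor `src` into the GPU
// tensor `dst`: the (offset, count) segments are issued directly as
// host-to-device cudaMemcpyAsync calls, while the rows selected by `index`
// are first gathered into the pinned `buffer` and then copied in a single
// async transfer.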
static PyObject* eager_api_async_read(PyObject* self,
                                      PyObject* args,
                                      PyObject* kwargs) {
  EAGER_TRY
  auto& src = GetTensorFromArgs("async_read", "src", args, 0, false);
  auto& dst = GetTensorFromArgs("async_read", "dst", args, 1, false);
  auto& index = GetTensorFromArgs("async_read", "index", args, 2, false);
  auto& buffer = GetTensorFromArgs("async_read", "buffer", args, 3, false);
  auto& offset = GetTensorFromArgs("async_read", "offset", args, 4, false);
  auto& count = GetTensorFromArgs("async_read", "count", args, 5, false);
  PADDLE_ENFORCE_EQ(
      src.is_gpu_pinned(),
      true,
      platform::errors::InvalidArgument("Required `src` device should be "
                                        "CUDAPinnedPlace, but received %d.",
                                        src.place()));
  PADDLE_ENFORCE_EQ(
      dst.is_gpu(),
      true,
      platform::errors::InvalidArgument(
          "Required `dst` device should be CUDAPlace, but received %d.",
          dst.place()));
  PADDLE_ENFORCE_EQ(
      index.is_cpu(),
      true,
      platform::errors::InvalidArgument(
          "Required `index` device should be CPUPlace, but received %d.",
          index.place()));
  PADDLE_ENFORCE_EQ(buffer.is_gpu_pinned(),
                    true,
                    platform::errors::InvalidArgument(
                        "Required `buffer` device should be CUDAPinnedPlace, "
                        "but received %d.",
                        buffer.place()));
  PADDLE_ENFORCE_EQ(
      offset.is_cpu(),
      true,
      platform::errors::InvalidArgument(
          "Required `offset` device should be CPUPlace, but received %d.",
          offset.place()));
  PADDLE_ENFORCE_EQ(
      count.is_cpu(),
      true,
      platform::errors::InvalidArgument(
          "Required `count` device should be CPUPlace, but received %d.",
          count.place()));

  auto& src_tensor = src;
  auto* dst_tensor = &dst;
  auto& index_tensor = index;
  auto* buffer_tensor = &buffer;
  auto& offset_tensor = offset;
  auto& count_tensor = count;
  auto* dst_data = dst_tensor->mutable_data<float>(dst.place());
  const auto& deviceId = paddle::platform::GetCurrentDeviceId();

  PADDLE_ENFORCE_EQ(src_tensor.dims().size(),
                    dst_tensor->dims().size(),
                    platform::errors::InvalidArgument(
                        "`src` and `dst` should have same tensor shape, "
                        "except for the first dimension."));
  PADDLE_ENFORCE_EQ(src_tensor.dims().size(),
                    buffer_tensor->dims().size(),
                    platform::errors::InvalidArgument(
                        "`src` and `buffer` should have same tensor shape, "
                        "except for the first dimension."));
  for (int i = 1; i < src_tensor.dims().size(); i++) {
    PADDLE_ENFORCE_EQ(src_tensor.dims()[i],
                      dst_tensor->dims()[i],
                      platform::errors::InvalidArgument(
                          "`src` and `dst` should have the same tensor shape, "
                          "except for the first dimension."));
    PADDLE_ENFORCE_EQ(
        src_tensor.dims()[i],
        buffer_tensor->dims()[i],
        platform::errors::InvalidArgument(
            "`src` and `buffer` should have the same tensor shape, "
            "except for the first dimension."));
  }
  PADDLE_ENFORCE_EQ(index_tensor.dims().size(),
                    1,
                    platform::errors::InvalidArgument(
                        "`index` tensor should be one-dimensional."));

  auto stream =
      paddle::platform::stream::get_current_stream(deviceId)->raw_stream();

  int64_t numel = 0;  // total copy length
  int64_t copy_flag = offset_tensor.dims()[0];
  int64_t size = src_tensor.numel() / src_tensor.dims()[0];

  if (copy_flag != 0) {
    PADDLE_ENFORCE_EQ(offset_tensor.dims().size(),
                      1,
                      platform::errors::InvalidArgument(
                          "`offset` tensor should be one-dimensional."));
    PADDLE_ENFORCE_EQ(count_tensor.dims().size(),
                      1,
                      platform::errors::InvalidArgument(
                          "`count` tensor should be one-dimensional."));
    PADDLE_ENFORCE_EQ(offset_tensor.numel(),
                      count_tensor.numel(),
                      platform::errors::InvalidArgument(
                          "`offset` and `count` tensor size mismatch."));
    auto* offset_data = offset_tensor.data<int64_t>();
    auto* count_data = count_tensor.data<int64_t>();
    for (int64_t i = 0; i < count_tensor.numel(); i++) {
      numel += count_data[i];
    }
    PADDLE_ENFORCE_LE(
        numel + index_tensor.numel(),
        buffer_tensor->dims()[0],
        platform::errors::InvalidArgument("Buffer tensor size is too small."));
    PADDLE_ENFORCE_LE(
        numel + index_tensor.numel(),
        dst_tensor->dims()[0],
        platform::errors::InvalidArgument("Target tensor size is too small."));

    int64_t src_offset, dst_offset = 0, c;
    auto* src_data = src_tensor.data<float>();
    for (int64_t i = 0; i < offset_tensor.numel(); i++) {
      src_offset = offset_data[i], c = count_data[i];
      PADDLE_ENFORCE_LE(
          src_offset + c,
          src_tensor.dims()[0],
          platform::errors::InvalidArgument("Invalid offset or count index."));
      PADDLE_ENFORCE_LE(
          dst_offset + c,
          dst_tensor->dims()[0],
          platform::errors::InvalidArgument("Invalid offset or count index."));
      cudaMemcpyAsync(dst_data + (dst_offset * size),
                      src_data + (src_offset * size),
                      c * size * sizeof(float),
                      cudaMemcpyHostToDevice,
                      stream);
      dst_offset += c;
    }
  } else {
    PADDLE_ENFORCE_LE(
        index_tensor.numel(),
        buffer_tensor->dims()[0],
        platform::errors::InvalidArgument("Buffer tensor size is too small."));
  }

  // Gather the rows selected by index into the buffer
  auto index_select = [](const paddle::experimental::Tensor& src_tensor,
                         const paddle::experimental::Tensor& index_tensor,
                         paddle::experimental::Tensor* buffer_tensor) {
    auto* src_data = src_tensor.data<float>();
    auto* index_data = index_tensor.data<int64_t>();
    auto* buffer_data = buffer_tensor->data<float>();
    const int& slice_size = src_tensor.numel() / src_tensor.dims()[0];
    const int& copy_bytes = slice_size * sizeof(float);
    int64_t c = 0;
    for (int64_t i = 0; i < index_tensor.numel(); i++) {
      std::memcpy(buffer_data + c * slice_size,
                  src_data + index_data[i] * slice_size,
                  copy_bytes);
      c += 1;
    }
  };
  index_select(src_tensor, index_tensor, buffer_tensor);

  // Copy the data to device memory
  cudaMemcpyAsync(dst_data + (numel * size),
                  buffer_tensor->data<float>(),
                  index_tensor.numel() * size * sizeof(float),
                  cudaMemcpyHostToDevice,
                  stream);
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

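// Python binding: async_write(src, dst, offset, count). Asynchronously
// scatters consecutive row segments of the GPU tensor `src` into the
// pinned-memory tensor `dst` at the given (offset, count) positions via
// device-to-host cudaMemcpyAsync.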
static PyObject* eager_api_async_write(PyObject* self,
                                       PyObject* args,
                                       PyObject* kwargs) {
  EAGER_TRY
  auto& src = GetTensorFromArgs("async_write", "src", args, 0, false);
  auto& dst = GetTensorFromArgs("async_write", "dst", args, 1, false);
  auto& offset = GetTensorFromArgs("async_write", "offset", args, 2, false);
  auto& count = GetTensorFromArgs("async_write", "count", args, 3, false);
  PADDLE_ENFORCE_EQ(
      src.is_gpu(),
      true,
      platform::errors::InvalidArgument(
          "Required `src` device should be CUDAPlace, but received %d. ",
          src.place()));
  PADDLE_ENFORCE_EQ(dst.is_gpu_pinned(),
                    true,
                    platform::errors::InvalidArgument(
                        "Required `dst` device should be CUDAPinnedPlace, "
                        "but received %d. ",
                        dst.place()));
  PADDLE_ENFORCE_EQ(
      offset.is_cpu(),
      true,
      platform::errors::InvalidArgument("Required `offset` device should "
                                        "be CPUPlace, but received %d. ",
                                        offset.place()));
  PADDLE_ENFORCE_EQ(
      count.is_cpu(),
      true,
      platform::errors::InvalidArgument(
          "Required `count` device should be CPUPlace, but received %d. ",
          count.place()));

  // TODO(daisiming): In the future, add index as an argument, following
  // async_read.
  auto& src_tensor = src;
  auto* dst_tensor = &dst;
  auto& offset_tensor = offset;
  auto& count_tensor = count;
  const auto& deviceId = paddle::platform::GetCurrentDeviceId();

  PADDLE_ENFORCE_EQ(offset_tensor.dims().size(),
                    1,
                    platform::errors::InvalidArgument(
                        "`offset` tensor should be one-dimensional."));
  PADDLE_ENFORCE_EQ(count_tensor.dims().size(),
                    1,
                    platform::errors::InvalidArgument(
                        "`count` tensor should be one-dimensional."));
  PADDLE_ENFORCE_EQ(offset_tensor.numel(),
                    count_tensor.numel(),
                    platform::errors::InvalidArgument(
                        "`offset` and `count` tensor size mismatch."));
  PADDLE_ENFORCE_EQ(src_tensor.dims().size(),
                    dst_tensor->dims().size(),
                    platform::errors::InvalidArgument(
                        "`src` and `dst` should have the same tensor shape, "
                        "except for the first dimension."));
  for (int i = 1; i < src_tensor.dims().size(); i++) {
    PADDLE_ENFORCE_EQ(src_tensor.dims()[i],
                      dst_tensor->dims()[i],
                      platform::errors::InvalidArgument(
                          "`src` and `dst` should have the same tensor shape, "
                          "except for the first dimension."));
  }

  auto stream =
      paddle::platform::stream::get_current_stream(deviceId)->raw_stream();

  int64_t size = src_tensor.numel() / src_tensor.dims()[0];
  auto* src_data = src_tensor.data<float>();
  auto* dst_data = dst_tensor->data<float>();
  const int64_t* offset_data = offset_tensor.data<int64_t>();
  const int64_t* count_data = count_tensor.data<int64_t>();
  int64_t src_offset = 0, dst_offset, c;
  for (int64_t i = 0; i < offset_tensor.numel(); i++) {
    dst_offset = offset_data[i], c = count_data[i];
    PADDLE_ENFORCE_LE(
        src_offset + c,
        src_tensor.dims()[0],
        platform::errors::InvalidArgument("Invalid offset or count index"));
    PADDLE_ENFORCE_LE(
        dst_offset + c,
        dst_tensor->dims()[0],
        platform::errors::InvalidArgument("Invalid offset or count index"));
    cudaMemcpyAsync(dst_data + (dst_offset * size),
                    src_data + (src_offset * size),
                    c * size * sizeof(float),
                    cudaMemcpyDeviceToHost,
                    stream);
    src_offset += c;
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

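// Python binding: creates a tensor backed by CUDA unified virtual addressing
// (UVA) from a numpy array, dispatching on the array's dtype.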
static PyObject* eager_api_to_uva_tensor(PyObject* self,
                                         PyObject* args,
                                         PyObject* kwargs) {
  EAGER_TRY
  VLOG(4) << "Running in eager_api_to_uva_tensor.";
  auto new_tensor = std::shared_ptr<paddle::experimental::Tensor>(
      new paddle::experimental::Tensor(
          egr::Controller::Instance().GenerateUniqueName()));
  PyObject* obj = PyTuple_GET_ITEM(args, 0);
  auto array = py::cast<py::array>(py::handle(obj));

  int device_id = 0;
  PyObject* Py_device_id = PyTuple_GET_ITEM(args, 1);
  if (Py_device_id) {
    device_id = CastPyArg2AttrLong(Py_device_id, 1);
  }

  if (py::isinstance<py::array_t<int32_t>>(array)) {
    SetUVATensorFromPyArray<int32_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<int64_t>>(array)) {
    SetUVATensorFromPyArray<int64_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<float>>(array)) {
    SetUVATensorFromPyArray<float>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<double>>(array)) {
    SetUVATensorFromPyArray<double>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<int8_t>>(array)) {
    SetUVATensorFromPyArray<int8_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<int16_t>>(array)) {
    SetUVATensorFromPyArray<int16_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<paddle::platform::float16>>(array)) {
    SetUVATensorFromPyArray<paddle::platform::float16>(
        new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<bool>>(array)) {
    SetUVATensorFromPyArray<bool>(new_tensor, array, device_id);
  } else {
    // obj may be of any type, so obj.cast<py::array>() may fail;
    // array.dtype would then be a string of unknown meaning.
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Input object type error or incompatible array data type. "
        "tensor.set() supports arrays of bool, float16, float32, "
        "float64, int8, int16, int32, and int64; "
        "please check your input or input array data type."));
  }

  return ToPyObject(*(new_tensor.get()));
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
#endif

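// Module-level functions exposed to Python; registered on the module by
// BindFunctions() below.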
PyMethodDef variable_functions[] = {
    // TODO(jiabin): Remove scale when we have final state tests
    {"scale",
     (PyCFunction)(void (*)(void))eager_api_scale,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"run_backward",
     (PyCFunction)(void (*)(void))eager_api_run_backward,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"run_partial_grad",
     (PyCFunction)(void (*)(void))eager_api_run_partial_grad,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"_run_custom_op",
     (PyCFunction)(void (*)(void))eager_api_run_custom_op,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"tensor_copy",
     (PyCFunction)(void (*)(void))eager_api_tensor_copy,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"read_next_tensor_list",
     (PyCFunction)(void (*)(void))eager_api_read_next_tensor_list,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    /**sparse functions**/
    {"sparse_coo_tensor",
     (PyCFunction)(void (*)(void))eager_api_sparse_coo_tensor,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"sparse_csr_tensor",
     (PyCFunction)(void (*)(void))eager_api_sparse_csr_tensor,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
/**sparse functions**/
#if defined(PADDLE_WITH_CUDA)
    {"async_read",
     (PyCFunction)(void (*)(void))eager_api_async_read,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"async_write",
     (PyCFunction)(void (*)(void))eager_api_async_write,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"to_uva_tensor",
     (PyCFunction)(void (*)(void))eager_api_to_uva_tensor,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
#endif
    {NULL, NULL, 0, NULL}};

void BindFunctions(PyObject* module) {
  if (PyModule_AddFunctions(module, variable_functions) < 0) {
    PADDLE_THROW(platform::errors::Fatal(
        "Init Paddle error in BindFunctions(PyModule_AddFunctions)."));
    return;
  }
}

}  // namespace pybind
}  // namespace paddle