/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// disable numpy compile error

#if defined(_MSC_VER)
#include <BaseTsd.h>
typedef SSIZE_T ssize_t;
#endif

#include <Python.h>

#include <string>
#include <vector>

#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/backward.h"
#include "paddle/fluid/eager/custom_operator/custom_operator_node.h"
#include "paddle/fluid/eager/saved_tensors_hooks.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/custom_operator.h"
#include "paddle/fluid/framework/op_meta_info_helper.h"
#include "paddle/fluid/framework/python_headers.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/dynload/dynamic_loader.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/fluid/pybind/tensor_py.h"
#include "paddle/phi/api/ext/op_meta_info.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/api/lib/utils/tensor_utils.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/pybind/cuda_streams_py.h"
#endif

namespace paddle {
namespace pybind {

namespace py = ::pybind11;

extern PyTypeObject* p_tensor_type;
extern PyTypeObject* g_multidevicefeedreader_pytype;
extern PyTypeObject* g_orderedmultidevicefeedreader_pytype;

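// Total element count of a numpy array: the product of all dimension sizes
// read from the pybind11 array proxy.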
size_t PyArray_Size_(PyObject* numpy_data) {
  size_t res = 1;
  auto dims = pybind11::detail::array_proxy(numpy_data)->dimensions;
  auto nd = pybind11::detail::array_proxy(numpy_data)->nd;
  while (nd--) {
    res *= (*dims++);
  }
  return res;
}

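// An Allocation that aliases a numpy array's buffer instead of copying it.
// The PyObject is INCREF'd on construction and DECREF'd (under the GIL) on
// destruction, keeping the array alive for the allocation's lifetime.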
class EagerNumpyAllocation : public phi::Allocation {
 public:
  explicit EagerNumpyAllocation(PyObject* numpy_data, phi::DataType dtype)
      : Allocation(
            static_cast<void*>(pybind11::detail::array_proxy(numpy_data)->data),
            framework::DataTypeSize(dtype) * PyArray_Size_(numpy_data),
            paddle::platform::CPUPlace()),
        arr_(numpy_data) {
    PADDLE_ENFORCE_NOT_NULL(
        arr_,
        platform::errors::InvalidArgument("The underlying PyObject pointer of "
                                          "numpy array cannot be nullptr"));
    PADDLE_ENFORCE_NE(
        arr_,
        Py_None,
        platform::errors::PreconditionNotMet(
            "The underlying PyObject pointer of numpy array cannot be None"));
    Py_INCREF(arr_);
  }
  ~EagerNumpyAllocation() override {
    py::gil_scoped_acquire gil;
    Py_DECREF(arr_);
  }

 private:
  PyObject* arr_;
};

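// Python binding for eager-mode scale. Unpacks (tensor, scale, bias,
// bias_after_scale, trace_backward) from the args tuple and runs egr::scale
// with the GIL released. Roughly callable from Python as
//   core.eager.scale(t, 2.0, 1.0, True, False)
// though the exact module path depends on where BindFunctions() below is
// registered.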
static PyObject* eager_api_scale(PyObject* self,
                                 PyObject* args,
                                 PyObject* kwargs) {
  EAGER_TRY
  // TODO(jiabin): Sync Tensor and Variable here when we support

  auto& tensor =
      reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor;
  float scale = CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 1), 1);
  float bias = CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 2), 2);
  bool bias_after_scale = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  bool trace_backward = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  paddle::experimental::Tensor ret;
  {
    eager_gil_scoped_release guard;
    ret = egr::scale(tensor, scale, bias, bias_after_scale, trace_backward);
  }
  return ToPyObject(ret);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

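// Python binding for backward(): runs egr::Backward over the given output
// tensors with optional explicit grad tensors; retain_graph keeps the graph
// alive so backward can be invoked again.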
static PyObject* eager_api_run_backward(PyObject* self,
                                        PyObject* args,
                                        PyObject* kwargs) {
  EAGER_TRY
  auto tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);
  auto grad_tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
  bool retain_graph = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
  {
    eager_gil_scoped_release guard;
    egr::Backward(tensors, grad_tensors, retain_graph);
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

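// Python binding for partial grad computation (the paddle.grad-style API):
// egr::Grad returns the gradients of `tensors` w.r.t. `inputs`, honoring
// retain_graph / create_graph / only_inputs / allow_unused and excluding
// `no_grad_vars`; uninitialized results come back to Python as None.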
static PyObject* eager_api_run_partial_grad(PyObject* self,
                                            PyObject* args,
                                            PyObject* kwargs) {
  EAGER_TRY
  auto tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);
  auto inputs = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
  auto grad_tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 2), 2);
  auto retain_graph = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  auto create_graph = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  auto only_inputs = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 5), 5);
  auto allow_unused = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 6), 6);
  auto no_grad_vars = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 7), 7);
  std::vector<paddle::experimental::Tensor> result;
  {
    eager_gil_scoped_release guard;
    result = egr::Grad(tensors,
                       inputs,
                       grad_tensors,
                       retain_graph,
                       create_graph,
                       only_inputs,
                       allow_unused,
                       no_grad_vars);
    VLOG(1) << " in eager_api_run_partial_grad, after running egr::Grad";
  }
  return ToPyObject(result, true /* return_py_none_if_not_initialize */);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

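// Copies `src` to `dst` on the requested place (optionally blocking), then
// propagates src's stop_gradient and persistable flags onto dst's autograd
// meta.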
static PyObject* eager_api_tensor_copy(PyObject* self,
                                       PyObject* args,
                                       PyObject* kwargs) {
  EAGER_TRY
  paddle::experimental::Tensor& src =
      reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor;
  paddle::experimental::Tensor& dst =
      reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 1))->tensor;
  auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 2), 2);
  bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);

  {
    eager_gil_scoped_release guard;
    dst = src.copy_to(place, blocking);
    egr::EagerUtils::autograd_meta(&dst)->SetStopGradient(
        egr::EagerUtils::autograd_meta(&(src))->StopGradient());
    egr::EagerUtils::autograd_meta(&dst)->SetPersistable(
        egr::EagerUtils::autograd_meta(&(src))->Persistable());
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

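// Returns the grad of every tensor in the input list. Tensors without
// autograd meta, with stop_gradient set, or with an uninitialized grad
// contribute an empty placeholder Tensor, keeping the output aligned
// index-for-index with the input.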
PyObject* eager_api_get_all_grads(PyObject* self,
                                  PyObject* args,
                                  PyObject* kwargs) {
  EAGER_TRY
  auto tensor_list = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);

  std::vector<paddle::experimental::Tensor> ret;
  for (auto& tensor : tensor_list) {
    VLOG(6) << "Get grad for tensor: " << tensor.name();
    auto meta = egr::EagerUtils::nullable_autograd_meta(tensor);
    if (!meta || meta->StopGradient()) {
      ret.emplace_back(paddle::experimental::Tensor());
      continue;
    }
    if (meta && meta->Grad().initialized()) {
      ret.emplace_back(meta->Grad());
    } else {
      ret.emplace_back(paddle::experimental::Tensor());
    }
  }
  return ToPyObject(ret, true);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

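// Buckets the initialized grads of the given tensors by dtype into three
// lists (FP16, BF16, FP32, in that order); grads of any other dtype are
// silently skipped.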
PyObject* eager_api_get_grads_lists(PyObject* self,
                                    PyObject* args,
                                    PyObject* kwargs) {
  EAGER_TRY
  auto tensor_list = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);
  // The order of the 3 vectors is: FP16_grads, BF16_grads, FP32_grads
  std::vector<std::vector<paddle::experimental::Tensor>> ret(3);

  for (auto& tensor : tensor_list) {
    VLOG(6) << "Get grad for tensor: " << tensor.name();
    auto meta = egr::EagerUtils::nullable_autograd_meta(tensor);
    if (meta && meta->Grad().initialized()) {
      auto& grad = meta->Grad();
      switch (grad.dtype()) {
        case paddle::experimental::DataType::FLOAT16:
          ret[0].emplace_back(grad);
          break;
        case paddle::experimental::DataType::BFLOAT16:
          ret[1].emplace_back(grad);
          break;
        case paddle::experimental::DataType::FLOAT32:
          ret[2].emplace_back(grad);
          break;
        default:
          break;
      }
    }
  }

  return ToPyObject(ret);

  EAGER_CATCH_AND_THROW_RETURN_NULL
}

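// Wraps each DenseTensor produced by a data reader in a
// paddle::experimental::Tensor with stop_gradient=true and
// persistable=false, so reader outputs enter the eager graph as leaf
// tensors that do not require grad.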
static PyObject* eager_api_read_next_tensor_list(PyObject* self,
                                                 PyObject* args,
                                                 PyObject* kwargs) {
  EAGER_TRY
  auto tensor_base_list =
      CastPyArg2VectorOfTensorBase(PyTuple_GET_ITEM(args, 0), 0);
  std::vector<paddle::experimental::Tensor> tensor_list;
  {
    eager_gil_scoped_release guard;
    tensor_list.reserve(tensor_base_list.size());
    auto func = [](phi::DenseTensor& tensor_base) {
      paddle::experimental::Tensor tensor(
          egr::Controller::Instance().GenerateUniqueName());
      auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
      autograd_meta->SetPersistable(false);
      autograd_meta->SetStopGradient(true);
      tensor.set_impl(std::make_shared<phi::DenseTensor>(tensor_base));
      return tensor;
    };
    for (auto& tensor_base : tensor_base_list) {
      tensor_list.emplace_back(func(tensor_base));
    }
  }
  return ToPyObject(tensor_list);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

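// Builds (once per op_type) the CustomEdgesSlotMap for a custom operator:
// five index maps that relate forward slots to grad-op slots.
//   res[0]: fwd input  -> grad output  (matched via the "@GRAD" suffix)
//   res[1]: fwd output -> grad input named "<output>@GRAD"
//   res[2]: fwd output -> grad input that reuses the forward output
//   res[3]: fwd input  -> grad input that reuses the forward input
//   res[4]: fwd attr   -> grad attr   (grad attrs must be forward attrs)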
static void ConstructFwdAndBwdMap(
    const std::vector<paddle::OpMetaInfo>& vec_map,
    const std::string& op_type) {
  auto& in_out_map = egr::Controller::Instance().GetCustomEdgesSlotMap();
  if (in_out_map.find(op_type) != in_out_map.end()) {
    VLOG(7) << "Find Exist CustomEdgesSlotMap Skip >>>> ";
    return;
  } else {
    VLOG(7) << "Construct CustomEdgesSlotMap ";
    auto inputs_names =
        paddle::framework::OpMetaInfoHelper::GetInputs(vec_map[0]);
    auto outputs_names =
        paddle::framework::OpMetaInfoHelper::GetOutputs(vec_map[0]);
    auto attrs_names =
        paddle::framework::OpMetaInfoHelper::GetAttrs(vec_map[0]);
    auto grad_outputs_names =
        paddle::framework::OpMetaInfoHelper::GetOutputs(vec_map[1]);
    auto grad_inputs_names =
        paddle::framework::OpMetaInfoHelper::GetInputs(vec_map[1]);
    auto grad_attrs_names =
        paddle::framework::OpMetaInfoHelper::GetAttrs(vec_map[1]);
    std::vector<std::unordered_map<int, int>> res(5);

    in_out_map.insert({op_type, {res}});
    // Prepare pos map for grad_outputs
    VLOG(7) << "Prepare pos map for grad_outputs";
    PADDLE_ENFORCE_LE(
        grad_outputs_names.size(),
        inputs_names.size(),
        paddle::platform::errors::InvalidArgument(
            "Grad outputs num should be less than or equal to forward "
            "inputs num."));
    for (size_t i = 0; i < grad_outputs_names.size(); i++) {
      size_t end = grad_outputs_names[i].find("@GRAD");
      PADDLE_ENFORCE_NE(
          end,
          std::string::npos,
          paddle::platform::errors::NotFound(
              "All grad outputs should be grad vars (named with an @GRAD "
              "suffix), but %s is not; please check your op and fix it.",
              grad_outputs_names[i]));
      for (size_t j = 0; j < inputs_names.size(); j++) {
        if (grad_outputs_names[i].substr(0, end) == inputs_names[j]) {
          VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                  << " inputs: " << inputs_names[j] << " related to No." << i
                  << " grad_outputs: " << grad_outputs_names[i];
          in_out_map[op_type][0][0][j] = i;
        }
      }
    }
    // Prepare pos map for grad_inputs
    for (size_t i = 0; i < grad_inputs_names.size(); i++) {
      size_t end = grad_inputs_names[i].find("@GRAD");
      if (end != std::string::npos) {
        for (size_t j = 0; j < outputs_names.size(); j++) {
          if (grad_inputs_names[i].substr(0, end) == outputs_names[j]) {
            VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                    << " outputs: " << outputs_names[j] << " related to No."
                    << i << " grad_inputs's grad: " << grad_inputs_names[i];
            in_out_map[op_type][0][1][j] = i;
          }
        }
      } else {
        if (std::find(outputs_names.begin(),
                      outputs_names.end(),
                      grad_inputs_names[i]) != outputs_names.end()) {
          for (size_t j = 0; j < outputs_names.size(); j++) {
            if (grad_inputs_names[i] == outputs_names[j]) {
              VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                      << " outputs: " << outputs_names[j] << " related to No."
                      << i
                      << " grad_inputs fwd outputs: " << grad_inputs_names[i];
              in_out_map[op_type][0][2][j] = i;
            }
          }
        } else {
          for (size_t j = 0; j < inputs_names.size(); j++) {
            if (grad_inputs_names[i] == inputs_names[j]) {
              VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                      << " inputs: " << inputs_names[j] << " related to No."
                      << i
                      << " grad_inputs fwd inputs: " << grad_inputs_names[i];
              in_out_map[op_type][0][3][j] = i;
            }
          }
        }
      }
    }

    // Prepare pos map for grad attrs
    for (size_t i = 0; i < grad_attrs_names.size(); i++) {
      auto end = std::find(
          attrs_names.begin(), attrs_names.end(), grad_attrs_names[i]);
      PADDLE_ENFORCE_NE(end,
                        attrs_names.end(),
                        paddle::platform::errors::NotFound(
                            "All grad attrs should be one of the forward "
                            "attrs, but %s is not; please check your op "
                            "and fix it.",
                            grad_attrs_names[i]));
      for (size_t j = 0; j < attrs_names.size(); j++) {
        if (grad_attrs_names[i] == attrs_names[j]) {
          VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                  << " attrs: " << attrs_names[j] << " related to No." << i
                  << " grad_attrs: " << grad_attrs_names[i];
          in_out_map[op_type][0][4][j] = i;
        }
      }
    }
  }
}

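// Custom-op attrs arrive from Python as paddle::any holding whatever type
// pybind inferred (a Python bool comes through as a C++ bool). This widens
// each value to the type declared in the "name: type" attr spec (bool to
// int, bool or int to int64_t) and passes every other declared type through
// unchanged.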
static std::vector<paddle::any> CastAttrsToTargetType(
    const std::vector<paddle::any>& src,
    const std::vector<std::string>& attrs_names) {
  std::vector<paddle::any> res;
  PADDLE_ENFORCE_EQ(src.size(),
                    attrs_names.size(),
                    paddle::platform::errors::InvalidArgument(
                        "Expected attrs and the attrs_names list to have "
                        "the same size; your custom op declares %s attrs, "
                        "but you gave %s.",
                        attrs_names.size(),
                        src.size()));
  for (size_t i = 0; i < src.size(); i++) {
    size_t end = attrs_names[i].find(": ");
    std::string type_name = attrs_names[i].substr(end + 2);
    if (type_name == "int") {
      if (src[i].type() == typeid(bool)) {
        res.emplace_back(static_cast<int>(paddle::any_cast<bool>(src[i])));
      } else if (src[i].type() == typeid(int)) {
        res.emplace_back(src[i]);
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Your No. %s attr should be bool or int32; other types are "
            "forbidden for now, but we got %s. Please check your code.",
            i,
            src[i].type().name()));
      }
    } else if (type_name == "int64_t") {
      if (src[i].type() == typeid(bool)) {
        res.emplace_back(static_cast<int64_t>(paddle::any_cast<bool>(src[i])));
      } else if (src[i].type() == typeid(int)) {
        res.emplace_back(static_cast<int64_t>(paddle::any_cast<int>(src[i])));
      } else if (src[i].type() == typeid(int64_t)) {
        res.emplace_back(src[i]);
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Your No. %s attr should be bool, int32 or int64_t; other "
            "types are forbidden for now, but we got %s. Please check "
            "your code.",
            i,
            src[i].type().name()));
      }
    } else {
      res.emplace_back(src[i]);
    }
  }
  return res;
}

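// Python binding for invoking a jit::Function: casts the second argument to
// a vector of tensors and calls the function with the GIL released.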
static PyObject* eager_api_jit_function_call(PyObject* self,
                                             PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY

  std::shared_ptr<jit::Function> function =
      CastPyArg2JitFunction(PyTuple_GET_ITEM(args, 0), 0);
  std::vector<paddle::experimental::Tensor> ins =
      CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
  std::vector<paddle::experimental::Tensor> outs;
  {
    eager_gil_scoped_release guard;
    outs = (*function)(ins);
  }
  return ToPyObject(outs);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

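// Python binding that runs a custom operator in eager mode. Roughly:
//  1. cast the attrs to their declared types and run the forward kernel;
//  2. collect autograd metas for every input/output slot;
//  3. if any input requires grad and a grad kernel is registered, build a
//     RunCustomOpNode and wire its grad inputs/outputs/attrs using the slot
//     map produced by ConstructFwdAndBwdMap above.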
static PyObject* eager_api_run_custom_op(PyObject* self,
                                         PyObject* args,
                                         PyObject* kwargs) {
  EAGER_TRY
  paddle::CustomOpKernelContext ctx =
      CastPyArg2CustomOpKernelContext(PyTuple_GET_ITEM(args, 0), 0);
  std::string op_type = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 1), 1);
  bool trace_backward = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
  {
    eager_gil_scoped_release guard;
    VLOG(7) << "Preparing Custom Op: " << op_type
            << ", trace_backward is: " << trace_backward;
    auto meta_info_map = egr::Controller::Instance().GetOpMetaInfoMap();
    PADDLE_ENFORCE_NE(
        meta_info_map.find(op_type),
        meta_info_map.end(),
        paddle::platform::errors::NotFound(
            "Can't find %s in Eager OpMetaInfoMap which should be "
            "created by LoadOpMetaInfoAndRegisterOp, please make "
            "sure you registered your op first and try again. ",
            op_type));
    VLOG(7) << "Run Kernel of Custom Op: " << op_type;
    std::vector<paddle::any> res_attrs =
        CastAttrsToTargetType(ctx.Attrs(),
                              paddle::framework::OpMetaInfoHelper::GetAttrs(
                                  meta_info_map.at(op_type)[0]));
    ctx.EmplaceBackAttrs(res_attrs);
    const auto& vec_map = meta_info_map.at(op_type);
    (*paddle::framework::OpMetaInfoHelper::GetKernelFn(vec_map[0]))(&ctx);

    VLOG(7) << "Get AutogradMeta for inputs and outputs for Custom Op";
    std::vector<std::vector<egr::AutogradMeta*>> ins_auto_grad_metas;
    std::vector<std::vector<egr::AutogradMeta*>> outs_auto_grad_metas;
    VLOG(7) << "Slot num of inputs: " << ctx.InputRange().size();
    ins_auto_grad_metas.resize(ctx.InputRange().size());
    VLOG(7) << "Slot num of outputs: " << ctx.OutputRange().size();
    outs_auto_grad_metas.resize(ctx.OutputRange().size());

    for (size_t i = 0; i < ctx.InputRange().size(); i++) {
      ins_auto_grad_metas[i] =
          egr::EagerUtils::nullable_autograd_meta(ctx.InputsBetween(
              ctx.InputRangeAt(i).first, ctx.InputRangeAt(i).second));
    }
    for (size_t i = 0; i < ctx.OutputRange().size(); i++) {
      outs_auto_grad_metas[i] =
          egr::EagerUtils::unsafe_autograd_meta(ctx.OutputsBetweeen(
              ctx.OutputRangeAt(i).first, ctx.OutputRangeAt(i).second));
    }
    bool require_any_grad = false;
    for (size_t i = 0; i < ins_auto_grad_metas.size(); i++) {
      require_any_grad =
          require_any_grad || egr::EagerUtils::ComputeRequireGrad(
                                  trace_backward, &(ins_auto_grad_metas[i]));
    }
    if (require_any_grad && (vec_map.size() > 1)) {
      VLOG(6) << " Construct Grad for Custom Op: " << op_type;
      ConstructFwdAndBwdMap(vec_map, op_type);
      for (size_t i = 0; i < outs_auto_grad_metas.size(); i++) {
        egr::EagerUtils::PassStopGradient(false, &(outs_auto_grad_metas[i]));
      }
      auto grad_node = std::make_shared<egr::RunCustomOpNode>(
          outs_auto_grad_metas.size(), ins_auto_grad_metas.size(), op_type);
      auto slot_map =
          egr::Controller::Instance().GetCustomEdgesSlotMap().at(op_type);
      // Prepare Grad outputs
      size_t no_grad_cnt = 0;
      for (size_t i = 0; i < ins_auto_grad_metas.size(); i++) {
        const std::vector<paddle::experimental::Tensor>& in_tensors =
            ctx.InputsBetween(ctx.InputRangeAt(i).first,
                              ctx.InputRangeAt(i).second);

        if (slot_map[0][0].find(i) != slot_map[0][0].end()) {
          grad_node->SetGradOutMeta(in_tensors, slot_map[0][0][i]);
        } else {
          grad_node->SetGradOutMeta(
              in_tensors, ins_auto_grad_metas.size() - 1 - no_grad_cnt);
          no_grad_cnt++;
        }
      }
      // Prepare Grad inputs with grad of fwd outputs
      for (size_t i = 0; i < outs_auto_grad_metas.size(); i++) {
        const std::vector<paddle::experimental::Tensor>& out_tensors =
            ctx.OutputsBetweeen(ctx.OutputRangeAt(i).first,
                                ctx.OutputRangeAt(i).second);

        egr::EagerUtils::SetOutRankWithSlot(&(outs_auto_grad_metas[i]), i);
        egr::EagerUtils::SetHistory(&(outs_auto_grad_metas[i]), grad_node);
        grad_node->SetGradInMeta(out_tensors, i);
        egr::EagerUtils::CheckAndRetainGrad(out_tensors);
      }

      // Prepare Grad inputs with fwd outputs
      for (auto it = slot_map[0][2].begin(); it != slot_map[0][2].end(); it++) {
        VLOG(7) << "Prepare fwd_outs: " << it->first
                << " to grad_inputs: " << it->second;
        grad_node->fwd_outs[it->second] =
            egr::RunCustomOpNode::ConstructTensorWrapper(
                ctx.OutputsBetweeen(ctx.OutputRangeAt(it->first).first,
                                    ctx.OutputRangeAt(it->first).second));
      }

      // Prepare Grad inputs with fwd inputs
      for (auto it = slot_map[0][3].begin(); it != slot_map[0][3].end(); it++) {
        VLOG(7) << "Prepare fwd_ins: " << it->first
                << " to grad_inputs: " << it->second;
        grad_node->fwd_ins[it->second] =
            egr::RunCustomOpNode::ConstructTensorWrapper(
                ctx.InputsBetween(ctx.InputRangeAt(it->first).first,
                                  ctx.InputRangeAt(it->first).second));
      }

      auto attrs_names = paddle::framework::OpMetaInfoHelper::GetAttrs(
          meta_info_map.at(op_type)[1]);
      std::vector<paddle::any> attrs(attrs_names.size());
      // Prepare attrs for Grad node
      for (auto it = slot_map[0][4].begin(); it != slot_map[0][4].end(); it++) {
        VLOG(7) << "Prepare fwd attrs: " << it->first
                << " to grad_attrs: " << it->second;
        attrs[it->second] = res_attrs[it->first];
      }
      grad_node->SetAttrs(attrs);
    }
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

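// Builds a SparseCooTensor from dense indices/values tensors and a shape,
// then attaches autograd meta (adding a GradNodeAccumulation when the
// tensor has no grad node yet) so it behaves like any other eager tensor.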
static PyObject* eager_api_sparse_coo_tensor(PyObject* self,
                                             PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto non_zero_indices = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 2), 2);
  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  paddle::experimental::Tensor tensor;
  {
    eager_gil_scoped_release guard;
    PADDLE_ENFORCE(non_zero_indices.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the non-zero indices must be a DenseTensor."));
    PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the non-zero elements must be a DenseTensor."));
    auto dense_indices =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_indices.impl());
    auto dense_elements =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
    // TODO(zhangkaihuo): After creating SparseCooTensor, call coalesced() to
    // sort and merge duplicate indices
    std::shared_ptr<phi::SparseCooTensor> coo_tensor =
        std::make_shared<phi::SparseCooTensor>(
            *dense_indices, *dense_elements, phi::make_ddim(dense_shape));
    tensor.set_impl(coo_tensor);
    auto name =
        egr::Controller::Instance().GenerateUniqueName("generated_tensor");
    tensor.set_name(name);
    auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
    autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
    if (!autograd_meta->GetMutableGradNode()) {
      VLOG(3) << "Tensor(" << name
              << ") doesn't have GradNode, add GradNodeAccumulation to it.";
      autograd_meta->SetGradNode(
          std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
    }
  }
  return ToPyObject(tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

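// CSR analogue of sparse_coo_tensor: builds a SparseCsrTensor from
// compressed row offsets (crows), column indices, and values.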
static PyObject* eager_api_sparse_csr_tensor(PyObject* self,
                                             PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto non_zero_crows = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  auto non_zero_cols = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 2), 2);
  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 3), 3);
  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  paddle::experimental::Tensor tensor;
  {
    eager_gil_scoped_release guard;
    PADDLE_ENFORCE(non_zero_crows.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the compressed non-zero rows must be a DenseTensor."));
    PADDLE_ENFORCE(non_zero_cols.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the non-zero cols must be a DenseTensor."));
    PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the non-zero elements must be a DenseTensor."));

    auto dense_crows =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_crows.impl());
    auto dense_cols =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_cols.impl());
    auto dense_elements =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
    std::shared_ptr<phi::SparseCsrTensor> csr_tensor =
        std::make_shared<phi::SparseCsrTensor>(*dense_crows,
                                               *dense_cols,
                                               *dense_elements,
                                               phi::make_ddim(dense_shape));
    tensor.set_impl(csr_tensor);
    auto name =
        egr::Controller::Instance().GenerateUniqueName("generated_tensor");
    tensor.set_name(name);
    auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
    autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
    if (!autograd_meta->GetMutableGradNode()) {
      VLOG(3) << "Tensor(" << name
              << ") doesn't have GradNode, add GradNodeAccumulation to it.";
      autograd_meta->SetGradNode(
          std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
    }
  }
  return ToPyObject(tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

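// Installs pack/unpack hooks for tensors saved for backward (the
// saved_tensors_hooks mechanism); the hooks are only registered while grad
// recording is enabled.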
static PyObject* eager_api_register_saved_tensors_hooks(PyObject* self,
                                                        PyObject* args,
                                                        PyObject* kwargs) {
  EAGER_TRY
  if (egr::Controller::Instance().HasGrad()) {
    auto pack_hook = PyTuple_GET_ITEM(args, 0);
    auto unpack_hook = PyTuple_GET_ITEM(args, 1);
    egr::SavedTensorsHooks::GetInstance().SetHooks(pack_hook, unpack_hook);
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* eager_api_reset_saved_tensors_hooks(PyObject* self,
                                                     PyObject* args,
                                                     PyObject* kwargs) {
  EAGER_TRY
  egr::SavedTensorsHooks::GetInstance().ResetHooks();
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

#if defined(PADDLE_WITH_CUDA)
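// Asynchronously gathers rows of the CUDA-pinned `src` into the GPU `dst`:
// contiguous (offset, count) row ranges are copied directly with
// cudaMemcpyAsync, then rows selected by `index` are staged through the
// pinned `buffer` before a final async host-to-device copy, all on the
// current device's stream. Only float32 data is handled here.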
static PyObject* eager_api_async_read(PyObject* self,
                                      PyObject* args,
                                      PyObject* kwargs) {
  EAGER_TRY
  auto& src = GetTensorFromArgs("async_read", "src", args, 0, false);
  auto& dst = GetTensorFromArgs("async_read", "dst", args, 1, false);
  auto& index = GetTensorFromArgs("async_read", "index", args, 2, false);
  auto& buffer = GetTensorFromArgs("async_read", "buffer", args, 3, false);
  auto& offset = GetTensorFromArgs("async_read", "offset", args, 4, false);
  auto& count = GetTensorFromArgs("async_read", "count", args, 5, false);
  {
    eager_gil_scoped_release guard;
    PADDLE_ENFORCE_EQ(
        src.is_gpu_pinned(),
        true,
        platform::errors::InvalidArgument("Required `src` device should be "
                                          "CUDAPinnedPlace, but received %d.",
                                          src.place()));
    PADDLE_ENFORCE_EQ(
        dst.is_gpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `dst` device should be CUDAPlace, but received %d.",
            dst.place()));
    PADDLE_ENFORCE_EQ(
        index.is_cpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `index` device should be CPUPlace, but received %d.",
            index.place()));
    PADDLE_ENFORCE_EQ(buffer.is_gpu_pinned(),
                      true,
                      platform::errors::InvalidArgument(
                          "Required `buffer` device should be CUDAPinnedPlace, "
                          "but received %d.",
                          buffer.place()));
    PADDLE_ENFORCE_EQ(
        offset.is_cpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `offset` device should be CPUPlace, but received %d.",
            offset.place()));
    PADDLE_ENFORCE_EQ(
        count.is_cpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `count` device should be CPUPlace, but received %d.",
            count.place()));

    auto& src_tensor = src;
    auto* dst_tensor = &dst;
    auto& index_tensor = index;
    auto* buffer_tensor = &buffer;
    auto& offset_tensor = offset;
    auto& count_tensor = count;
    auto* dst_data = dst_tensor->mutable_data<float>(dst.place());
    const auto& deviceId = paddle::platform::GetCurrentDeviceId();

    PADDLE_ENFORCE_EQ(src_tensor.dims().size(),
                      dst_tensor->dims().size(),
                      platform::errors::InvalidArgument(
                          "`src` and `dst` should have the same tensor shape, "
                          "except for the first dimension."));
    PADDLE_ENFORCE_EQ(src_tensor.dims().size(),
                      buffer_tensor->dims().size(),
                      platform::errors::InvalidArgument(
                          "`src` and `buffer` should have the same tensor shape, "
                          "except for the first dimension."));
    for (int i = 1; i < src_tensor.dims().size(); i++) {
      PADDLE_ENFORCE_EQ(
          src_tensor.dims()[i],
          dst_tensor->dims()[i],
          platform::errors::InvalidArgument(
              "`src` and `dst` should have the same tensor shape, "
              "except for the first dimension."));
      PADDLE_ENFORCE_EQ(
          src_tensor.dims()[i],
          buffer_tensor->dims()[i],
          platform::errors::InvalidArgument(
              "`src` and `buffer` should have the same tensor shape, "
              "except for the first dimension."));
    }
    PADDLE_ENFORCE_EQ(index_tensor.dims().size(),
                      1,
                      platform::errors::InvalidArgument(
                          "`index` tensor should be one-dimensional."));

    auto stream = paddle::platform::get_current_stream(deviceId)->raw_stream();

    int64_t numel = 0;  // total copy length
    int64_t copy_flag = offset_tensor.dims()[0];
    int64_t size = src_tensor.numel() / src_tensor.dims()[0];

    if (copy_flag != 0) {
      PADDLE_ENFORCE_EQ(offset_tensor.dims().size(),
                        1,
                        platform::errors::InvalidArgument(
                            "`offset` tensor should be one-dimensional."));
      PADDLE_ENFORCE_EQ(count_tensor.dims().size(),
                        1,
                        platform::errors::InvalidArgument(
                            "`count` tensor should be one-dimensional."));
      PADDLE_ENFORCE_EQ(offset_tensor.numel(),
                        count_tensor.numel(),
                        platform::errors::InvalidArgument(
                            "`offset` and `count` tensor size mismatch."));
      auto* offset_data = offset_tensor.data<int64_t>();
      auto* count_data = count_tensor.data<int64_t>();
      for (int64_t i = 0; i < count_tensor.numel(); i++) {
        numel += count_data[i];
      }
      PADDLE_ENFORCE_LE(numel + index_tensor.numel(),
                        buffer_tensor->dims()[0],
                        platform::errors::InvalidArgument(
                            "Buffer tensor size is too small."));
      PADDLE_ENFORCE_LE(numel + index_tensor.numel(),
                        dst_tensor->dims()[0],
                        platform::errors::InvalidArgument(
                            "Target tensor size is too small."));

      int64_t src_offset, dst_offset = 0, c;
      auto* src_data = src_tensor.data<float>();
      for (int64_t i = 0; i < offset_tensor.numel(); i++) {
        src_offset = offset_data[i], c = count_data[i];
        PADDLE_ENFORCE_LE(src_offset + c,
                          src_tensor.dims()[0],
                          platform::errors::InvalidArgument(
                              "Invalid offset or count index."));
        PADDLE_ENFORCE_LE(dst_offset + c,
                          dst_tensor->dims()[0],
                          platform::errors::InvalidArgument(
                              "Invalid offset or count index."));
        cudaMemcpyAsync(dst_data + (dst_offset * size),
                        src_data + (src_offset * size),
                        c * size * sizeof(float),
                        cudaMemcpyHostToDevice,
                        stream);
        dst_offset += c;
      }
    } else {
      PADDLE_ENFORCE_LE(index_tensor.numel(),
                        buffer_tensor->dims()[0],
                        platform::errors::InvalidArgument(
                            "Buffer tensor size is too small."));
    }

    // Gather the rows selected by `index` from src into the buffer
    auto index_select = [](const paddle::experimental::Tensor& src_tensor,
                           const paddle::experimental::Tensor& index_tensor,
                           paddle::experimental::Tensor* buffer_tensor) {
      auto* src_data = src_tensor.data<float>();
      auto* index_data = index_tensor.data<int64_t>();
      auto* buffer_data = buffer_tensor->data<float>();
      const int& slice_size = src_tensor.numel() / src_tensor.dims()[0];
      const int& copy_bytes = slice_size * sizeof(float);
      int64_t c = 0;
      for (int64_t i = 0; i < index_tensor.numel(); i++) {
        std::memcpy(buffer_data + c * slice_size,
                    src_data + index_data[i] * slice_size,
                    copy_bytes);
        c += 1;
      }
    };
    index_select(src_tensor, index_tensor, buffer_tensor);

    // Copy the data to device memory
    cudaMemcpyAsync(dst_data + (numel * size),
                    buffer_tensor->data<float>(),
                    index_tensor.numel() * size * sizeof(float),
                    cudaMemcpyHostToDevice,
                    stream);
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* eager_api_async_write(PyObject* self,
                                       PyObject* args,
                                       PyObject* kwargs) {
  EAGER_TRY
  auto& src = GetTensorFromArgs("async_write", "src", args, 0, false);
  auto& dst = GetTensorFromArgs("async_write", "dst", args, 1, false);
  auto& offset = GetTensorFromArgs("async_write", "offset", args, 2, false);
  auto& count = GetTensorFromArgs("async_write", "count", args, 3, false);
  {
    eager_gil_scoped_release guard;
    PADDLE_ENFORCE_EQ(
        src.is_gpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `src` device should be CUDAPlace, but received %d. ",
            src.place()));
    PADDLE_ENFORCE_EQ(dst.is_gpu_pinned(),
                      true,
                      platform::errors::InvalidArgument(
                          "Required `dst` device should be CUDAPinnedPlace, "
                          "but received %d. ",
                          dst.place()));
    PADDLE_ENFORCE_EQ(
        offset.is_cpu(),
        true,
        platform::errors::InvalidArgument("Required `offset` device should "
                                          "be CPUPlace, but received %d. ",
                                          offset.place()));
    PADDLE_ENFORCE_EQ(
        count.is_cpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `count` device should be CPUPlace, but received %d. ",
            count.place()));

    // TODO(daisiming): In the future, add index as an argument, following
    // async_read.
    auto& src_tensor = src;
    auto* dst_tensor = &dst;
    auto& offset_tensor = offset;
    auto& count_tensor = count;
    const auto& deviceId = paddle::platform::GetCurrentDeviceId();

    PADDLE_ENFORCE_EQ(offset_tensor.dims().size(),
                      1,
                      platform::errors::InvalidArgument(
                          "`offset` tensor should be one-dimensional."));
    PADDLE_ENFORCE_EQ(count_tensor.dims().size(),
                      1,
                      platform::errors::InvalidArgument(
                          "`count` tensor should be one-dimensional."));
    PADDLE_ENFORCE_EQ(offset_tensor.numel(),
                      count_tensor.numel(),
                      platform::errors::InvalidArgument(
                          "`offset` and `count` tensor size mismatch."));
    PADDLE_ENFORCE_EQ(src_tensor.dims().size(),
                      dst_tensor->dims().size(),
                      platform::errors::InvalidArgument(
                          "`src` and `dst` should have the same tensor shape, "
                          "except for the first dimension."));
    for (int i = 1; i < src_tensor.dims().size(); i++) {
      PADDLE_ENFORCE_EQ(
          src_tensor.dims()[i],
          dst_tensor->dims()[i],
          platform::errors::InvalidArgument(
              "`src` and `dst` should have the same tensor shape, "
              "except for the first dimension."));
    }

    auto stream = paddle::platform::get_current_stream(deviceId)->raw_stream();

    int64_t size = src_tensor.numel() / src_tensor.dims()[0];
    auto* src_data = src_tensor.data<float>();
    auto* dst_data = dst_tensor->data<float>();
    const int64_t* offset_data = offset_tensor.data<int64_t>();
    const int64_t* count_data = count_tensor.data<int64_t>();
    int64_t src_offset = 0, dst_offset, c;
    for (int64_t i = 0; i < offset_tensor.numel(); i++) {
      dst_offset = offset_data[i], c = count_data[i];
      PADDLE_ENFORCE_LE(
          src_offset + c,
          src_tensor.dims()[0],
          platform::errors::InvalidArgument("Invalid offset or count index"));
      PADDLE_ENFORCE_LE(
          dst_offset + c,
          dst_tensor->dims()[0],
          platform::errors::InvalidArgument("Invalid offset or count index"));
      cudaMemcpyAsync(dst_data + (dst_offset * size),
                      src_data + (src_offset * size),
                      c * size * sizeof(float),
                      cudaMemcpyDeviceToHost,
                      stream);
      src_offset += c;
    }
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

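// Creates a tensor backed by CUDA unified virtual addressing (UVA) memory
// from a numpy array, dispatching on the array's dtype; device_id is read
// from the optional second argument and defaults to 0.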
static PyObject* eager_api_to_uva_tensor(PyObject* self,
                                         PyObject* args,
                                         PyObject* kwargs) {
  EAGER_TRY
  VLOG(4) << "Running in eager_api_to_uva_tensor.";
  auto new_tensor = std::shared_ptr<paddle::experimental::Tensor>(
      new paddle::experimental::Tensor(
          egr::Controller::Instance().GenerateUniqueName()));
  PyObject* obj = PyTuple_GET_ITEM(args, 0);
  auto array = py::cast<py::array>(py::handle(obj));

  Py_ssize_t args_num = PyTuple_Size(args);
  int64_t device_id = 0;
  if (args_num > 1) {
    PyObject* Py_device_id = PyTuple_GET_ITEM(args, 1);
    if (Py_device_id) {
      device_id = CastPyArg2AttrLong(Py_device_id, 1);
    }
  }

  if (py::isinstance<py::array_t<int32_t>>(array)) {
    SetUVATensorFromPyArray<int32_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<int64_t>>(array)) {
    SetUVATensorFromPyArray<int64_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<float>>(array)) {
    SetUVATensorFromPyArray<float>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<double>>(array)) {
    SetUVATensorFromPyArray<double>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<int8_t>>(array)) {
    SetUVATensorFromPyArray<int8_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<int16_t>>(array)) {
    SetUVATensorFromPyArray<int16_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<paddle::platform::float16>>(array)) {
    SetUVATensorFromPyArray<paddle::platform::float16>(
        new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<bool>>(array)) {
    SetUVATensorFromPyArray<bool>(new_tensor, array, device_id);
  } else {
    // obj may be any type; obj.cast<py::array>() may fail, and then
    // array.dtype will be a string of unknown meaning.
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Input object type error or incompatible array data type. "
        "tensor.set() supports array with bool, float16, float32, "
        "float64, int8, int16, int32, and int64; "
        "please check your input or input array data type."));
  }
  return ToPyObject(*(new_tensor.get()));
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
#endif

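// Registers a Python callable as a backward "final hook": it is wrapped in
// a PyVoidHook and run once the whole backward pass finishes.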
static PyObject* eager_api__add_backward_final_hook(PyObject* self,
                                                    PyObject* args,
                                                    PyObject* kwargs) {
  EAGER_TRY
  PyObject* hook_func = PyTuple_GET_ITEM(args, 0);
  egr::Controller::Instance().RegisterBackwardFinalHook(
      std::make_shared<PyVoidHook>(hook_func));
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

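// Module-level method table; BindFunctions() below attaches these entries
// to the target module. Each row: Python name, C function,
// METH_VARARGS | METH_KEYWORDS calling convention, and a null docstring.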
PyMethodDef variable_functions[] = {
    // TODO(jiabin): Remove scale when we have final state tests
    {"scale",
     (PyCFunction)(void (*)(void))eager_api_scale,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"_add_backward_final_hook",
     (PyCFunction)(void (*)(void))eager_api__add_backward_final_hook,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"run_backward",
     (PyCFunction)(void (*)(void))eager_api_run_backward,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"run_partial_grad",
     (PyCFunction)(void (*)(void))eager_api_run_partial_grad,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"_run_custom_op",
     (PyCFunction)(void (*)(void))eager_api_run_custom_op,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"tensor_copy",
     (PyCFunction)(void (*)(void))eager_api_tensor_copy,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"get_all_grads",
     (PyCFunction)(void (*)(void))eager_api_get_all_grads,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"get_grads_lists",
     (PyCFunction)(void (*)(void))eager_api_get_grads_lists,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"read_next_tensor_list",
     (PyCFunction)(void (*)(void))eager_api_read_next_tensor_list,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"jit_function_call",
     (PyCFunction)(void (*)(void))eager_api_jit_function_call,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    /**sparse functions**/
    {"sparse_coo_tensor",
     (PyCFunction)(void (*)(void))eager_api_sparse_coo_tensor,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"sparse_csr_tensor",
     (PyCFunction)(void (*)(void))eager_api_sparse_csr_tensor,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"register_saved_tensors_hooks",
     (PyCFunction)(void (*)(void))eager_api_register_saved_tensors_hooks,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"reset_saved_tensors_hooks",
     (PyCFunction)(void (*)(void))eager_api_reset_saved_tensors_hooks,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
/**cuda functions**/
#if defined(PADDLE_WITH_CUDA)
    {"async_read",
     (PyCFunction)(void (*)(void))eager_api_async_read,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"async_write",
     (PyCFunction)(void (*)(void))eager_api_async_write,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"to_uva_tensor",
     (PyCFunction)(void (*)(void))eager_api_to_uva_tensor,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
#endif
    {NULL, NULL, 0, NULL}};

void BindFunctions(PyObject* module) {
  if (PyModule_AddFunctions(module, variable_functions) < 0) {
    PADDLE_THROW(platform::errors::Fatal(
        "Init Paddle error in BindFunctions(PyModule_AddFunctions)."));
    return;
  }
}

}  // namespace pybind
}  // namespace paddle