/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// disable numpy compile error

#if defined(_MSC_VER)
#include <BaseTsd.h>
typedef SSIZE_T ssize_t;
#endif

#include <Python.h>
// Avoid a problem with copysign defined in pyconfig.h on Windows.
#ifdef copysign
#undef copysign
#endif

#include <string>
#include <vector>

#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/backward.h"
#include "paddle/fluid/eager/custom_operator/custom_operator_node.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/custom_operator.h"
#include "paddle/fluid/framework/op_meta_info_helper.h"
#include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/framework/python_headers.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/dynload/dynamic_loader.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/fluid/pybind/tensor_py.h"
#include "paddle/phi/api/ext/op_meta_info.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/pybind/cuda_streams_py.h"
#endif

#include "gflags/gflags.h"
#include "paddle/phi/api/include/operants_manager.h"
#include "paddle/phi/api/include/tensor_operants.h"

DECLARE_string(tensor_operants_mode);

namespace paddle {
namespace pybind {

namespace py = ::pybind11;

extern PyTypeObject* p_tensor_type;
extern PyTypeObject* g_multidevicefeedreader_pytype;
extern PyTypeObject* g_orderedmultidevicefeedreader_pytype;

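// Returns the total element count of a numpy array (the product of its
// dimensions), read directly from the pybind11 array proxy.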
size_t PyArray_Size_(PyObject* numpy_data) {
  size_t res = 1;
  auto dims = pybind11::detail::array_proxy(numpy_data)->dimensions;
  auto nd = pybind11::detail::array_proxy(numpy_data)->nd;
  while (nd--) {
    res *= (*dims++);
  }
  return res;
}

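// An Allocation that aliases the underlying buffer of a numpy array instead
// of copying it. The constructor takes a reference on the PyObject so the
// array outlives the allocation; the destructor releases it under the GIL.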
class EagerNumpyAllocation : public phi::Allocation {
 public:
  explicit EagerNumpyAllocation(PyObject* numpy_data, phi::DataType dtype)
      : Allocation(
            static_cast<void*>(pybind11::detail::array_proxy(numpy_data)->data),
            phi::SizeOf(dtype) * PyArray_Size_(numpy_data),
            paddle::platform::CPUPlace()),
        arr_(numpy_data) {
    PADDLE_ENFORCE_NOT_NULL(
        arr_,
        platform::errors::InvalidArgument("The underlying PyObject pointer of "
                                          "numpy array cannot be nullptr"));
    PADDLE_ENFORCE_NE(
        arr_,
        Py_None,
        platform::errors::PreconditionNotMet(
            "The underlying PyObject pointer of numpy array cannot be None"));
    Py_INCREF(arr_);
  }
  ~EagerNumpyAllocation() override {
    py::gil_scoped_acquire gil;
    Py_DECREF(arr_);
  }

 private:
  PyObject* arr_;
};

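// Binding for the eager `scale` API: parses (tensor, scale, bias,
// bias_after_scale, trace_backward) from args and runs egr::scale with the
// GIL released.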
static PyObject* eager_api_scale(PyObject* self,
                                 PyObject* args,
                                 PyObject* kwargs) {
  EAGER_TRY
  // TODO(jiabin): Sync Tensor and Variable here when we support

  auto& tensor =
      reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor;
  float scale = CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 1), 1);
  float bias = CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 2), 2);
  bool bias_after_scale = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  bool trace_backward = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  paddle::Tensor ret;
  {
    eager_gil_scoped_release guard;
    ret = egr::scale(tensor, scale, bias, bias_after_scale, trace_backward);
  }
  return ToPyObject(ret);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

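// Binding for Tensor.backward(): runs the reverse pass from `tensors`,
// optionally seeded with `grad_tensors`, keeping the graph alive when
// retain_graph is true.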
static PyObject* eager_api_run_backward(PyObject* self,
                                        PyObject* args,
                                        PyObject* kwargs) {
  EAGER_TRY
  auto tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);
  auto grad_tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
  bool retain_graph = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
  {
    eager_gil_scoped_release guard;
    egr::Backward(tensors, grad_tensors, retain_graph);
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

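// Binding for paddle.grad(): computes gradients of `tensors` w.r.t. `inputs`
// via egr::Grad, honoring retain_graph / create_graph / only_inputs /
// allow_unused and the no_grad_vars exclusion list.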
static PyObject* eager_api_run_partial_grad(PyObject* self,
                                            PyObject* args,
                                            PyObject* kwargs) {
  EAGER_TRY
  auto tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);
  auto inputs = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
  auto grad_tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 2), 2);
  auto retain_graph = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  auto create_graph = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  auto only_inputs = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 5), 5);
  auto allow_unused = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 6), 6);
  auto no_grad_vars = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 7), 7);
  std::vector<paddle::Tensor> result;
  {
    eager_gil_scoped_release guard;
    result = egr::Grad(tensors,
                       inputs,
                       grad_tensors,
                       retain_graph,
                       create_graph,
                       only_inputs,
                       allow_unused,
                       no_grad_vars);
    VLOG(4) << " in eager_api_run_partial_grad, after runing egr::Grad";
  }
  return ToPyObject(result, true /* return_py_none_if_not_initialize */);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

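// Binding for tensor_copy: copies `src` to `dst` on the given place
// (optionally blocking) and propagates the stop_gradient and persistable
// flags from the source's autograd meta.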
static PyObject* eager_api_tensor_copy(PyObject* self,
                                       PyObject* args,
                                       PyObject* kwargs) {
  EAGER_TRY
  paddle::Tensor& src =
      reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor;
  paddle::Tensor& dst =
      reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 1))->tensor;
  auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 2), 2);
  bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);

  {
    eager_gil_scoped_release guard;
    dst = src.copy_to(place, blocking);
    egr::EagerUtils::autograd_meta(&dst)->SetStopGradient(
        egr::EagerUtils::autograd_meta(&(src))->StopGradient());
    egr::EagerUtils::autograd_meta(&dst)->SetPersistable(
        egr::EagerUtils::autograd_meta(&(src))->Persistable());
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

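// Returns the gradient of every tensor in the input list; tensors whose
// gradient is stopped or uninitialized yield an empty paddle::Tensor.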
PyObject* eager_api_get_all_grads(PyObject* self,
                                  PyObject* args,
                                  PyObject* kwargs) {
  EAGER_TRY
  auto tensor_list = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);

  std::vector<paddle::Tensor> ret;
  for (auto& tensor : tensor_list) {
    VLOG(6) << "Get grad for tensor: " << tensor.name();
    auto meta = egr::EagerUtils::nullable_autograd_meta(tensor);
    if (!meta || meta->StopGradient()) {
      ret.emplace_back(paddle::Tensor());
      continue;
    }
    if (meta && meta->Grad().initialized()) {
      ret.emplace_back(meta->Grad());
    } else {
      ret.emplace_back(paddle::Tensor());
    }
  }
  return ToPyObject(ret, true);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

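// Groups the initialized gradients of the input tensors into three lists by
// dtype; gradients of other dtypes are skipped.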
PyObject* eager_api_get_grads_lists(PyObject* self,
                                    PyObject* args,
                                    PyObject* kwargs) {
  EAGER_TRY
  auto tensor_list = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);
  // The order of the 3 vectors is: FP16_grads, BF16_grads, FP32_grads
  std::vector<std::vector<paddle::Tensor>> ret(3);

  for (auto& tensor : tensor_list) {
    VLOG(6) << "Get grad for tensor: " << tensor.name();
    auto meta = egr::EagerUtils::nullable_autograd_meta(tensor);
    if (meta && meta->Grad().initialized()) {
      auto& grad = meta->Grad();
      switch (grad.dtype()) {
        case paddle::experimental::DataType::FLOAT16:
          ret[0].emplace_back(grad);
          break;
        case paddle::experimental::DataType::BFLOAT16:
          ret[1].emplace_back(grad);
          break;
        case paddle::experimental::DataType::FLOAT32:
          ret[2].emplace_back(grad);
          break;
        default:
          break;
      }
    }
  }

  return ToPyObject(ret);

  EAGER_CATCH_AND_THROW_RETURN_NULL
}

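// Returns the proto var type of each tensor's gradient when it is an
// initialized dense FP32/FP16/BF16 tensor, and -1 for tensors whose gradient
// is stopped or uninitialized.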
PyObject* eager_api_get_grads_types(PyObject* self,
                                    PyObject* args,
                                    PyObject* kwargs) {
  EAGER_TRY
  auto tensor_list = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);

  std::vector<int> ret;

  for (auto& tensor : tensor_list) {
    VLOG(6) << "Get grad for tensor: " << tensor.name();
    auto meta = egr::EagerUtils::nullable_autograd_meta(tensor);
    if (!meta || meta->StopGradient()) {
      ret.emplace_back(-1);
      continue;
    }

    auto& grad = meta->Grad();
    if (meta && grad.initialized()) {
      if (grad.is_dense_tensor() &&
          (tensor.dtype() == paddle::experimental::DataType::FLOAT32 ||
           tensor.dtype() == paddle::experimental::DataType::FLOAT16 ||
           tensor.dtype() == paddle::experimental::DataType::BFLOAT16)) {
        ret.emplace_back(
            paddle::framework::TransToProtoVarType(tensor.dtype()));
      }
    } else {
      ret.emplace_back(-1);
    }
  }

  return ToPyObject(ret);

  EAGER_CATCH_AND_THROW_RETURN_NULL
}

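// Wraps a list of phi::DenseTensor read from a data loader into eager
// paddle::Tensor objects with stop_gradient=true and persistable=false.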
static PyObject* eager_api_read_next_tensor_list(PyObject* self,
                                                 PyObject* args,
                                                 PyObject* kwargs) {
  EAGER_TRY
  auto tensor_base_list =
      CastPyArg2VectorOfTensorBase(PyTuple_GET_ITEM(args, 0), 0);
  std::vector<paddle::Tensor> tensor_list;
  {
    eager_gil_scoped_release guard;
    tensor_list.reserve(tensor_base_list.size());
    auto func = [](phi::DenseTensor& tensor_base) {
      paddle::Tensor tensor(egr::Controller::Instance().GenerateUniqueName());
      auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
      autograd_meta->SetPersistable(false);
      autograd_meta->SetStopGradient(true);
      tensor.set_impl(std::make_shared<phi::DenseTensor>(tensor_base));
      return tensor;
    };
    for (auto& tensor_base : tensor_base_list) {
      tensor_list.emplace_back(func(tensor_base));
    }
  }
  return ToPyObject(tensor_list);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

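// Builds the CustomEdgesSlotMap for a custom operator: five index maps that
// relate the backward op's slots to the forward op's, in order:
// [0] grad_outputs -> fwd inputs, [1] grad_inputs -> grads of fwd outputs,
// [2] grad_inputs -> fwd outputs, [3] grad_inputs -> fwd inputs,
// [4] grad attrs -> fwd attrs.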
static void ConstructFwdAndBwdMap(
    const std::vector<paddle::OpMetaInfo>& vec_map,
    const std::string& op_type) {
  auto& in_out_map = egr::Controller::Instance().GetCustomEdgesSlotMap();
  if (in_out_map.find(op_type) != in_out_map.end()) {
    VLOG(7) << "Find Exist CustomEdgesSlotMap Skip >>>> ";
    return;
  } else {
    VLOG(7) << "Construct CustomEdgesSlotMap ";
    auto inputs_names =
        paddle::framework::OpMetaInfoHelper::GetInputs(vec_map[0]);
    auto outputs_names =
        paddle::framework::OpMetaInfoHelper::GetOutputs(vec_map[0]);
    auto attrs_names =
        paddle::framework::OpMetaInfoHelper::GetAttrs(vec_map[0]);
    auto grad_outputs_names =
        paddle::framework::OpMetaInfoHelper::GetOutputs(vec_map[1]);
    auto grad_inputs_names =
        paddle::framework::OpMetaInfoHelper::GetInputs(vec_map[1]);
    auto grad_attrs_names =
        paddle::framework::OpMetaInfoHelper::GetAttrs(vec_map[1]);
    std::vector<std::unordered_map<int, int>> res(5);

    in_out_map.insert({op_type, {res}});
    // Prepare pos map for grad_outputs
    VLOG(7) << "Prepare pos map for grad_outputs";
    PADDLE_ENFORCE_LE(
        grad_outputs_names.size(),
        inputs_names.size(),
        paddle::platform::errors::InvalidArgument(
            "Grad outputs num should be less equal than forward inputs num."));
    for (size_t i = 0; i < grad_outputs_names.size(); i++) {
      size_t end = grad_outputs_names[i].find("@GRAD");
      PADDLE_ENFORCE_NE(
          end,
          std::string::npos,
          paddle::platform::errors::NotFound(
              "All Grad outputs should be grad and we got %s is not grad var, "
              "please check your op and change to fit the rule.",
              grad_outputs_names[i]));
      for (size_t j = 0; j < inputs_names.size(); j++) {
        if (grad_outputs_names[i].substr(0, end) == inputs_names[j]) {
          VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                  << " inputs: " << inputs_names[j] << " related to No." << i
                  << " grad_outputs: " << grad_outputs_names[i];
          in_out_map[op_type][0][0][j] = i;
        }
      }
    }
    // Prepare pos map for grad_inputs
    for (size_t i = 0; i < grad_inputs_names.size(); i++) {
      size_t end = grad_inputs_names[i].find("@GRAD");
      if (end != std::string::npos) {
        for (size_t j = 0; j < outputs_names.size(); j++) {
          if (grad_inputs_names[i].substr(0, end) == outputs_names[j]) {
            VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                    << " outputs: " << outputs_names[j] << " related to No."
                    << i << " grad_inputs's grad: " << grad_inputs_names[i];
            in_out_map[op_type][0][1][j] = i;
          }
        }
      } else {
        if (std::find(outputs_names.begin(),
                      outputs_names.end(),
                      grad_inputs_names[i]) != outputs_names.end()) {
          for (size_t j = 0; j < outputs_names.size(); j++) {
            if (grad_inputs_names[i] == outputs_names[j]) {
              VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                      << " outputs: " << outputs_names[j] << " related to No."
                      << i
                      << " grad_inputs fwd outputs: " << grad_inputs_names[i];
              in_out_map[op_type][0][2][j] = i;
            }
          }
        } else {
          for (size_t j = 0; j < inputs_names.size(); j++) {
            if (grad_inputs_names[i] == inputs_names[j]) {
              VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                      << " inputs: " << inputs_names[j] << " related to No."
                      << i
                      << " grad_inputs fwd inputs: " << grad_inputs_names[i];
              in_out_map[op_type][0][3][j] = i;
            }
          }
        }
      }
    }

    // Prepare pos map for grad attrs
    for (size_t i = 0; i < grad_attrs_names.size(); i++) {
      auto end = std::find(
          attrs_names.begin(), attrs_names.end(), grad_attrs_names[i]);
      PADDLE_ENFORCE_NE(end,
                        attrs_names.end(),
                        paddle::platform::errors::NotFound(
                            "All Grad attrs should be one of forward attrs and "
                            "we got %s is not one of them, please check your "
                            "op and change to fit the rule.",
                            grad_attrs_names[i]));
      for (size_t j = 0; j < attrs_names.size(); j++) {
        if (grad_attrs_names[i] == attrs_names[j]) {
          VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                  << " attrs: " << attrs_names[j] << " related to No." << i
                  << " grad_attrs: " << grad_attrs_names[i];
          in_out_map[op_type][0][4][j] = i;
        }
      }
    }
  }
}

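// Normalizes custom-op attribute values to the types declared in
// attrs_names (entries look like "name: type"), widening bool -> int and
// bool/int -> int64_t where the declared type requires it.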
static std::vector<paddle::any> CastAttrsToTargetType(
    const std::vector<paddle::any>& src,
    const std::vector<std::string>& attrs_names) {
  std::vector<paddle::any> res;
  PADDLE_ENFORCE_EQ(src.size(),
                    attrs_names.size(),
                    paddle::platform::errors::InvalidArgument(
                        "We Expected same size of attrs and attrs_name list, "
                        "if u got this error indicate your custom op setting "
                        "%s attrs, but you just give %s",
                        attrs_names.size(),
                        src.size()));
  for (size_t i = 0; i < src.size(); i++) {
    size_t end = attrs_names[i].find(": ");
    std::string type_name = attrs_names[i].substr(end + 2);
    if (type_name == "int") {
      if (src[i].type() == typeid(bool)) {
        res.emplace_back(static_cast<int>(paddle::any_cast<bool>(src[i])));
      } else if (src[i].type() == typeid(int)) {
        res.emplace_back(src[i]);
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Your No. %s attrs should only can be bool or int32, other type is "
            "forbidden for now but we got %s. Check your code first please",
            i,
            src[i].type().name()));
      }
    } else if (type_name == "int64_t") {
      if (src[i].type() == typeid(bool)) {
        res.emplace_back(static_cast<int64_t>(paddle::any_cast<bool>(src[i])));
      } else if (src[i].type() == typeid(int)) {
        res.emplace_back(static_cast<int64_t>(paddle::any_cast<int>(src[i])));
      } else if (src[i].type() == typeid(int64_t)) {
        res.emplace_back(src[i]);
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Your No. %s attrs should only can be bool or int32 or int64_t, "
            "other type is forbidden for now but we got %s. Check your code "
            "first please",
            i,
            src[i].type().name()));
      }
    } else {
      res.emplace_back(src[i]);
    }
  }
  return res;
}

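// Binding for jit function call: invokes a compiled jit::Function on a list
// of input tensors with the GIL released and returns the outputs.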
static PyObject* eager_api_jit_function_call(PyObject* self,
                                             PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY

  std::shared_ptr<jit::Function> function =
      CastPyArg2JitFunction(PyTuple_GET_ITEM(args, 0), 0);
  std::vector<paddle::Tensor> ins =
      CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
  std::vector<paddle::Tensor> outs;
  {
    eager_gil_scoped_release guard;
    outs = (*function)(ins);
  }
  return ToPyObject(outs);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

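// Binding for running a user-registered custom operator in eager mode: looks
// up its meta info, casts attributes, runs the forward kernel (handling
// inplace outputs), and, when gradients are required, wires up a
// RunCustomOpNode using the forward/backward slot map built above.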
static PyObject* eager_api_run_custom_op(PyObject* self,
                                         PyObject* args,
                                         PyObject* kwargs) {
  EAGER_TRY
  FLAGS_tensor_operants_mode = "phi";
  if (paddle::OperantsManager::Instance().phi_operants.get() == nullptr) {
    paddle::OperantsManager::Instance().phi_operants.reset(
        new paddle::operants::PhiTensorOperants());
    VLOG(4) << "Initialize phi tensor operants successfully";
  }

  paddle::CustomOpKernelContext ctx =
      CastPyArg2CustomOpKernelContext(PyTuple_GET_ITEM(args, 0), 0);
  std::string op_type = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 1), 1);
  bool trace_backward = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
  {
    eager_gil_scoped_release guard;
    VLOG(7) << "Get things for python for Custom Op: " << op_type
            << ", trace_backward is: " << trace_backward;
    auto meta_info_map = egr::Controller::Instance().GetOpMetaInfoMap();
    PADDLE_ENFORCE_NE(
        meta_info_map.find(op_type),
        meta_info_map.end(),
        paddle::platform::errors::NotFound(
            "Can't find %s in Eager OpMetaInfoMap which should be "
            "created by LoadOpMetaInfoAndRegisterOp, please make "
            "sure you registered your op first and try again. ",
            op_type));
    VLOG(7) << "Run Kernel of Custom Op: " << op_type;
    std::vector<paddle::any> res_attrs =
        CastAttrsToTargetType(ctx.Attrs(),
                              paddle::framework::OpMetaInfoHelper::GetAttrs(
                                  meta_info_map.at(op_type)[0]));
    ctx.EmplaceBackAttrs(res_attrs);
    const auto& vec_map = meta_info_map.at(op_type);

    // handle inplace case
    const auto& inputs = paddle::framework::OpMetaInfoHelper::GetInputs(
        meta_info_map.at(op_type)[0]);
    const auto& outputs = paddle::framework::OpMetaInfoHelper::GetOutputs(
        meta_info_map.at(op_type)[0]);
    const auto& inplace_map =
        paddle::framework::OpMetaInfoHelper::GetInplaceMap(
            meta_info_map.at(op_type)[0]);
    ctx.MapPlainOutputs(inputs, outputs, inplace_map);
    (*paddle::framework::OpMetaInfoHelper::GetKernelFn(vec_map[0]))(&ctx);
    ctx.AssignInplaceOutputs();

    VLOG(7) << "Get AutogradMeta for inputs and outputs for Custom Op";
    std::vector<std::vector<egr::AutogradMeta*>> ins_auto_grad_metas;
    std::vector<std::vector<egr::AutogradMeta*>> outs_auto_grad_metas;
    VLOG(7) << "We got slot num of ins is: " << ctx.InputRange().size();
    ins_auto_grad_metas.resize(ctx.InputRange().size());
    VLOG(7) << "We got slot num of outs is: " << ctx.OutputRange().size();
    outs_auto_grad_metas.resize(ctx.OutputRange().size());

    for (size_t i = 0; i < ctx.InputRange().size(); i++) {
      ins_auto_grad_metas[i] =
          egr::EagerUtils::nullable_autograd_meta(ctx.InputsBetween(
              ctx.InputRangeAt(i).first, ctx.InputRangeAt(i).second));
    }
    for (size_t i = 0; i < ctx.OutputRange().size(); i++) {
      outs_auto_grad_metas[i] =
          egr::EagerUtils::unsafe_autograd_meta(ctx.OutputsBetweeen(
              ctx.OutputRangeAt(i).first, ctx.OutputRangeAt(i).second));
    }
    bool require_any_grad = false;
    for (size_t i = 0; i < ins_auto_grad_metas.size(); i++) {
      require_any_grad =
          require_any_grad || egr::EagerUtils::ComputeRequireGrad(
                                  trace_backward, &(ins_auto_grad_metas[i]));
    }

    // handle inplace case
    for (size_t i = 0; i < ctx.InputRange().size(); i++) {
      if (inplace_map.find(inputs[i]) != inplace_map.end()) {
        size_t input_size =
            ctx.InputRangeAt(i).second - ctx.InputRangeAt(i).first;
        size_t start_idx = ctx.InputRangeAt(i).first;
        for (size_t j = 0; j < input_size; j++) {
          egr::EagerUtils::CheckInplace(ctx.InputAt(start_idx + j),
                                        ins_auto_grad_metas[i][j],
                                        require_any_grad);
          // Bump Inplace Version
          ctx.MutableInputAt(start_idx + j).bump_inplace_version();
          VLOG(3) << "Custom operator: Tensor("
                  << ctx.InputAt(start_idx + j).name()
                  << ") uses Inplace Strategy.";
        }
      }
    }

    if (require_any_grad && (vec_map.size() > 1)) {
      VLOG(6) << " Construct Grad for Custom Op: " << op_type;
      ConstructFwdAndBwdMap(vec_map, op_type);
      for (size_t i = 0; i < outs_auto_grad_metas.size(); i++) {
        egr::EagerUtils::PassStopGradient(false, &(outs_auto_grad_metas[i]));
      }
      // Note(HongyuJia): In dygraph eager mode, CheckInplace makes sure leaf
      // nodes set stop_gradient=True. However, dygraph mode can also output
      // leaf nodes' gradients (for example, we can get x.grad after x.add_(y)).
      // To be consistent with dygraph mode, we have to PassStopGradient for all
      // inplaced ins_auto_grad_metas.
      std::unordered_map<size_t, size_t> inplace_tensor_map =
          ctx.GetInplaceTensorMap();
      for (auto pair : inplace_tensor_map) {
        egr::EagerUtils::PassStopGradient(false,
                                          &(ins_auto_grad_metas[pair.first]));
      }
      auto grad_node = std::make_shared<egr::RunCustomOpNode>(
          outs_auto_grad_metas.size(), ins_auto_grad_metas.size(), op_type);
      auto slot_map =
          egr::Controller::Instance().GetCustomEdgesSlotMap().at(op_type);
      // Prepare Grad outputs
      size_t no_grad_cnt = 0;
      for (size_t i = 0; i < ins_auto_grad_metas.size(); i++) {
        const std::vector<paddle::Tensor>& in_tensors = ctx.InputsBetween(
            ctx.InputRangeAt(i).first, ctx.InputRangeAt(i).second);

        if (slot_map[0][0].find(i) != slot_map[0][0].end()) {
          grad_node->SetGradOutMeta(in_tensors, slot_map[0][0][i]);
        } else {
          grad_node->SetGradOutMeta(
              in_tensors, ins_auto_grad_metas.size() - 1 - no_grad_cnt);
          no_grad_cnt++;
        }
      }
      // Prepare Grad inputs with grad of fwd outputs
      for (size_t i = 0; i < outs_auto_grad_metas.size(); i++) {
        const std::vector<paddle::Tensor>& out_tensors = ctx.OutputsBetweeen(
            ctx.OutputRangeAt(i).first, ctx.OutputRangeAt(i).second);

        egr::EagerUtils::SetOutRankWithSlot(&(outs_auto_grad_metas[i]), i);
        egr::EagerUtils::SetHistory(&(outs_auto_grad_metas[i]), grad_node);
        grad_node->SetGradInMeta(out_tensors, i);
      }

      // Prepare Grad inputs with fwd outputs
      for (auto it = slot_map[0][2].begin(); it != slot_map[0][2].end(); it++) {
        VLOG(7) << "Prepare fwd_outs: " << it->first
                << " to grad_inputs: " << it->second;
        grad_node->fwd_outs[it->second] =
            egr::RunCustomOpNode::ConstructTensorWrapper(
                ctx.OutputsBetweeen(ctx.OutputRangeAt(it->first).first,
                                    ctx.OutputRangeAt(it->first).second));
      }

      // Prepare Grad inputs with fwd inputs
      for (auto it = slot_map[0][3].begin(); it != slot_map[0][3].end(); it++) {
        VLOG(7) << "Prepare fwd_ins: " << it->first
                << " to grad_inputs: " << it->second;
        grad_node->fwd_ins[it->second] =
            egr::RunCustomOpNode::ConstructTensorWrapper(
                ctx.InputsBetween(ctx.InputRangeAt(it->first).first,
                                  ctx.InputRangeAt(it->first).second));
      }

      auto attrs_names = paddle::framework::OpMetaInfoHelper::GetAttrs(
          meta_info_map.at(op_type)[1]);
      std::vector<paddle::any> attrs(attrs_names.size());
      // Prepare attrs for Grad node
      for (auto it = slot_map[0][4].begin(); it != slot_map[0][4].end(); it++) {
        VLOG(7) << "Prepare fwd attrs: " << it->first
                << " to grad_attrs: " << it->second;
        attrs[it->second] = res_attrs[it->first];
      }
      grad_node->SetAttrs(attrs);
    }
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

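// Binding for building a SparseCooTensor in eager mode from dense indices
// and values plus a dense shape; attaches autograd meta and, if the tensor
// has no grad node yet, a GradNodeAccumulation.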
static PyObject* eager_api_sparse_coo_tensor(PyObject* self,
                                             PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto non_zero_indices = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 2), 2);
  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  paddle::Tensor tensor;
  {
    eager_gil_scoped_release guard;
    PADDLE_ENFORCE(non_zero_indices.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the non-zero indices must be a DenseTensor."));
    PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the non-zero elements must be a DenseTensor."));
    auto dense_indices =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_indices.impl());
    auto dense_elements =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
    // TODO(zhangkaihuo): After creating SparseCooTensor, call coalesced() to
    // sort and merge duplicate indices
    std::shared_ptr<phi::SparseCooTensor> coo_tensor =
        std::make_shared<phi::SparseCooTensor>(
            *dense_indices, *dense_elements, phi::make_ddim(dense_shape));
    tensor.set_impl(coo_tensor);
    auto name =
        egr::Controller::Instance().GenerateUniqueName("generated_tensor");
    tensor.set_name(name);
    auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
    autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
    if (!autograd_meta->GetMutableGradNode()) {
      VLOG(3) << "Tensor(" << name
              << ") doesn't have GradNode, add GradNodeAccumulation to it.";
      autograd_meta->SetGradNode(
          std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
    }
  }
  return ToPyObject(tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

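// Binding for building a SparseCsrTensor in eager mode from compressed row
// offsets, column indices, and values plus a dense shape, mirroring the COO
// constructor above.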
static PyObject* eager_api_sparse_csr_tensor(PyObject* self,
                                             PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto non_zero_crows = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  auto non_zero_cols = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 2), 2);
  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 3), 3);
  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  paddle::Tensor tensor;
  {
    eager_gil_scoped_release guard;
    PADDLE_ENFORCE(non_zero_crows.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the compressed non-zero rows must be a DenseTensor."));
    PADDLE_ENFORCE(non_zero_cols.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the non-zero cols must be a DenseTensor."));
    PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the non-zero elements must be a DenseTensor."));

    auto dense_crows =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_crows.impl());
    auto dense_cols =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_cols.impl());
    auto dense_elements =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
    std::shared_ptr<phi::SparseCsrTensor> csr_tensor =
        std::make_shared<phi::SparseCsrTensor>(*dense_crows,
                                               *dense_cols,
                                               *dense_elements,
                                               phi::make_ddim(dense_shape));
    tensor.set_impl(csr_tensor);
    auto name =
        egr::Controller::Instance().GenerateUniqueName("generated_tensor");
    tensor.set_name(name);
    auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
    autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
    if (!autograd_meta->GetMutableGradNode()) {
      VLOG(3) << "Tensor(" << name
              << ") have not GradNode, add GradNodeAccumulation for it.";
      autograd_meta->SetGradNode(
          std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
    }
  }
  return ToPyObject(tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

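// Installs Python pack/unpack hooks that intercept tensors saved for the
// backward pass; only takes effect while grad recording is enabled.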
static PyObject* eager_api_register_saved_tensors_hooks(PyObject* self,
                                                        PyObject* args,
                                                        PyObject* kwargs) {
  EAGER_TRY
  if (egr::Controller::Instance().HasGrad()) {
    auto pack_hook = PyTuple_GET_ITEM(args, 0);
    auto unpack_hook = PyTuple_GET_ITEM(args, 1);
    egr::SavedTensorsHooks::GetInstance().SetHooks(
        std::make_shared<PackHook>(pack_hook),
        std::make_shared<UnPackHook>(unpack_hook));
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* eager_api_reset_saved_tensors_hooks(PyObject* self,
                                                     PyObject* args,
                                                     PyObject* kwargs) {
  EAGER_TRY
  egr::SavedTensorsHooks::GetInstance().ResetHooks();
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

#if defined(PADDLE_WITH_CUDA)
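// CUDA-only binding for async_read: gathers rows of a pinned-memory `src`
// into GPU `dst` on the current stream. Contiguous (offset, count) segments
// are copied directly; rows selected by `index` are staged through the
// pinned `buffer` before the device copy.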
static PyObject* eager_api_async_read(PyObject* self,
                                      PyObject* args,
                                      PyObject* kwargs) {
  EAGER_TRY
  auto& src = GetTensorFromArgs("async_read", "src", args, 0, false);
  auto& dst = GetTensorFromArgs("async_read", "dst", args, 1, false);
  auto& index = GetTensorFromArgs("async_read", "index", args, 2, false);
  auto& buffer = GetTensorFromArgs("async_read", "buffer", args, 3, false);
  auto& offset = GetTensorFromArgs("async_read", "offset", args, 4, false);
  auto& count = GetTensorFromArgs("async_read", "count", args, 5, false);
  {
    eager_gil_scoped_release guard;
    PADDLE_ENFORCE_EQ(
        src.is_gpu_pinned(),
        true,
        platform::errors::InvalidArgument("Required `src` device should be "
                                          "CUDAPinnedPlace, but received %d.",
                                          src.place()));
    PADDLE_ENFORCE_EQ(
        dst.is_gpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `dst` device should be CUDAPlace, but received %d.",
            dst.place()));
    PADDLE_ENFORCE_EQ(
        index.is_cpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `index` device should be CPUPlace, but received %d.",
            index.place()));
    PADDLE_ENFORCE_EQ(buffer.is_gpu_pinned(),
                      true,
                      platform::errors::InvalidArgument(
                          "Required `buffer` device should be CUDAPinnedPlace, "
                          "but received %d.",
                          buffer.place()));
    PADDLE_ENFORCE_EQ(
        offset.is_cpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `offset` device should be CPUPlace, but received %d.",
            offset.place()));
    PADDLE_ENFORCE_EQ(
        count.is_cpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `count` device should be CPUPlace, but received %d.",
            count.place()));

    auto& src_tensor = src;
    auto* dst_tensor = &dst;
    auto& index_tensor = index;
    auto* buffer_tensor = &buffer;
    auto& offset_tensor = offset;
    auto& count_tensor = count;
    auto* dst_data = dst_tensor->mutable_data<float>(dst.place());
    const auto& deviceId = paddle::platform::GetCurrentDeviceId();

    PADDLE_ENFORCE_EQ(src_tensor.dims().size(),
                      dst_tensor->dims().size(),
                      platform::errors::InvalidArgument(
                          "`src` and `dst` should have same tensor shape, "
                          "except for the first dimension."));
    PADDLE_ENFORCE_EQ(src_tensor.dims().size(),
                      buffer_tensor->dims().size(),
                      platform::errors::InvalidArgument(
                          "`src` and `buffer` should have same tensor shape, "
                          "except for the first dimension."));
    for (int i = 1; i < src_tensor.dims().size(); i++) {
      PADDLE_ENFORCE_EQ(
          src_tensor.dims()[i],
          dst_tensor->dims()[i],
          platform::errors::InvalidArgument(
              "`src` and `dst` should have the same tensor shape, "
              "except for the first dimension."));
      PADDLE_ENFORCE_EQ(
          src_tensor.dims()[i],
          buffer_tensor->dims()[i],
          platform::errors::InvalidArgument(
              "`src` and `buffer` should have the same tensor shape, "
              "except for the first dimension."));
    }
    PADDLE_ENFORCE_EQ(index_tensor.dims().size(),
                      1,
                      platform::errors::InvalidArgument(
                          "`index` tensor should be one-dimensional."));

    auto stream = paddle::platform::get_current_stream(deviceId)->raw_stream();

    int64_t numel = 0;  // total copy length
    int64_t copy_flag = offset_tensor.dims()[0];
    int64_t size = src_tensor.numel() / src_tensor.dims()[0];

    if (copy_flag != 0) {
      PADDLE_ENFORCE_EQ(offset_tensor.dims().size(),
                        1,
                        platform::errors::InvalidArgument(
                            "`offset` tensor should be one-dimensional."));
      PADDLE_ENFORCE_EQ(count_tensor.dims().size(),
                        1,
                        platform::errors::InvalidArgument(
                            "`count` tensor should be one-dimensional."));
      PADDLE_ENFORCE_EQ(offset_tensor.numel(),
                        count_tensor.numel(),
                        platform::errors::InvalidArgument(
                            "`offset` and `count` tensor size dismatch."));
      auto* offset_data = offset_tensor.data<int64_t>();
      auto* count_data = count_tensor.data<int64_t>();
      for (int64_t i = 0; i < count_tensor.numel(); i++) {
        numel += count_data[i];
      }
      PADDLE_ENFORCE_LE(numel + index_tensor.numel(),
                        buffer_tensor->dims()[0],
                        platform::errors::InvalidArgument(
                            "Buffer tensor size is too small."));
      PADDLE_ENFORCE_LE(numel + index_tensor.numel(),
                        dst_tensor->dims()[0],
                        platform::errors::InvalidArgument(
                            "Target tensor size is too small."));

      int64_t src_offset, dst_offset = 0, c;
      auto* src_data = src_tensor.data<float>();
      for (int64_t i = 0; i < offset_tensor.numel(); i++) {
        src_offset = offset_data[i], c = count_data[i];
        PADDLE_ENFORCE_LE(src_offset + c,
                          src_tensor.dims()[0],
                          platform::errors::InvalidArgument(
                              "Invalid offset or count index."));
        PADDLE_ENFORCE_LE(dst_offset + c,
                          dst_tensor->dims()[0],
                          platform::errors::InvalidArgument(
                              "Invalid offset or count index."));
        cudaMemcpyAsync(dst_data + (dst_offset * size),
                        src_data + (src_offset * size),
                        c * size * sizeof(float),
                        cudaMemcpyHostToDevice,
                        stream);
        dst_offset += c;
      }
    } else {
      PADDLE_ENFORCE_LE(index_tensor.numel(),
                        buffer_tensor->dims()[0],
                        platform::errors::InvalidArgument(
                            "Buffer tensor size is too small."));
    }

    // Select the index data to the buffer
    auto index_select = [](const paddle::Tensor& src_tensor,
                           const paddle::Tensor& index_tensor,
                           paddle::Tensor* buffer_tensor) {
      auto* src_data = src_tensor.data<float>();
      auto* index_data = index_tensor.data<int64_t>();
      auto* buffer_data = buffer_tensor->data<float>();
      const int& slice_size = src_tensor.numel() / src_tensor.dims()[0];
      const int& copy_bytes = slice_size * sizeof(float);
      int64_t c = 0;
      for (int64_t i = 0; i < index_tensor.numel(); i++) {
        std::memcpy(buffer_data + c * slice_size,
                    src_data + index_data[i] * slice_size,
                    copy_bytes);
        c += 1;
      }
    };
    index_select(src_tensor, index_tensor, buffer_tensor);

    // Copy the data to device memory
    cudaMemcpyAsync(dst_data + (numel * size),
                    buffer_tensor->data<float>(),
                    index_tensor.numel() * size * sizeof(float),
                    cudaMemcpyHostToDevice,
                    stream);
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* eager_api_async_write(PyObject* self,
                                       PyObject* args,
                                       PyObject* kwargs) {
  EAGER_TRY
  auto& src = GetTensorFromArgs("async_write", "src", args, 0, false);
  auto& dst = GetTensorFromArgs("async_write", "dst", args, 1, false);
  auto& offset = GetTensorFromArgs("async_write", "offset", args, 2, false);
  auto& count = GetTensorFromArgs("async_write", "count", args, 3, false);
  {
    eager_gil_scoped_release guard;
    PADDLE_ENFORCE_EQ(
        src.is_gpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `src` device should be CUDAPlace, but received %d. ",
            src.place()));
    PADDLE_ENFORCE_EQ(dst.is_gpu_pinned(),
                      true,
                      platform::errors::InvalidArgument(
                          "Required `dst` device should be CUDAPinnedPlace, "
                          "but received %d. ",
                          dst.place()));
    PADDLE_ENFORCE_EQ(
        offset.is_cpu(),
        true,
        platform::errors::InvalidArgument("Required `offset` device should "
                                          "be CPUPlace, but received %d. ",
                                          offset.place()));
    PADDLE_ENFORCE_EQ(
        count.is_cpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `count` device should be CPUPlace, but received %d. ",
            count.place()));

    // TODO(daisiming): In future, add index as arguments following
    // async_read.
    auto& src_tensor = src;
    auto* dst_tensor = &dst;
    auto& offset_tensor = offset;
    auto& count_tensor = count;
    const auto& deviceId = paddle::platform::GetCurrentDeviceId();

    PADDLE_ENFORCE_EQ(offset_tensor.dims().size(),
                      1,
                      platform::errors::InvalidArgument(
                          "`offset` tensor should be one-dimensional."));
    PADDLE_ENFORCE_EQ(count_tensor.dims().size(),
                      1,
                      platform::errors::InvalidArgument(
                          "`count` tensor should be one-dimensional."));
    PADDLE_ENFORCE_EQ(offset_tensor.numel(),
                      count_tensor.numel(),
                      platform::errors::InvalidArgument(
                          "`offset` and `count` tensor size dismatch."));
    PADDLE_ENFORCE_EQ(src_tensor.dims().size(),
                      dst_tensor->dims().size(),
                      platform::errors::InvalidArgument(
                          "`src` and `dst` should have the same tensor shape, "
                          "except for the first dimension."));
    for (int i = 1; i < src_tensor.dims().size(); i++) {
      PADDLE_ENFORCE_EQ(
          src_tensor.dims()[i],
          dst_tensor->dims()[i],
          platform::errors::InvalidArgument(
              "`src` and `dst` should have the same tensor shape, "
              "except for the first dimension."));
    }

    auto stream = paddle::platform::get_current_stream(deviceId)->raw_stream();

    int64_t size = src_tensor.numel() / src_tensor.dims()[0];
    auto* src_data = src_tensor.data<float>();
    auto* dst_data = dst_tensor->data<float>();
    const int64_t* offset_data = offset_tensor.data<int64_t>();
    const int64_t* count_data = count_tensor.data<int64_t>();
    int64_t src_offset = 0, dst_offset, c;
    for (int64_t i = 0; i < offset_tensor.numel(); i++) {
      dst_offset = offset_data[i], c = count_data[i];
      PADDLE_ENFORCE_LE(
          src_offset + c,
          src_tensor.dims()[0],
          platform::errors::InvalidArgument("Invalid offset or count index"));
      PADDLE_ENFORCE_LE(
          dst_offset + c,
          dst_tensor->dims()[0],
          platform::errors::InvalidArgument("Invalid offset or count index"));
      cudaMemcpyAsync(dst_data + (dst_offset * size),
                      src_data + (src_offset * size),
                      c * size * sizeof(float),
                      cudaMemcpyDeviceToHost,
                      stream);
      src_offset += c;
    }
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

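// CUDA-only binding for to_uva_tensor: builds a tensor backed by CUDA
// unified virtual addressing (UVA) from a numpy array, dispatching on the
// array's dtype; an optional second argument selects the device id.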
static PyObject* eager_api_to_uva_tensor(PyObject* self,
                                         PyObject* args,
                                         PyObject* kwargs) {
  EAGER_TRY
  VLOG(4) << "Running in eager_api_to_uva_tensor.";
  auto new_tensor = std::shared_ptr<paddle::Tensor>(
      new paddle::Tensor(egr::Controller::Instance().GenerateUniqueName()));
  PyObject* obj = PyTuple_GET_ITEM(args, 0);
  auto array = py::cast<py::array>(py::handle(obj));

  Py_ssize_t args_num = PyTuple_Size(args);
  int64_t device_id = 0;
  if (args_num > 1) {
    PyObject* Py_device_id = PyTuple_GET_ITEM(args, 1);
    if (Py_device_id) {
      device_id = CastPyArg2AttrLong(Py_device_id, 1);
    }
  }

  if (py::isinstance<py::array_t<int32_t>>(array)) {
    SetUVATensorFromPyArray<int32_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<int64_t>>(array)) {
    SetUVATensorFromPyArray<int64_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<float>>(array)) {
    SetUVATensorFromPyArray<float>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<double>>(array)) {
    SetUVATensorFromPyArray<double>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<int8_t>>(array)) {
    SetUVATensorFromPyArray<int8_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<int16_t>>(array)) {
    SetUVATensorFromPyArray<int16_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<paddle::platform::float16>>(array)) {
    SetUVATensorFromPyArray<paddle::platform::float16>(
        new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<bool>>(array)) {
    SetUVATensorFromPyArray<bool>(new_tensor, array, device_id);
  } else {
    // obj may be any type; obj.cast<py::array>() may fail, in which case
    // array.dtype will be a string of unknown meaning.
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Input object type error or incompatible array data type. "
        "tensor.set() supports array with bool, float16, float32, "
        "float64, int8, int16, int32, int64,"
        "please check your input or input array data type."));
  }
  return ToPyObject(*(new_tensor.get()));
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
#endif

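// Registers a Python callable to run once after the entire backward pass
// finishes.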
static PyObject* eager_api__add_backward_final_hook(PyObject* self,
                                                    PyObject* args,
                                                    PyObject* kwargs) {
  EAGER_TRY
  PyObject* hook_func = PyTuple_GET_ITEM(args, 0);
  egr::Controller::Instance().RegisterBackwardFinalHook(
      std::make_shared<PyVoidHook>(hook_func));
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

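// Python method table exposing the eager API functions above to the bound
// module.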
PyMethodDef variable_functions[] = {
    // TODO(jiabin): Remove scale when we have final state tests
    {"scale",
     (PyCFunction)(void (*)(void))eager_api_scale,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"_add_backward_final_hook",
     (PyCFunction)(void (*)(void))eager_api__add_backward_final_hook,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"run_backward",
     (PyCFunction)(void (*)(void))eager_api_run_backward,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"run_partial_grad",
     (PyCFunction)(void (*)(void))eager_api_run_partial_grad,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"_run_custom_op",
     (PyCFunction)(void (*)(void))eager_api_run_custom_op,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"tensor_copy",
     (PyCFunction)(void (*)(void))eager_api_tensor_copy,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"get_all_grads",
     (PyCFunction)(void (*)(void))eager_api_get_all_grads,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"get_grads_lists",
     (PyCFunction)(void (*)(void))eager_api_get_grads_lists,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"get_grads_types",
     (PyCFunction)(void (*)(void))eager_api_get_grads_types,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"read_next_tensor_list",
     (PyCFunction)(void (*)(void))eager_api_read_next_tensor_list,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"jit_function_call",
     (PyCFunction)(void (*)(void))eager_api_jit_function_call,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    /**sparse functions**/
    {"sparse_coo_tensor",
     (PyCFunction)(void (*)(void))eager_api_sparse_coo_tensor,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"sparse_csr_tensor",
     (PyCFunction)(void (*)(void))eager_api_sparse_csr_tensor,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"register_saved_tensors_hooks",
     (PyCFunction)(void (*)(void))eager_api_register_saved_tensors_hooks,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"reset_saved_tensors_hooks",
     (PyCFunction)(void (*)(void))eager_api_reset_saved_tensors_hooks,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
/**sparse functions**/
#if defined(PADDLE_WITH_CUDA)
    {"async_read",
     (PyCFunction)(void (*)(void))eager_api_async_read,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"async_write",
     (PyCFunction)(void (*)(void))eager_api_async_write,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"to_uva_tensor",
     (PyCFunction)(void (*)(void))eager_api_to_uva_tensor,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
#endif
    {NULL, NULL, 0, NULL}};

void BindFunctions(PyObject* module) {
  if (PyModule_AddFunctions(module, variable_functions) < 0) {
    PADDLE_THROW(platform::errors::Fatal(
        "Init Paddle erroe in BindFunctions(PyModule_AddFunctions)."));
    return;
  }
}

}  // namespace pybind
}  // namespace paddle