/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// disable numpy compile error
#if defined(_MSC_VER)
#include <BaseTsd.h>
typedef SSIZE_T ssize_t;
#endif

#include <Python.h>

// Avoid a problem with copysign defined in pyconfig.h on Windows.
#ifdef copysign
#undef copysign
#endif

#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/backward.h"
#include "paddle/fluid/eager/custom_operator/custom_operator_node.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/custom_operator.h"
#include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/framework/python_headers.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/dynload/dynamic_loader.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/fluid/pybind/tensor_py.h"
#include "paddle/phi/api/ext/op_meta_info.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"
#include "paddle/utils/string/string_helper.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/pybind/cuda_streams_py.h"
#endif

#include "gflags/gflags.h"
#include "paddle/phi/api/include/operants_manager.h"
#include "paddle/phi/api/include/tensor_operants.h"

DECLARE_string(tensor_operants_mode);

namespace paddle {
namespace pybind {

namespace py = ::pybind11;

extern PyTypeObject* p_tensor_type;
extern PyTypeObject* g_multidevicefeedreader_pytype;
extern PyTypeObject* g_orderedmultidevicefeedreader_pytype;

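// Returns the total element count of a numpy array by multiplying all of
// its dimensions.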
size_t PyArray_Size_(PyObject* numpy_data) {
  size_t res = 1;
  auto dims = pybind11::detail::array_proxy(numpy_data)->dimensions;
  auto nd = pybind11::detail::array_proxy(numpy_data)->nd;
  while (nd--) {
    res *= (*dims++);
  }
  return res;
}

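// Adapts the buffer of a numpy array to a phi::Allocation without copying.
// The PyObject is ref-counted (Py_INCREF) so the array outlives the
// allocation; the destructor re-acquires the GIL before Py_DECREF.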
class EagerNumpyAllocation : public phi::Allocation {
 public:
  explicit EagerNumpyAllocation(PyObject* numpy_data, phi::DataType dtype)
      : Allocation(
            static_cast<void*>(pybind11::detail::array_proxy(numpy_data)->data),
            phi::SizeOf(dtype) * PyArray_Size_(numpy_data),
            paddle::platform::CPUPlace()),
        arr_(numpy_data) {
    PADDLE_ENFORCE_NOT_NULL(
        arr_,
        platform::errors::InvalidArgument("The underlying PyObject pointer of "
                                          "numpy array cannot be nullptr"));
    PADDLE_ENFORCE_NE(
        arr_,
        Py_None,
        platform::errors::PreconditionNotMet(
            "The underlying PyObject pointer of numpy array cannot be None"));
    Py_INCREF(arr_);
  }
  ~EagerNumpyAllocation() override {
    py::gil_scoped_acquire gil;
    Py_DECREF(arr_);
  }

 private:
  PyObject* arr_;
};

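// Binding for egr::scale. Args: (tensor, scale, bias, bias_after_scale,
// trace_backward); the GIL is released while the kernel runs.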
static PyObject* eager_api_scale(PyObject* self,
                                 PyObject* args,
                                 PyObject* kwargs) {
  EAGER_TRY
  // TODO(jiabin): Sync Tensor and Variable here when we support

  auto& tensor =
      reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor;
  float scale = CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 1), 1);
  float bias = CastPyArg2AttrFloat(PyTuple_GET_ITEM(args, 2), 2);
  bool bias_after_scale = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  bool trace_backward = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  paddle::Tensor ret;
  {
    eager_gil_scoped_release guard;
    ret = egr::scale(tensor, scale, bias, bias_after_scale, trace_backward);
  }
  return ToPyObject(ret);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

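// Binding for egr::Backward. Args: (tensors, grad_tensors, retain_graph);
// runs the backward pass starting from the given output tensors.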
static PyObject* eager_api_run_backward(PyObject* self,
                                        PyObject* args,
                                        PyObject* kwargs) {
  EAGER_TRY
  auto tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);
  auto grad_tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
  bool retain_graph = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
  {
    eager_gil_scoped_release guard;
    egr::Backward(tensors, grad_tensors, retain_graph);
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

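// Binding for egr::Grad (the engine behind paddle.grad): computes gradients
// of `tensors` w.r.t. `inputs`, honoring retain_graph, create_graph,
// only_inputs, allow_unused, and no_grad_vars.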
static PyObject* eager_api_run_partial_grad(PyObject* self,
                                            PyObject* args,
                                            PyObject* kwargs) {
  EAGER_TRY
  auto tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);
  auto inputs = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
  auto grad_tensors = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 2), 2);
  auto retain_graph = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  auto create_graph = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  auto only_inputs = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 5), 5);
  auto allow_unused = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 6), 6);
  auto no_grad_vars = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 7), 7);
  std::vector<paddle::Tensor> result;
  {
    eager_gil_scoped_release guard;
    result = egr::Grad(tensors,
                       inputs,
                       grad_tensors,
                       retain_graph,
                       create_graph,
                       only_inputs,
                       allow_unused,
                       no_grad_vars);
    VLOG(4) << " in eager_api_run_partial_grad, after running egr::Grad";
  }
  return ToPyObject(result, true /* return_py_none_if_not_initialize */);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

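// Copies `src` to `dst` on the given place (optionally blocking) and
// propagates the stop_gradient and persistable flags to the destination.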
static PyObject* eager_api_tensor_copy(PyObject* self,
                                       PyObject* args,
                                       PyObject* kwargs) {
  EAGER_TRY
  paddle::Tensor& src =
      reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor;
  paddle::Tensor& dst =
      reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 1))->tensor;
  auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 2), 2);
  bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);

  {
    eager_gil_scoped_release guard;
    dst = src.copy_to(place, blocking);
    egr::EagerUtils::autograd_meta(&dst)->SetStopGradient(
        egr::EagerUtils::autograd_meta(&(src))->StopGradient());
    egr::EagerUtils::autograd_meta(&dst)->SetPersistable(
        egr::EagerUtils::autograd_meta(&(src))->Persistable());
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

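// Returns the gradient of every tensor in the list; tensors whose gradient
// is stopped or uninitialized yield an empty paddle::Tensor placeholder.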
PyObject* eager_api_get_all_grads(PyObject* self,
                                  PyObject* args,
                                  PyObject* kwargs) {
  EAGER_TRY
  auto tensor_list = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);

  std::vector<paddle::Tensor> ret;
  for (auto& tensor : tensor_list) {
    VLOG(6) << "Get grad for tensor: " << tensor.name();
    auto meta = egr::EagerUtils::nullable_autograd_meta(tensor);
    if (!meta || meta->StopGradient()) {
      ret.emplace_back(paddle::Tensor());
      continue;
    }
    if (meta && meta->Grad().initialized()) {
      ret.emplace_back(meta->Grad());
    } else {
      ret.emplace_back(paddle::Tensor());
    }
  }
  return ToPyObject(ret, true);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

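// Groups the initialized gradients of the given tensors into three lists by
// dtype: FP16, BF16, and FP32.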
PyObject* eager_api_get_grads_lists(PyObject* self,
                                    PyObject* args,
                                    PyObject* kwargs) {
  EAGER_TRY
  auto tensor_list = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);
  // The order of the 3 vectors is: FP16_grads, BF16_grads, FP32_grads
  std::vector<std::vector<paddle::Tensor>> ret(3);

  for (auto& tensor : tensor_list) {
    VLOG(6) << "Get grad for tensor: " << tensor.name();
    auto meta = egr::EagerUtils::nullable_autograd_meta(tensor);
    if (meta && meta->Grad().initialized()) {
      auto& grad = meta->Grad();
      switch (grad.dtype()) {
        case phi::DataType::FLOAT16:
          ret[0].emplace_back(grad);
          break;
        case phi::DataType::BFLOAT16:
          ret[1].emplace_back(grad);
          break;
        case phi::DataType::FLOAT32:
          ret[2].emplace_back(grad);
          break;
        default:
          break;
      }
    }
  }

  return ToPyObject(ret);

  EAGER_CATCH_AND_THROW_RETURN_NULL
}

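// Returns, for each tensor, the proto var type of its dtype when the grad
// is an initialized dense tensor of FP32/FP16/BF16, and -1 for stopped or
// uninitialized grads.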
PyObject* eager_api_get_grads_types(PyObject* self,
                                    PyObject* args,
                                    PyObject* kwargs) {
  EAGER_TRY
  auto tensor_list = CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 0), 0);

  std::vector<int> ret;

  for (auto& tensor : tensor_list) {
    VLOG(6) << "Get grad for tensor: " << tensor.name();
    auto meta = egr::EagerUtils::nullable_autograd_meta(tensor);
    if (!meta || meta->StopGradient()) {
      ret.emplace_back(-1);
      continue;
    }

    auto& grad = meta->Grad();
    if (meta && grad.initialized()) {
      if (grad.is_dense_tensor() &&
          (tensor.dtype() == phi::DataType::FLOAT32 ||
           tensor.dtype() == phi::DataType::FLOAT16 ||
           tensor.dtype() == phi::DataType::BFLOAT16)) {
        ret.emplace_back(
            paddle::framework::TransToProtoVarType(tensor.dtype()));
      }
    } else {
      ret.emplace_back(-1);
    }
  }

  return ToPyObject(ret);

  EAGER_CATCH_AND_THROW_RETURN_NULL
}

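// Wraps each DenseTensor produced by a feed reader into a paddle::Tensor
// with stop_gradient=true and persistable=false.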
static PyObject* eager_api_read_next_tensor_list(PyObject* self,
                                                 PyObject* args,
                                                 PyObject* kwargs) {
  EAGER_TRY
  auto tensor_base_list =
      CastPyArg2VectorOfTensorBase(PyTuple_GET_ITEM(args, 0), 0);
  std::vector<paddle::Tensor> tensor_list;
  {
    eager_gil_scoped_release guard;
    tensor_list.reserve(tensor_base_list.size());
    auto func = [](phi::DenseTensor& tensor_base) {
      paddle::Tensor tensor(egr::Controller::Instance().GenerateUniqueName());
      auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
      autograd_meta->SetPersistable(false);
      autograd_meta->SetStopGradient(true);
      tensor.set_impl(std::make_shared<phi::DenseTensor>(tensor_base));
      return tensor;
    };
    for (auto& tensor_base : tensor_base_list) {
      tensor_list.emplace_back(func(tensor_base));
    }
  }
  return ToPyObject(tensor_list);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

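// Builds the CustomEdgesSlotMap entry for a custom op. The five position
// maps in `res` relate the forward and backward signatures:
//   res[0]: forward input index  -> grad output index ("x"   -> "x@GRAD")
//   res[1]: forward output index -> grad input index  ("out" -> "out@GRAD")
//   res[2]: forward output index -> grad input index  (output reused as is)
//   res[3]: forward input index  -> grad input index  (input reused as is)
//   res[4]: forward attr index   -> grad attr index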
static void ConstructFwdAndBwdMap(
    const std::vector<paddle::OpMetaInfo>& vec_map,
    const std::string& op_type) {
  auto& in_out_map = egr::Controller::Instance().GetCustomEdgesSlotMap();
  if (in_out_map.find(op_type) != in_out_map.end()) {
    VLOG(7) << "Find Exist CustomEdgesSlotMap Skip >>>> ";
    return;
  } else {
    VLOG(7) << "Construct CustomEdgesSlotMap ";
    auto inputs_names = paddle::OpMetaInfoHelper::GetInputs(vec_map[0]);
    auto outputs_names = paddle::OpMetaInfoHelper::GetOutputs(vec_map[0]);
    auto attrs_names = paddle::OpMetaInfoHelper::GetAttrs(vec_map[0]);
    auto grad_outputs_names = paddle::OpMetaInfoHelper::GetOutputs(vec_map[1]);
    auto grad_inputs_names = paddle::OpMetaInfoHelper::GetInputs(vec_map[1]);
    auto grad_attrs_names = paddle::OpMetaInfoHelper::GetAttrs(vec_map[1]);
    std::vector<std::unordered_map<int, int>> res(5);

    in_out_map.insert({op_type, {res}});
    // Prepare pos map for grad_outputs
    VLOG(7) << "Prepare pos map for grad_outputs";
    PADDLE_ENFORCE_LE(
        grad_outputs_names.size(),
        inputs_names.size(),
        paddle::platform::errors::InvalidArgument(
            "The number of grad outputs should be less than or equal to the "
            "number of forward inputs."));
    for (size_t i = 0; i < grad_outputs_names.size(); i++) {
      size_t end = grad_outputs_names[i].find("@GRAD");
      PADDLE_ENFORCE_NE(
          end,
          std::string::npos,
          paddle::platform::errors::NotFound(
              "All grad outputs should be grad vars (named with the @GRAD "
              "suffix), but %s is not; please check your op and follow the "
              "naming rule.",
              grad_outputs_names[i]));
      for (size_t j = 0; j < inputs_names.size(); j++) {
        if (grad_outputs_names[i].substr(0, end) == inputs_names[j]) {
          VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                  << " inputs: " << inputs_names[j] << " related to No." << i
                  << " grad_outputs: " << grad_outputs_names[i];
          in_out_map[op_type][0][0][j] = i;
        }
      }
    }
    // Prepare pos map for grad_inputs
    for (size_t i = 0; i < grad_inputs_names.size(); i++) {
      size_t end = grad_inputs_names[i].find("@GRAD");
      if (end != std::string::npos) {
        for (size_t j = 0; j < outputs_names.size(); j++) {
          if (grad_inputs_names[i].substr(0, end) == outputs_names[j]) {
            VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                    << " outputs: " << outputs_names[j] << " related to No."
                    << i << " grad_inputs's grad: " << grad_inputs_names[i];
            in_out_map[op_type][0][1][j] = i;
          }
        }
      } else {
        if (std::find(outputs_names.begin(),
                      outputs_names.end(),
                      grad_inputs_names[i]) != outputs_names.end()) {
          for (size_t j = 0; j < outputs_names.size(); j++) {
            if (grad_inputs_names[i] == outputs_names[j]) {
              VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                      << " outputs: " << outputs_names[j] << " related to No."
                      << i
                      << " grad_inputs fwd outputs: " << grad_inputs_names[i];
              in_out_map[op_type][0][2][j] = i;
            }
          }
        } else {
          for (size_t j = 0; j < inputs_names.size(); j++) {
            if (grad_inputs_names[i] == inputs_names[j]) {
              VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                      << " inputs: " << inputs_names[j] << " related to No."
                      << i
                      << " grad_inputs fwd inputs: " << grad_inputs_names[i];
              in_out_map[op_type][0][3][j] = i;
            }
          }
        }
      }
    }

    // Prepare pos map for grad attrs_
    for (size_t i = 0; i < grad_attrs_names.size(); i++) {
      auto end = std::find(
          attrs_names.begin(), attrs_names.end(), grad_attrs_names[i]);
      PADDLE_ENFORCE_NE(end,
                        attrs_names.end(),
                        paddle::platform::errors::NotFound(
                            "All grad attrs should be one of the forward "
                            "attrs, but %s is not one of them; please check "
                            "your op and follow the rule.",
                            grad_attrs_names[i]));
      for (size_t j = 0; j < attrs_names.size(); j++) {
        if (grad_attrs_names[i] == attrs_names[j]) {
          VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j
                  << " attrs: " << attrs_names[j] << " related to No." << i
                  << " grad_attrs: " << grad_attrs_names[i];
          in_out_map[op_type][0][4][j] = i;
        }
      }
    }
  }
}

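// Casts each attribute to the type declared in attrs_names (entries are of
// the form "name: type"): bool may widen to int/int64_t and int to int64_t;
// all other types are passed through unchanged.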
static std::vector<paddle::any> CastAttrsToTargetType(
    const std::vector<paddle::any>& src,
    const std::vector<std::string>& attrs_names) {
  std::vector<paddle::any> res;
  PADDLE_ENFORCE_EQ(src.size(),
                    attrs_names.size(),
                    paddle::platform::errors::InvalidArgument(
                        "Expected attrs and attrs_names lists of the same "
                        "size; your custom op declares %s attrs, but you "
                        "provided %s.",
                        attrs_names.size(),
                        src.size()));
  for (size_t i = 0; i < src.size(); i++) {
    size_t end = attrs_names[i].find(": ");
    std::string type_name = attrs_names[i].substr(end + 2);
    if (type_name == "int") {
      if (src[i].type() == typeid(bool)) {
        res.emplace_back(static_cast<int>(paddle::any_cast<bool>(src[i])));
      } else if (src[i].type() == typeid(int)) {
        res.emplace_back(src[i]);
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Your No. %s attr can only be bool or int32; other types are "
            "forbidden for now, but we got %s. Please check your code first.",
            i,
            src[i].type().name()));
      }
    } else if (type_name == "int64_t") {
      if (src[i].type() == typeid(bool)) {
        res.emplace_back(static_cast<int64_t>(paddle::any_cast<bool>(src[i])));
      } else if (src[i].type() == typeid(int)) {
        res.emplace_back(static_cast<int64_t>(paddle::any_cast<int>(src[i])));
      } else if (src[i].type() == typeid(int64_t)) {
        res.emplace_back(src[i]);
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Your No. %s attr can only be bool, int32, or int64_t; other "
            "types are forbidden for now, but we got %s. Please check your "
            "code first.",
            i,
            src[i].type().name()));
      }
    } else {
      res.emplace_back(src[i]);
    }
  }
  return res;
}

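// Invokes a jit::Function on a vector of input tensors, releasing the GIL
// for the duration of the call.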
static PyObject* eager_api_jit_function_call(PyObject* self,
                                             PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY

  std::shared_ptr<jit::Function> function =
      CastPyArg2JitFunction(PyTuple_GET_ITEM(args, 0), 0);
  std::vector<paddle::Tensor> ins =
      CastPyArg2VectorOfTensor(PyTuple_GET_ITEM(args, 1), 1);
  std::vector<paddle::Tensor> outs;
  {
    eager_gil_scoped_release guard;
    outs = (*function)(ins);
  }
  return ToPyObject(outs);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

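// For a registered custom op, returns a map from output index to the input
// index it aliases according to the op's Inplace declaration.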
static PyObject* eager_api__get_custom_operator_inplace_reverse_idx(
    PyObject* self, PyObject* args, PyObject* kwargs) {
  EAGER_TRY
  std::string op_type = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 0), 0);
  auto meta_info_map = egr::Controller::Instance().GetOpMetaInfoMap();
  PADDLE_ENFORCE_NE(meta_info_map.find(op_type),
                    meta_info_map.end(),
                    paddle::platform::errors::NotFound(
                        "Can't find %s in Eager OpMetaInfoMap which should be "
                        "created by LoadOpMetaInfoAndRegisterOp, please make "
                        "sure you registered your op first and try again. ",
                        op_type));

  const auto& inputs =
      paddle::OpMetaInfoHelper::GetInputs(meta_info_map.at(op_type)[0]);
  const auto& outputs =
      paddle::OpMetaInfoHelper::GetOutputs(meta_info_map.at(op_type)[0]);
  const auto& inplace_map =
      paddle::OpMetaInfoHelper::GetInplaceMap(meta_info_map.at(op_type)[0]);
  VLOG(7) << "Custom operator " << op_type
          << " get InplaceMap for python, inplace map size = "
          << inplace_map.size();

  std::unordered_map<int, int> inplace_idx_map;
  for (size_t in_idx = 0; in_idx < inputs.size(); ++in_idx) {
    auto& input = inputs[in_idx];
    if (inplace_map.find(input) == inplace_map.end()) {
      continue;
    }
    auto out_iter = find(outputs.begin(), outputs.end(), inplace_map.at(input));
    PADDLE_ENFORCE(
        out_iter != outputs.end(),
        phi::errors::NotFound("Can't find the mapped value of %s, please check "
                              "the input of `Inplace` again and make "
                              "sure you registered your op accurately. ",
                              input));
    inplace_idx_map[distance(outputs.begin(), out_iter)] = in_idx;
  }

  return ToPyObject(inplace_idx_map);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

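// Runs a custom operator in eager mode: casts the attributes, maps inplace
// outputs, invokes the user kernel, and, when any input requires grad and a
// backward kernel is registered, wires up a RunCustomOpNode using the slot
// maps built by ConstructFwdAndBwdMap.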
static PyObject* eager_api_run_custom_op(PyObject* self,
                                         PyObject* args,
                                         PyObject* kwargs) {
  EAGER_TRY
  FLAGS_tensor_operants_mode = "phi";
  if (paddle::OperantsManager::Instance().phi_operants.get() == nullptr) {
    paddle::OperantsManager::Instance().phi_operants.reset(
        new paddle::operants::PhiTensorOperants());
    VLOG(4) << "Initialize phi tensor operants successfully";
  }

  paddle::CustomOpKernelContext ctx =
      CastPyArg2CustomOpKernelContext(PyTuple_GET_ITEM(args, 0), 0);
  std::string op_type = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 1), 1);
  bool trace_backward = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 2), 2);
  {
    eager_gil_scoped_release guard;
    VLOG(7) << "Get things for python for Custom Op: " << op_type
            << ", trace_backward is: " << trace_backward;
    auto meta_info_map = egr::Controller::Instance().GetOpMetaInfoMap();
    PADDLE_ENFORCE_NE(
        meta_info_map.find(op_type),
        meta_info_map.end(),
        paddle::platform::errors::NotFound(
            "Can't find %s in Eager OpMetaInfoMap which should be "
            "created by LoadOpMetaInfoAndRegisterOp, please make "
            "sure you registered your op first and try again. ",
            op_type));
    VLOG(7) << "Run Kernel of Custom Op: " << op_type;
    // TODO(HongyuJia): Optimize Attrs Cast naming and implementation
    std::vector<paddle::any> res_attrs = CastAttrsToTargetType(
        ctx.Attrs(),
        paddle::OpMetaInfoHelper::GetAttrs(meta_info_map.at(op_type)[0]));
    ctx.EmplaceBackAttrs(res_attrs);
    const auto& vec_map = meta_info_map.at(op_type);

    const auto& inputs =
        paddle::OpMetaInfoHelper::GetInputs(meta_info_map.at(op_type)[0]);
    const auto& outputs =
        paddle::OpMetaInfoHelper::GetOutputs(meta_info_map.at(op_type)[0]);
    const auto& inplace_map =
        paddle::OpMetaInfoHelper::GetInplaceMap(meta_info_map.at(op_type)[0]);
    // handle inplace map
    ctx.MapPlainOutputs(inputs, outputs, inplace_map);
    (*paddle::OpMetaInfoHelper::GetKernelFn(vec_map[0]))(&ctx);
    ctx.AssignInplaceOutputs();

    VLOG(7) << "Get AutogradMeta for inputs and outputs for Custom Op";
    std::vector<std::vector<egr::AutogradMeta*>> ins_auto_grad_metas;
    std::vector<std::vector<egr::AutogradMeta*>> outs_auto_grad_metas;
    VLOG(7) << "We got slot num of ins is: " << ctx.InputRange().size();
    ins_auto_grad_metas.resize(ctx.InputRange().size());
    VLOG(7) << "We got slot num of outs is: " << ctx.OutputRange().size();
    outs_auto_grad_metas.resize(ctx.OutputRange().size());

    for (size_t i = 0; i < ctx.InputRange().size(); i++) {
      ins_auto_grad_metas[i] =
          egr::EagerUtils::nullable_autograd_meta(ctx.InputsBetween(
              ctx.InputRangeAt(i).first, ctx.InputRangeAt(i).second));
    }
    for (size_t i = 0; i < ctx.OutputRange().size(); i++) {
      outs_auto_grad_metas[i] =
          egr::EagerUtils::unsafe_autograd_meta(ctx.OutputsBetweeen(
              ctx.OutputRangeAt(i).first, ctx.OutputRangeAt(i).second));
    }
    bool require_any_grad = false;
    for (size_t i = 0; i < ins_auto_grad_metas.size(); i++) {
      require_any_grad =
          require_any_grad || egr::EagerUtils::ComputeRequireGrad(
                                  trace_backward, &(ins_auto_grad_metas[i]));
    }

    // handle inplace map
    for (size_t i = 0; i < ctx.InputRange().size(); i++) {
      if (inplace_map.find(inputs[i]) != inplace_map.end()) {
        size_t input_size =
            ctx.InputRangeAt(i).second - ctx.InputRangeAt(i).first;
        size_t start_idx = ctx.InputRangeAt(i).first;
        for (size_t j = 0; j < input_size; j++) {
          egr::EagerUtils::CheckInplace(ctx.InputAt(start_idx + j),
                                        ins_auto_grad_metas[i][j],
                                        require_any_grad);
          // Bump Inplace Version
          ctx.MutableInputAt(start_idx + j).bump_inplace_version();
          VLOG(3) << "Custom operator: Tensor("
                  << ctx.InputAt(start_idx + j).name()
                  << ") uses Inplace Strategy.";
        }
      }
    }

    if (require_any_grad && (vec_map.size() > 1)) {
      VLOG(6) << " Construct Grad for Custom Op: " << op_type;
      ConstructFwdAndBwdMap(vec_map, op_type);
      for (size_t i = 0; i < outs_auto_grad_metas.size(); i++) {
        egr::EagerUtils::PassStopGradient(false, &(outs_auto_grad_metas[i]));
      }
      // Note(HongyuJia): In dygraph eager mode, CheckInplace makes sure leaf
      // nodes set stop_gradient=True. However, dygraph mode can also output
      // leaf nodes' gradients (for example, we can get x.grad after
      // x.add_(y)). To be consistent with dygraph mode, we have to
      // PassStopGradient for all inplaced ins_auto_grad_metas.
      std::unordered_map<size_t, size_t> inplace_tensor_map =
          ctx.GetInplaceTensorMap();
      for (auto pair : inplace_tensor_map) {
        egr::EagerUtils::PassStopGradient(false,
                                          &(ins_auto_grad_metas[pair.first]));
      }
      auto grad_node = std::make_shared<egr::RunCustomOpNode>(
          outs_auto_grad_metas.size(), ins_auto_grad_metas.size(), op_type);
      auto slot_map =
          egr::Controller::Instance().GetCustomEdgesSlotMap().at(op_type);
      // Prepare Grad outputs
      size_t no_grad_cnt = 0;
      for (size_t i = 0; i < ins_auto_grad_metas.size(); i++) {
        const std::vector<paddle::Tensor>& in_tensors = ctx.InputsBetween(
            ctx.InputRangeAt(i).first, ctx.InputRangeAt(i).second);

        if (slot_map[0][0].find(i) != slot_map[0][0].end()) {
          grad_node->SetGradOutMeta(in_tensors, slot_map[0][0][i]);
        } else {
          grad_node->SetGradOutMeta(
              in_tensors, ins_auto_grad_metas.size() - 1 - no_grad_cnt);
          no_grad_cnt++;
        }
      }
      // Prepare Grad inputs with grad of fwd outputs
      for (size_t i = 0; i < outs_auto_grad_metas.size(); i++) {
        const std::vector<paddle::Tensor>& out_tensors = ctx.OutputsBetweeen(
            ctx.OutputRangeAt(i).first, ctx.OutputRangeAt(i).second);

        egr::EagerUtils::SetOutRankWithSlot(&(outs_auto_grad_metas[i]), i);
        egr::EagerUtils::SetHistory(&(outs_auto_grad_metas[i]), grad_node);
        grad_node->SetGradInMeta(out_tensors, i);
      }
      // Prepare Grad inputs with fwd outputs
      for (auto it = slot_map[0][2].begin(); it != slot_map[0][2].end(); it++) {
        VLOG(7) << "Prepare fwd_outs: " << it->first
                << " to grad_inputs: " << it->second;
        grad_node->fwd_outs[it->second] =
            egr::RunCustomOpNode::ConstructTensorWrapper(
                ctx.OutputsBetweeen(ctx.OutputRangeAt(it->first).first,
                                    ctx.OutputRangeAt(it->first).second));
      }
      // Prepare Grad inputs with fwd inputs
      for (auto it = slot_map[0][3].begin(); it != slot_map[0][3].end(); it++) {
        VLOG(7) << "Prepare fwd_ins: " << it->first
                << " to grad_inputs: " << it->second;
        grad_node->fwd_ins[it->second] =
            egr::RunCustomOpNode::ConstructTensorWrapper(
                ctx.InputsBetween(ctx.InputRangeAt(it->first).first,
                                  ctx.InputRangeAt(it->first).second));
      }

      auto attrs_names =
          paddle::OpMetaInfoHelper::GetAttrs(meta_info_map.at(op_type)[1]);
      std::vector<paddle::any> attrs(attrs_names.size());
      // Prepare attrs for Grad node
      for (auto it = slot_map[0][4].begin(); it != slot_map[0][4].end(); it++) {
        VLOG(7) << "Prepare fwd attrs: " << it->first
                << " to grad_attrs: " << it->second;
        attrs[it->second] = res_attrs[it->first];
      }
      grad_node->SetAttrs(attrs);
    }
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

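// Builds a SparseCooTensor from dense indices/elements tensors plus a dense
// shape, attaching a GradNodeAccumulation when the tensor has none.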
static PyObject* eager_api_sparse_coo_tensor(PyObject* self,
                                             PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto non_zero_indices = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 2), 2);
  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 3), 3);
  paddle::Tensor tensor;
  {
    eager_gil_scoped_release guard;
    PADDLE_ENFORCE(non_zero_indices.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the non-zero indices must be a DenseTensor."));
    PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the non-zero elements must be a DenseTensor."));
    auto dense_indices =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_indices.impl());
    auto dense_elements =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
    // TODO(zhangkaihuo): After creating SparseCooTensor, call coalesced() to
    // sort and merge duplicate indices
    std::shared_ptr<phi::SparseCooTensor> coo_tensor =
        std::make_shared<phi::SparseCooTensor>(
            *dense_indices, *dense_elements, phi::make_ddim(dense_shape));
    tensor.set_impl(coo_tensor);
    auto name =
        egr::Controller::Instance().GenerateUniqueName("generated_tensor");
    tensor.set_name(name);
    auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
    autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
    if (!autograd_meta->GetMutableGradNode()) {
      VLOG(3) << "Tensor(" << name
              << ") doesn't have GradNode, add GradNodeAccumulation to it.";
      autograd_meta->SetGradNode(
          std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
    }
  }
  return ToPyObject(tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

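// Builds a SparseCsrTensor from dense crows/cols/elements tensors plus a
// dense shape; mirrors eager_api_sparse_coo_tensor above.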
static PyObject* eager_api_sparse_csr_tensor(PyObject* self,
                                             PyObject* args,
                                             PyObject* kwargs) {
  EAGER_TRY
  auto non_zero_crows = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  auto non_zero_cols = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 1), 1);
  auto non_zero_elements = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 2), 2);
  auto dense_shape = CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 3), 3);
  auto stop_gradient = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
  paddle::Tensor tensor;
  {
    eager_gil_scoped_release guard;
    PADDLE_ENFORCE(non_zero_crows.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the compressed non-zero rows must be a DenseTensor."));
    PADDLE_ENFORCE(non_zero_cols.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the non-zero cols must be a DenseTensor."));
    PADDLE_ENFORCE(non_zero_elements.is_dense_tensor(),
                   paddle::platform::errors::Fatal(
                       "the non-zero elements must be a DenseTensor."));

    auto dense_crows =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_crows.impl());
    auto dense_cols =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_cols.impl());
    auto dense_elements =
        std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
    std::shared_ptr<phi::SparseCsrTensor> csr_tensor =
        std::make_shared<phi::SparseCsrTensor>(*dense_crows,
                                               *dense_cols,
                                               *dense_elements,
                                               phi::make_ddim(dense_shape));
    tensor.set_impl(csr_tensor);
    auto name =
        egr::Controller::Instance().GenerateUniqueName("generated_tensor");
    tensor.set_name(name);
    auto autograd_meta = egr::EagerUtils::autograd_meta(&tensor);
    autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
    if (!autograd_meta->GetMutableGradNode()) {
      VLOG(3) << "Tensor(" << name
              << ") doesn't have GradNode, add GradNodeAccumulation to it.";
      autograd_meta->SetGradNode(
          std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
    }
  }
  return ToPyObject(tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

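// Registers a (pack_hook, unpack_hook) pair for the saved-tensors
// mechanism; only takes effect while grad is enabled.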
static PyObject* eager_api_register_saved_tensors_hooks(PyObject* self,
                                                        PyObject* args,
                                                        PyObject* kwargs) {
  EAGER_TRY
  if (egr::Controller::Instance().HasGrad()) {
    auto pack_hook = PyTuple_GET_ITEM(args, 0);
    auto unpack_hook = PyTuple_GET_ITEM(args, 1);
    egr::SavedTensorsHooks::GetInstance().SetHooks(
        std::make_shared<PackHook>(pack_hook),
        std::make_shared<UnPackHook>(unpack_hook));
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* eager_api_reset_saved_tensors_hooks(PyObject* self,
                                                     PyObject* args,
                                                     PyObject* kwargs) {
  EAGER_TRY
  egr::SavedTensorsHooks::GetInstance().ResetHooks();
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

#if defined(PADDLE_WITH_CUDA)
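// Asynchronously gathers rows from the pinned-memory `src` into the GPU
// `dst`: contiguous (offset, count) ranges are copied directly, then rows
// selected by `index` are staged through the pinned `buffer` before a final
// H2D copy on the current stream. Assumes float32 data and int64 indices.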
static PyObject* eager_api_async_read(PyObject* self,
                                      PyObject* args,
                                      PyObject* kwargs) {
  EAGER_TRY
  auto& src = GetTensorFromArgs("async_read", "src", args, 0, false);
  auto& dst = GetTensorFromArgs("async_read", "dst", args, 1, false);
  auto& index = GetTensorFromArgs("async_read", "index", args, 2, false);
  auto& buffer = GetTensorFromArgs("async_read", "buffer", args, 3, false);
  auto& offset = GetTensorFromArgs("async_read", "offset", args, 4, false);
  auto& count = GetTensorFromArgs("async_read", "count", args, 5, false);
  {
    eager_gil_scoped_release guard;
    PADDLE_ENFORCE_EQ(
        src.is_gpu_pinned(),
        true,
        platform::errors::InvalidArgument("Required `src` device should be "
                                          "CUDAPinnedPlace, but received %d.",
                                          src.place()));
    PADDLE_ENFORCE_EQ(
        dst.is_gpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `dst` device should be CUDAPlace, but received %d.",
            dst.place()));
    PADDLE_ENFORCE_EQ(
        index.is_cpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `index` device should be CPUPlace, but received %d.",
            index.place()));
    PADDLE_ENFORCE_EQ(buffer.is_gpu_pinned(),
                      true,
                      platform::errors::InvalidArgument(
                          "Required `buffer` device should be CUDAPinnedPlace, "
                          "but received %d.",
                          buffer.place()));
    PADDLE_ENFORCE_EQ(
        offset.is_cpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `offset` device should be CPUPlace, but received %d.",
            offset.place()));
    PADDLE_ENFORCE_EQ(
        count.is_cpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `count` device should be CPUPlace, but received %d.",
            count.place()));

    auto& src_tensor = src;
    auto* dst_tensor = &dst;
    auto& index_tensor = index;
    auto* buffer_tensor = &buffer;
    auto& offset_tensor = offset;
    auto& count_tensor = count;
    auto* dst_data = dst_tensor->mutable_data<float>(dst.place());
    const auto& deviceId = paddle::platform::GetCurrentDeviceId();

    PADDLE_ENFORCE_EQ(src_tensor.dims().size(),
                      dst_tensor->dims().size(),
                      platform::errors::InvalidArgument(
                          "`src` and `dst` should have same tensor shape, "
                          "except for the first dimension."));
    PADDLE_ENFORCE_EQ(src_tensor.dims().size(),
                      buffer_tensor->dims().size(),
                      platform::errors::InvalidArgument(
                          "`src` and `buffer` should have same tensor shape, "
                          "except for the first dimension."));
    for (int i = 1; i < src_tensor.dims().size(); i++) {
      PADDLE_ENFORCE_EQ(
          src_tensor.dims()[i],
          dst_tensor->dims()[i],
          platform::errors::InvalidArgument(
              "`src` and `dst` should have the same tensor shape, "
              "except for the first dimension."));
      PADDLE_ENFORCE_EQ(
          src_tensor.dims()[i],
          buffer_tensor->dims()[i],
          platform::errors::InvalidArgument(
              "`src` and `buffer` should have the same tensor shape, "
              "except for the first dimension."));
    }
    PADDLE_ENFORCE_EQ(index_tensor.dims().size(),
                      1,
                      platform::errors::InvalidArgument(
                          "`index` tensor should be one-dimensional."));

    auto stream = paddle::platform::get_current_stream(deviceId)->raw_stream();

    int64_t numel = 0;  // total copy length
    int64_t copy_flag = offset_tensor.dims()[0];
    int64_t size = src_tensor.numel() / src_tensor.dims()[0];

    if (copy_flag != 0) {
      PADDLE_ENFORCE_EQ(offset_tensor.dims().size(),
                        1,
                        platform::errors::InvalidArgument(
                            "`offset` tensor should be one-dimensional."));
      PADDLE_ENFORCE_EQ(count_tensor.dims().size(),
                        1,
                        platform::errors::InvalidArgument(
                            "`count` tensor should be one-dimensional."));
      PADDLE_ENFORCE_EQ(offset_tensor.numel(),
                        count_tensor.numel(),
                        platform::errors::InvalidArgument(
                            "`offset` and `count` tensor size mismatch."));
      auto* offset_data = offset_tensor.data<int64_t>();
      auto* count_data = count_tensor.data<int64_t>();
      for (int64_t i = 0; i < count_tensor.numel(); i++) {
        numel += count_data[i];
      }
      PADDLE_ENFORCE_LE(numel + index_tensor.numel(),
                        buffer_tensor->dims()[0],
                        platform::errors::InvalidArgument(
                            "Buffer tensor size is too small."));
      PADDLE_ENFORCE_LE(numel + index_tensor.numel(),
                        dst_tensor->dims()[0],
                        platform::errors::InvalidArgument(
                            "Target tensor size is too small."));

      int64_t src_offset, dst_offset = 0, c;
      auto* src_data = src_tensor.data<float>();
      for (int64_t i = 0; i < offset_tensor.numel(); i++) {
        src_offset = offset_data[i], c = count_data[i];
        PADDLE_ENFORCE_LE(src_offset + c,
                          src_tensor.dims()[0],
                          platform::errors::InvalidArgument(
                              "Invalid offset or count index."));
        PADDLE_ENFORCE_LE(dst_offset + c,
                          dst_tensor->dims()[0],
                          platform::errors::InvalidArgument(
                              "Invalid offset or count index."));
        cudaMemcpyAsync(dst_data + (dst_offset * size),
                        src_data + (src_offset * size),
                        c * size * sizeof(float),
                        cudaMemcpyHostToDevice,
                        stream);
        dst_offset += c;
      }
    } else {
      PADDLE_ENFORCE_LE(index_tensor.numel(),
                        buffer_tensor->dims()[0],
                        platform::errors::InvalidArgument(
                            "Buffer tensor size is too small."));
    }

    // Select the index data to the buffer
    auto index_select = [](const paddle::Tensor& src_tensor,
                           const paddle::Tensor& index_tensor,
                           paddle::Tensor* buffer_tensor) {
      auto* src_data = src_tensor.data<float>();
      auto* index_data = index_tensor.data<int64_t>();
      auto* buffer_data = buffer_tensor->data<float>();
      const int& slice_size = src_tensor.numel() / src_tensor.dims()[0];
      const int& copy_bytes = slice_size * sizeof(float);
      int64_t c = 0;
      for (int64_t i = 0; i < index_tensor.numel(); i++) {
        std::memcpy(buffer_data + c * slice_size,
                    src_data + index_data[i] * slice_size,
                    copy_bytes);
        c += 1;
      }
    };
    index_select(src_tensor, index_tensor, buffer_tensor);

    // Copy the data to device memory
    cudaMemcpyAsync(dst_data + (numel * size),
                    buffer_tensor->data<float>(),
                    index_tensor.numel() * size * sizeof(float),
                    cudaMemcpyHostToDevice,
                    stream);
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
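// Asynchronously scatters contiguous row ranges from the GPU `src` into the
// pinned-memory `dst` at the given (offset, count) positions, using D2H
// copies on the current stream. Assumes float32 data.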
static PyObject* eager_api_async_write(PyObject* self,
                                       PyObject* args,
                                       PyObject* kwargs) {
  EAGER_TRY
  auto& src = GetTensorFromArgs("async_write", "src", args, 0, false);
  auto& dst = GetTensorFromArgs("async_write", "dst", args, 1, false);
  auto& offset = GetTensorFromArgs("async_write", "offset", args, 2, false);
  auto& count = GetTensorFromArgs("async_write", "count", args, 3, false);
  {
    eager_gil_scoped_release guard;
    PADDLE_ENFORCE_EQ(
        src.is_gpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `src` device should be CUDAPlace, but received %d. ",
            src.place()));
    PADDLE_ENFORCE_EQ(dst.is_gpu_pinned(),
                      true,
                      platform::errors::InvalidArgument(
                          "Required `dst` device should be CUDAPinnedPlace, "
                          "but received %d. ",
                          dst.place()));
    PADDLE_ENFORCE_EQ(
        offset.is_cpu(),
        true,
        platform::errors::InvalidArgument("Required `offset` device should "
                                          "be CPUPlace, but received %d. ",
                                          offset.place()));
    PADDLE_ENFORCE_EQ(
        count.is_cpu(),
        true,
        platform::errors::InvalidArgument(
            "Required `count` device should be CPUPlace, but received %d. ",
            count.place()));

    // TODO(daisiming): In future, add index as arguments following
    // async_read.
    auto& src_tensor = src;
    auto* dst_tensor = &dst;
    auto& offset_tensor = offset;
    auto& count_tensor = count;
    const auto& deviceId = paddle::platform::GetCurrentDeviceId();

    PADDLE_ENFORCE_EQ(offset_tensor.dims().size(),
                      1,
                      platform::errors::InvalidArgument(
                          "`offset` tensor should be one-dimensional."));
    PADDLE_ENFORCE_EQ(count_tensor.dims().size(),
                      1,
                      platform::errors::InvalidArgument(
                          "`count` tensor should be one-dimensional."));
    PADDLE_ENFORCE_EQ(offset_tensor.numel(),
                      count_tensor.numel(),
                      platform::errors::InvalidArgument(
                          "`offset` and `count` tensor size mismatch."));
    PADDLE_ENFORCE_EQ(src_tensor.dims().size(),
                      dst_tensor->dims().size(),
                      platform::errors::InvalidArgument(
                          "`src` and `dst` should have the same tensor shape, "
                          "except for the first dimension."));
    for (int i = 1; i < src_tensor.dims().size(); i++) {
      PADDLE_ENFORCE_EQ(
          src_tensor.dims()[i],
          dst_tensor->dims()[i],
          platform::errors::InvalidArgument(
              "`src` and `dst` should have the same tensor shape, "
              "except for the first dimension."));
    }

    auto stream = paddle::platform::get_current_stream(deviceId)->raw_stream();

    int64_t size = src_tensor.numel() / src_tensor.dims()[0];
    auto* src_data = src_tensor.data<float>();
    auto* dst_data = dst_tensor->data<float>();
    const int64_t* offset_data = offset_tensor.data<int64_t>();
    const int64_t* count_data = count_tensor.data<int64_t>();
    int64_t src_offset = 0, dst_offset, c;
    for (int64_t i = 0; i < offset_tensor.numel(); i++) {
      dst_offset = offset_data[i], c = count_data[i];
      PADDLE_ENFORCE_LE(
          src_offset + c,
          src_tensor.dims()[0],
          platform::errors::InvalidArgument("Invalid offset or count index"));
      PADDLE_ENFORCE_LE(
          dst_offset + c,
          dst_tensor->dims()[0],
          platform::errors::InvalidArgument("Invalid offset or count index"));
      cudaMemcpyAsync(dst_data + (dst_offset * size),
                      src_data + (src_offset * size),
                      c * size * sizeof(float),
                      cudaMemcpyDeviceToHost,
                      stream);
      src_offset += c;
    }
  }
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

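// Creates a tensor backed by CUDA unified virtual addressing (UVA) from a
// numpy array, dispatching on the array dtype; an optional second argument
// selects the device id (default 0).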
static PyObject* eager_api_to_uva_tensor(PyObject* self,
                                         PyObject* args,
                                         PyObject* kwargs) {
  EAGER_TRY
  VLOG(4) << "Running in eager_api_to_uva_tensor.";
  auto new_tensor = std::shared_ptr<paddle::Tensor>(
      new paddle::Tensor(egr::Controller::Instance().GenerateUniqueName()));
  PyObject* obj = PyTuple_GET_ITEM(args, 0);
  auto array = py::cast<py::array>(py::handle(obj));

  Py_ssize_t args_num = PyTuple_Size(args);
  int64_t device_id = 0;
  if (args_num > 1) {
    PyObject* Py_device_id = PyTuple_GET_ITEM(args, 1);
    if (Py_device_id) {
      device_id = CastPyArg2AttrLong(Py_device_id, 1);
    }
  }

  if (py::isinstance<py::array_t<int32_t>>(array)) {
    SetUVATensorFromPyArray<int32_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<int64_t>>(array)) {
    SetUVATensorFromPyArray<int64_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<float>>(array)) {
    SetUVATensorFromPyArray<float>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<double>>(array)) {
    SetUVATensorFromPyArray<double>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<int8_t>>(array)) {
    SetUVATensorFromPyArray<int8_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<int16_t>>(array)) {
    SetUVATensorFromPyArray<int16_t>(new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<paddle::platform::float16>>(array)) {
    SetUVATensorFromPyArray<paddle::platform::float16>(
        new_tensor, array, device_id);
  } else if (py::isinstance<py::array_t<bool>>(array)) {
    SetUVATensorFromPyArray<bool>(new_tensor, array, device_id);
  } else {
    // obj may be any type, obj.cast<py::array>() may be failed,
    // then the array.dtype will be string of unknown meaning.
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Input object type error or incompatible array data type. "
        "tensor.set() supports arrays of bool, float16, float32, "
        "float64, int8, int16, int32, and int64; "
        "please check your input or input array data type."));
  }
  return ToPyObject(*(new_tensor.get()));
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
#endif

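// Registers a Python callable that runs once after the whole backward pass
// finishes.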
static PyObject* eager_api__add_backward_final_hook(PyObject* self,
                                                    PyObject* args,
                                                    PyObject* kwargs) {
  EAGER_TRY
  PyObject* hook_func = PyTuple_GET_ITEM(args, 0);
  egr::Controller::Instance().RegisterBackwardFinalHook(
      std::make_shared<PyVoidHook>(hook_func));
  RETURN_PY_NONE
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

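// Method table exposed on the eager core module via BindFunctions below.
// A minimal usage sketch from Python (an assumption: the exact module path
// varies across Paddle versions; in 2.x builds these typically appear under
// paddle.fluid.core.eager):
//   import paddle
//   from paddle.fluid import core
//   x = paddle.to_tensor([1.0], stop_gradient=False)
//   y = core.eager.scale(x, 2.0, 0.0, True, True)  # scale * x + bias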
PyMethodDef variable_functions[] = {
    // TODO(jiabin): Remove scale when we have final state tests
    {"scale",
     (PyCFunction)(void (*)(void))eager_api_scale,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"_add_backward_final_hook",
     (PyCFunction)(void (*)(void))eager_api__add_backward_final_hook,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"run_backward",
     (PyCFunction)(void (*)(void))eager_api_run_backward,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"run_partial_grad",
     (PyCFunction)(void (*)(void))eager_api_run_partial_grad,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"_get_custom_operator_inplace_map",
     (PyCFunction)(void (*)(
         void))eager_api__get_custom_operator_inplace_reverse_idx,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"_run_custom_op",
     (PyCFunction)(void (*)(void))eager_api_run_custom_op,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"tensor_copy",
     (PyCFunction)(void (*)(void))eager_api_tensor_copy,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"get_all_grads",
     (PyCFunction)(void (*)(void))eager_api_get_all_grads,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"get_grads_lists",
     (PyCFunction)(void (*)(void))eager_api_get_grads_lists,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"get_grads_types",
     (PyCFunction)(void (*)(void))eager_api_get_grads_types,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"read_next_tensor_list",
     (PyCFunction)(void (*)(void))eager_api_read_next_tensor_list,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"jit_function_call",
     (PyCFunction)(void (*)(void))eager_api_jit_function_call,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    /**sparse functions**/
    {"sparse_coo_tensor",
     (PyCFunction)(void (*)(void))eager_api_sparse_coo_tensor,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"sparse_csr_tensor",
     (PyCFunction)(void (*)(void))eager_api_sparse_csr_tensor,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"register_saved_tensors_hooks",
     (PyCFunction)(void (*)(void))eager_api_register_saved_tensors_hooks,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"reset_saved_tensors_hooks",
     (PyCFunction)(void (*)(void))eager_api_reset_saved_tensors_hooks,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
/**sparse functions**/
#if defined(PADDLE_WITH_CUDA)
    {"async_read",
     (PyCFunction)(void (*)(void))eager_api_async_read,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"async_write",
     (PyCFunction)(void (*)(void))eager_api_async_write,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
    {"to_uva_tensor",
     (PyCFunction)(void (*)(void))eager_api_to_uva_tensor,
     METH_VARARGS | METH_KEYWORDS,
     NULL},
#endif
    {NULL, NULL, 0, NULL}};

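// Adds variable_functions to the given Python module object.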
void BindFunctions(PyObject* module) {
  if (PyModule_AddFunctions(module, variable_functions) < 0) {
    PADDLE_THROW(platform::errors::Fatal(
        "Init Paddle error in BindFunctions (PyModule_AddFunctions)."));
    return;
  }
}

}  // namespace pybind
}  // namespace paddle