/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// disable numpy compile error
#include "paddle/fluid/pybind/eager.h"

#include <Python.h>
// Avoid a problem with copysign defined in pyconfig.h on Windows.
#ifdef copysign
#undef copysign
#endif

#include <string>
#include <vector>

#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "pybind11/detail/internals.h"
#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
#include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/framework/python_headers.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/fluid/pybind/tensor_py.h"
#include "paddle/phi/core/distributed/auto_parallel/dist_attr.h"
#include "paddle/phi/core/distributed/auto_parallel/dist_tensor.h"
#include "paddle/phi/core/string_tensor.h"
using phi::distributed::DistTensor;
using phi::distributed::TensorDistAttr;

namespace paddle {
namespace pybind {

namespace py = ::pybind11;

extern PyTypeObject* p_tensor_type;
extern PyTypeObject* p_string_tensor_type;  // For StringTensor
extern PyTypeObject* g_vartype_pytype;
extern PyTypeObject* g_framework_tensor_pytype;

PyObject* TensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) {
  PyObject* obj = type->tp_alloc(type, 0);
  if (obj) {
    auto v = reinterpret_cast<TensorObject*>(obj);
    new (&(v->tensor)) paddle::Tensor();
  }
  return obj;
}
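// Note: the placement-new of `v->tensor` above is paired with the explicit
// `self->tensor.~Tensor()` call in TensorDealloc near the end of this file;
// the object's storage itself is owned by CPython via tp_alloc/tp_free, so
// the Tensor member is never destroyed through a C++ delete.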

// TODO(jiabin): Overload this once we need more constructors in Python
void EmptyTensorInitializer(TensorObject* self,
                            const std::string& name,
                            const paddle::platform::Place& place,
                            bool persistable = false,
                            int stop_gradient = -1,
                            framework::proto::VarType::Type dtype =
                                paddle::framework::proto::VarType::FP32,
                            const std::vector<int>& dims = {0},
                            framework::proto::VarType::Type var_type =
                                paddle::framework::proto::VarType::LOD_TENSOR) {
  auto ddims = phi::make_ddim(dims);
  self->tensor.set_name(name);
  auto autograd_meta = egr::EagerUtils::autograd_meta(&(self->tensor));
  autograd_meta->SetPersistable(persistable);
  if (stop_gradient != -1) {
    autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
  }
  if (var_type == paddle::framework::proto::VarType::LOD_TENSOR) {
    // TODO(jiabin): Maybe support LOD later
    std::shared_ptr<phi::DenseTensor> dense_tensor = nullptr;
    if (dims.size() == 1 && dims[0] == 0) {
      std::shared_ptr<phi::Allocation> allocation_ptr = nullptr;
      dense_tensor = std::make_shared<phi::DenseTensor>(
          allocation_ptr,
          phi::DenseTensorMeta(paddle::framework::TransToPhiDataType(dtype),
                               ddims));
    } else {
      // TODO(dev): we need to enhance the check for ddims.
      dense_tensor = std::make_shared<phi::DenseTensor>(
          std::make_shared<phi::Allocation>(),
          phi::DenseTensorMeta(paddle::framework::TransToPhiDataType(dtype),
                               ddims));
    }
    self->tensor.set_impl(dense_tensor);
  } else if (var_type == paddle::framework::proto::VarType::SELECTED_ROWS) {
    std::shared_ptr<phi::SelectedRows> tensor =
        std::make_shared<phi::SelectedRows>();
    self->tensor.set_impl(tensor);
  }

  if (!autograd_meta->GetMutableGradNode()) {
    autograd_meta->SetGradNode(
        std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
    VLOG(3) << "Tensor(" << name
            << ") has no GradNode; adding GradNodeAccumulation "
            << autograd_meta->GradNode() << " for it.";
  }
}
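// The default `dims` of {0} acts as a sentinel for "no data yet": the
// DenseTensor is created with a null allocation and only its meta filled in.
// For illustration (assuming the type is exposed to Python as paddle.Tensor,
// which this file does not itself determine), a bare
//   t = paddle.Tensor()
// reaches this initializer with all defaults via case 1 of TensorInit below.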

void EmptyStringTensorInitializer(TensorObject* self,
                                  const std::string& name,
                                  const paddle::platform::Place& place,
                                  const std::vector<int>& dims = {}) {
  auto ddims = phi::make_ddim(dims);
  self->tensor.set_name(name);
  // Note(zhoushunjie): Only CPUPlace is supported when creating a StringTensor
  auto actual_place = platform::CPUPlace();
  // Allocate memory
  paddle::experimental::DefaultAllocator string_allocator(actual_place);
  std::shared_ptr<phi::StringTensor> string_tensor =
      std::make_shared<phi::StringTensor>(&string_allocator,
                                          phi::StringTensorMeta{ddims});
  if (phi::product(ddims) > 0) {
    string_tensor->mutable_data(actual_place);
  }
  self->tensor.set_impl(string_tensor);
}

void CreateDistTensorWithNumpyValue(TensorObject* self,
                                    const std::string& name,
                                    const paddle::platform::Place& place,
                                    const TensorDistAttr& dist_attr,
                                    const py::object& array,
                                    bool persistable = false,
                                    int stop_gradient = -1,
                                    bool zero_copy = false,
                                    framework::proto::VarType::Type dtype =
                                        paddle::framework::proto::VarType::FP32,
                                    const std::vector<int>& dims = {0}) {
#ifdef PADDLE_WITH_DISTRIBUTE
  auto ddims = phi::make_ddim(dims);
  self->tensor.set_name(name);
  auto autograd_meta = egr::EagerUtils::autograd_meta(&(self->tensor));
  autograd_meta->SetPersistable(persistable);
  if (stop_gradient != -1) {
    autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
  }

  phi::DenseTensor dense_tensor;
  if (dims.size() == 1 && dims[0] == 0) {
    dense_tensor = phi::DenseTensor(
        nullptr,
        phi::DenseTensorMeta(paddle::framework::TransToPhiDataType(dtype),
                             ddims));
  } else {
    dense_tensor = phi::DenseTensor(
        std::make_shared<phi::Allocation>(),
        phi::DenseTensorMeta(paddle::framework::TransToPhiDataType(dtype),
                             ddims));
  }

  if (platform::is_cpu_place(place)) {
    SetTensorFromPyArray<platform::CPUPlace>(
        &dense_tensor, array, place, zero_copy);
  } else if (platform::is_xpu_place(place)) {
    SetTensorFromPyArray<platform::XPUPlace>(
        &dense_tensor, array, place, zero_copy);
  } else if (platform::is_gpu_place(place)) {
    SetTensorFromPyArray<platform::CUDAPlace>(
        &dense_tensor, array, place, zero_copy);
  } else if (platform::is_cuda_pinned_place(place)) {
    SetTensorFromPyArray<platform::CUDAPinnedPlace>(
        &dense_tensor, array, place, zero_copy);
  } else if (platform::is_custom_place(place)) {
    SetTensorFromPyArray<platform::CustomPlace>(
        &dense_tensor, array, place, zero_copy);
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Place should be one of "
        "CPUPlace/XPUPlace/CUDAPlace/CUDAPinnedPlace/CustomPlace"));
  }

  auto dist_tensor =
      std::make_shared<phi::distributed::DistTensor>(dense_tensor, dist_attr);
  self->tensor.set_impl(dist_tensor);

  if (!autograd_meta->GetMutableGradNode()) {
    autograd_meta->SetGradNode(
        std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
    VLOG(3) << "Tensor(" << name
            << ") has no GradNode; adding GradNodeAccumulation "
            << autograd_meta->GradNode() << " for it.";
  }
#else
  PADDLE_THROW(platform::errors::Unavailable(
      "The numpy value-based initialization of (Dist)Tensor is not supported "
      "in the current PaddlePaddle, please recompile and install PaddlePaddle "
      "with the option of `WITH_DISTRIBUTE=ON`."));
#endif
}

void InitTensorWithNumpyValue(TensorObject* self,
                              const py::object& array,
                              const paddle::platform::Place& place,
                              bool zero_copy = false) {
  PADDLE_ENFORCE_EQ(
      self->tensor.defined(),
      true,
      paddle::platform::errors::Unavailable(
          "Calling InitTensorWithNumpyValue of Eager Tensor without "
          "EmptyTensorInitializer is forbidden. Please check your code and "
          "make sure you create an eager tensor before initializing it "
          "with NumPy."));
  phi::DenseTensor* impl_ptr =
      static_cast<phi::DenseTensor*>(self->tensor.impl().get());

  if (platform::is_cpu_place(place)) {
    SetTensorFromPyArray<platform::CPUPlace>(impl_ptr, array, place, zero_copy);
  } else if (platform::is_xpu_place(place)) {
    SetTensorFromPyArray<platform::XPUPlace>(impl_ptr, array, place, zero_copy);
  } else if (platform::is_gpu_place(place)) {
    SetTensorFromPyArray<platform::CUDAPlace>(
        impl_ptr, array, place, zero_copy);
  } else if (platform::is_cuda_pinned_place(place)) {
    SetTensorFromPyArray<platform::CUDAPinnedPlace>(
        impl_ptr, array, place, zero_copy);
  } else if (platform::is_custom_place(place)) {
    SetTensorFromPyArray<platform::CustomPlace>(
        impl_ptr, array, place, zero_copy);
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Place should be one of "
        "CPUPlace/XPUPlace/CUDAPlace/CUDAPinnedPlace/CustomPlace"));
  }
}

void InitStringTensorWithNumpyValue(TensorObject* self, const py::object& obj) {
  PADDLE_ENFORCE_EQ(
      self->tensor.defined(),
      true,
      paddle::platform::errors::Fatal(
          "Calling InitStringTensorWithNumpyValue of Eager StringTensor "
          "without EmptyStringTensorInitializer is forbidden. Please check "
          "your code and make sure you create an eager tensor before "
          "initializing it with NumPy."));
  phi::StringTensor* impl_ptr =
      static_cast<phi::StringTensor*>(self->tensor.impl().get());
  paddle::platform::Place place = impl_ptr->place();
  auto array = obj.cast<py::array>();
  if (platform::is_cpu_place(place)) {
    SetStringTensorFromPyArray<platform::CPUPlace>(impl_ptr, array, place);
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "StringTensor only supports CPUPlace now, but received %s",
        place.DebugString()));
  }
}

void InitDistTensorWithTensor(TensorObject* self,
                              const paddle::Tensor& src,
                              const paddle::platform::Place& place,
                              const std::string& name,
                              const TensorDistAttr& dist_attr) {
#ifdef PADDLE_WITH_DISTRIBUTE
  PADDLE_ENFORCE(src.is_dense_tensor(),
                 paddle::platform::errors::InvalidArgument(
                     "DistTensor can only be initialized from a DenseTensor"));
  self->tensor.set_name(name);
  if (place == src.place()) {
    std::shared_ptr<phi::DenseTensor> tensor =
        std::static_pointer_cast<phi::DenseTensor>(src.impl());
    self->tensor.set_impl(std::make_shared<DistTensor>(*tensor, dist_attr));
    VLOG(4) << "Same place, do ShareDataWith for DistTensor.";
  } else {
    std::shared_ptr<phi::DenseTensor> tensor =
        std::static_pointer_cast<phi::DenseTensor>(
            src.copy_to(place, true).impl());
    self->tensor.set_impl(std::make_shared<DistTensor>(*tensor, dist_attr));
    VLOG(4) << "Different place, do TensorCopy for DistTensor.";
  }
  if (src.get_autograd_meta()) {
    egr::EagerUtils::autograd_meta(&(self->tensor))
        ->SetPersistable(
            egr::EagerUtils::unsafe_autograd_meta(src)->Persistable());
  } else {
    egr::EagerUtils::autograd_meta(&(self->tensor))->SetPersistable(false);
  }
#else
  PADDLE_THROW(platform::errors::Unavailable(
      "The tensor-based initialization of (Dist)Tensor is not supported "
      "in the current PaddlePaddle, please recompile and install PaddlePaddle "
      "with the option of `WITH_DISTRIBUTE=ON`."));
#endif
}

void InitTensorWithTensor(TensorObject* self,
                          const paddle::Tensor& src,
                          const paddle::platform::Place& place,
                          const std::string& name) {
  self->tensor.set_name(name);
  if (place == src.place()) {
    self->tensor.set_impl(src.impl());
    VLOG(4) << "Same place, do ShareDataWith";
  } else {
    self->tensor.set_impl(src.copy_to(place, true).impl());
    VLOG(4) << "Different place, do TensorCopy";
  }
  if (src.get_autograd_meta()) {
    egr::EagerUtils::autograd_meta(&(self->tensor))
        ->SetPersistable(
            egr::EagerUtils::unsafe_autograd_meta(src)->Persistable());
  } else {
    egr::EagerUtils::autograd_meta(&(self->tensor))->SetPersistable(false);
  }
}

void InitTensorWithFrameworkTensor(TensorObject* self,
                                   const phi::DenseTensor& src,
                                   const paddle::platform::Place& place,
                                   const std::string& name) {
  self->tensor.set_name(name);
  if (place == src.place()) {
    self->tensor.set_impl(std::make_shared<phi::DenseTensor>(src));
    VLOG(4) << "Same place, do ShareDataWith";
  } else {
    auto temp = paddle::Tensor(std::make_shared<phi::DenseTensor>(src));
    self->tensor.set_impl(temp.copy_to(place, true).impl());
    VLOG(4) << "Different place, do TensorCopy";
  }
  egr::EagerUtils::autograd_meta(&(self->tensor))->SetPersistable(false);
}

void InitStringTensorWithStringTensor(TensorObject* self,
                                      const paddle::Tensor& src,
                                      const paddle::platform::Place& place,
                                      const std::string& name) {
  self->tensor.set_name(name);
  auto impl = std::static_pointer_cast<phi::StringTensor>(src.impl());
  self->tensor.set_impl(impl);
  VLOG(4)
      << "Do ShareDataWith when using StringTensor to initialize StringTensor";
}

py::object ParsePyArray(
    std::unordered_map<std::string, PyObject*> kws_map,
    std::unordered_map<std::string, Py_ssize_t> kw_order_map,
    PyObject* args,
    bool flag_kwargs,
    Py_ssize_t args_num) {
  py::object numpy_value = py::object();

  if (kw_order_map["value"] <= args_num) {
    numpy_value = py::object(
        py::handle(PyTuple_GET_ITEM(args, kw_order_map["value"] - 1)), true);
  } else {
    if (flag_kwargs && kws_map["value"] != nullptr) {
      numpy_value = py::object(py::handle(kws_map["value"]), true);
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "The first expected argument is {value: PyArray}, "
          "but could not parse the first argument {value: PyArray} "
          "successfully. "
          "Please check your input first and make sure you are on the right "
          "way."));
    }
  }
  return numpy_value;
}
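// All Parse* helpers below share one convention: `kw_order_map` maps an
// argument name to its 1-based positional slot, so a value is read from
// `args` at index `kw_order_map[name] - 1` when enough positional arguments
// were given, and looked up in `kws_map` otherwise. For example, with
// {"value", 1} and {"place", 2}, the call Tensor(arr, some_place) resolves
// both names positionally, while Tensor(arr, place=some_place) resolves
// "value" positionally and "place" from the kwargs map.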

paddle::platform::Place ParsePlace(
    std::unordered_map<std::string, PyObject*> kws_map,
    std::unordered_map<std::string, Py_ssize_t> kw_order_map,
    PyObject* args,
    bool flag_kwargs,
    Py_ssize_t args_num) {
  paddle::platform::Place place =
      egr::Controller::Instance().GetExpectedPlace();

  if (kw_order_map["place"] <= args_num) {
    place = CastPyArg2Place(PyTuple_GET_ITEM(args, kw_order_map["place"] - 1),
                            kw_order_map["place"] - 1);
  } else {
    if (flag_kwargs && kws_map["place"] != nullptr) {
      place = CastPyArg2Place(kws_map["place"], 0);
    } else {
      // default
      return place;
    }
  }
  return place;
}

TensorDistAttr ParseDistAttrArgs(
    std::unordered_map<std::string, PyObject*> kws_map,
    std::unordered_map<std::string, Py_ssize_t> kw_order_map,
    PyObject* args,
    bool flag_kwargs,
    Py_ssize_t args_num) {
  TensorDistAttr dist_attr;
  if (kw_order_map["dist_attr"] <= args_num) {
    dist_attr = CastPyArg2DistAttr(
        PyTuple_GET_ITEM(args, kw_order_map["dist_attr"] - 1),
        kw_order_map["dist_attr"] - 1);
  } else if (flag_kwargs && kws_map["dist_attr"] != nullptr) {
    dist_attr = CastPyArg2DistAttr(kws_map["dist_attr"], 0);
  }
  return dist_attr;
}
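// An empty TensorDistAttr acts as the "not distributed" signal: callers such
// as AutoInitTensorByPyArray and AutoInitTensorByTensor below check
// dist_attr.empty() and only take the DistTensor construction path when a
// dist_attr was actually supplied.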

// boolean arguments: zero_copy, stop_gradient, persistable
int ParseBooleanArgs(std::string key,
                     std::unordered_map<std::string, PyObject*> kws_map,
                     std::unordered_map<std::string, Py_ssize_t> kw_order_map,
                     PyObject* args,
                     bool flag_kwargs,
                     Py_ssize_t args_num) {
  int res = -1;

  if (kw_order_map[key] <= args_num) {
    res = static_cast<int>(CastPyArg2AttrBoolean(
        PyTuple_GET_ITEM(args, kw_order_map[key] - 1), kw_order_map[key] - 1));
  } else {
    if (flag_kwargs && kws_map[key] != nullptr) {
      res = static_cast<int>(CastPyArg2AttrBoolean(kws_map[key], 0));
    }
  }
  return res;
}
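// The tri-state int return (-1 = not supplied, 0/1 = explicit boolean) lets
// callers distinguish an absent flag from an explicit False:
// AutoInitTensorByPyArray converts persistable/zero_copy via `1 == ...` and
// forwards stop_gradient's possible -1 unchanged into EmptyTensorInitializer.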

std::string ParseName(std::unordered_map<std::string, PyObject*> kws_map,
                      std::unordered_map<std::string, Py_ssize_t> kw_order_map,
                      PyObject* args,
                      bool flag_kwargs,
                      Py_ssize_t args_num,
                      std::string unique_name_prefix = "generated_tensor") {
  std::string act_name = "";
  if (kw_order_map["name"] <= args_num) {
    PyObject* name_obj = PyTuple_GET_ITEM(args, kw_order_map["name"] - 1);
    if (name_obj == Py_None) {
      act_name =
          egr::Controller::Instance().GenerateUniqueName(unique_name_prefix);
    } else {
      act_name = CastPyArg2AttrString(name_obj, kw_order_map["name"] - 1);
    }
  } else {
    if (flag_kwargs) {
      if ((kws_map["name"] == nullptr) || (kws_map["name"] == Py_None)) {
        act_name =
            egr::Controller::Instance().GenerateUniqueName(unique_name_prefix);
      } else {
        act_name = CastPyArg2AttrString(kws_map["name"], 0);
      }
    } else {
      act_name =
          egr::Controller::Instance().GenerateUniqueName(unique_name_prefix);
    }
  }
  return act_name;
}

// Automatically initialize a Tensor from a PyArray (the first argument is a
// PyArray; positional args and kwargs may be mixed).
void AutoInitTensorByPyArray(TensorObject* py_tensor_ptr,
                             std::unordered_map<std::string, PyObject*> kws_map,
                             PyObject* args,
                             bool flag_kwargs,
                             Py_ssize_t args_num) {
  // The first argument of the Tensor constructor is PyArray;
  // there are 7 arguments to construct the new Tensor.
  // kw_order_map's keys are the arguments of the constructor,
  // and its values are their respective positions.
  // If you want to update this constructor with new arguments,
  // update this map and add or change the related code.
  std::unordered_map<std::string, Py_ssize_t> kw_order_map{{"value", 1},
                                                           {"place", 2},
                                                           {"persistable", 3},
                                                           {"zero_copy", 4},
                                                           {"name", 5},
                                                           {"stop_gradient", 6},
                                                           {"dist_attr", 7}};

  py::object numpy_value = py::object();
  paddle::platform::Place place =
      egr::Controller::Instance().GetExpectedPlace();
  bool persistable = false;
  bool zero_copy = false;
  std::string act_name = "";
  int stop_gradient = -1;

  numpy_value =
      ParsePyArray(kws_map, kw_order_map, args, flag_kwargs, args_num);
  place = ParsePlace(kws_map, kw_order_map, args, flag_kwargs, args_num);
  persistable =
      (1 ==
       ParseBooleanArgs(
           "persistable", kws_map, kw_order_map, args, flag_kwargs, args_num));
  zero_copy =
      (1 ==
       ParseBooleanArgs(
           "zero_copy", kws_map, kw_order_map, args, flag_kwargs, args_num));
  act_name = ParseName(kws_map, kw_order_map, args, flag_kwargs, args_num);
  stop_gradient = ParseBooleanArgs(
      "stop_gradient", kws_map, kw_order_map, args, flag_kwargs, args_num);

  TensorDistAttr dist_attr =
      ParseDistAttrArgs(kws_map, kw_order_map, args, flag_kwargs, args_num);

  if (!dist_attr.empty()) {
    CreateDistTensorWithNumpyValue(py_tensor_ptr,
                                   act_name,
                                   place,
                                   dist_attr,
                                   numpy_value,
                                   persistable,
                                   stop_gradient,
                                   zero_copy);
    return;
  }

  EmptyTensorInitializer(
      py_tensor_ptr, act_name, place, persistable, stop_gradient);
  InitTensorWithNumpyValue(py_tensor_ptr, numpy_value, place, zero_copy);
}

// Automatically initialize a Tensor from a Tensor or a phi::DenseTensor
// (positional args and kwargs may be mixed).
void AutoInitTensorByTensor(TensorObject* py_tensor_ptr,
                            std::unordered_map<std::string, PyObject*> kws_map,
                            PyObject* args,
                            bool flag_kwargs,
                            Py_ssize_t args_num,
                            bool init_by_egr_tensor = true) {
  // The first argument of the Tensor constructor is a Tensor or a framework
  // Tensor; there are 4 arguments to construct the new Tensor.
  // kw_order_map's keys are the arguments of the constructor,
  // and its values are their respective positions.
  // If you want to update this constructor with new arguments,
  // update this map and add or change the related code.
  std::unordered_map<std::string, Py_ssize_t> kw_order_map{
      {"value", 1}, {"place", 2}, {"name", 3}, {"dist_attr", 4}};

  paddle::platform::Place place =
      egr::Controller::Instance().GetExpectedPlace();
  std::string act_name = "";

  place = ParsePlace(kws_map, kw_order_map, args, flag_kwargs, args_num);
  act_name = ParseName(kws_map, kw_order_map, args, flag_kwargs, args_num);

  TensorDistAttr dist_attr =
      ParseDistAttrArgs(kws_map, kw_order_map, args, flag_kwargs, args_num);

  if (init_by_egr_tensor) {
    paddle::Tensor src_tensor;
    if (kw_order_map["value"] <= args_num) {
      src_tensor =
          CastPyArg2Tensor(PyTuple_GET_ITEM(args, kw_order_map["value"] - 1),
                           kw_order_map["value"] - 1);
    } else {
      if (flag_kwargs && kws_map["value"] != nullptr) {
        src_tensor = CastPyArg2Tensor(kws_map["value"], 0);
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The first expected keyword argument is {value: Tensor}, "
            "but could not parse the first argument {value: Tensor} "
            "successfully. "
            "Please check your input first and make sure you are on the right "
            "way."));
      }
    }

    if (!dist_attr.empty()) {
      InitDistTensorWithTensor(
          py_tensor_ptr, src_tensor, place, act_name, dist_attr);
    } else {
      InitTensorWithTensor(py_tensor_ptr, src_tensor, place, act_name);
    }
  } else {
    // init by framework tensor
    phi::DenseTensor src_tensor;
    if (kw_order_map["value"] <= args_num) {
      src_tensor = CastPyArg2FrameworkTensor(
          PyTuple_GET_ITEM(args, kw_order_map["value"] - 1),
          kw_order_map["value"] - 1);
    } else {
      if (flag_kwargs && kws_map["value"] != nullptr) {
        src_tensor = CastPyArg2FrameworkTensor(kws_map["value"], 0);
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The first expected argument is {value: phi::DenseTensor}, "
            "but could not parse the first argument {value: phi::DenseTensor} "
            "successfully. "
            "Please check your input first and make sure you are on the right "
            "way."));
      }
    }
    InitTensorWithFrameworkTensor(py_tensor_ptr, src_tensor, place, act_name);
  }
}

void AutoInitStringTensorByPyArray(
    TensorObject* py_tensor_ptr,
    std::unordered_map<std::string, PyObject*> kws_map,
    PyObject* args,
    bool flag_kwargs,
    Py_ssize_t args_num) {
  // The first argument of the StringTensor constructor is PyArray.
  // kw_order_map's keys are the arguments of the constructor,
  // and its values are their respective positions.
  // If you want to update this constructor with new arguments,
  // update this map and add or change the related code.
  std::unordered_map<std::string, Py_ssize_t> kw_order_map{{"value", 1},
                                                           {"name", 2}};
  py::object numpy_value = py::object();
  paddle::platform::Place place =
      egr::Controller::Instance().GetExpectedPlace();
  std::string act_name = "";

  numpy_value =
      ParsePyArray(kws_map, kw_order_map, args, flag_kwargs, args_num);
  act_name = ParseName(kws_map,
                       kw_order_map,
                       args,
                       flag_kwargs,
                       args_num,
                       "generated_string_tensor");
  EmptyStringTensorInitializer(py_tensor_ptr, act_name, place);
  InitStringTensorWithNumpyValue(py_tensor_ptr, numpy_value);
}

void AutoInitStringTensorByStringTensor(
    TensorObject* py_tensor_ptr,
    std::unordered_map<std::string, PyObject*> kws_map,
    PyObject* args,
    bool flag_kwargs,
    Py_ssize_t args_num) {
  // The first argument of the constructor is a StringTensor.
  // kw_order_map's keys are the arguments of the constructor,
  // and its values are their respective positions.
  // If you want to update this constructor with new arguments,
  // update this map and add or change the related code.
  std::unordered_map<std::string, Py_ssize_t> kw_order_map{{"value", 1},
                                                           {"name", 2}};

  paddle::platform::Place place =
      egr::Controller::Instance().GetExpectedPlace();
  std::string act_name = "";

  act_name = ParseName(kws_map,
                       kw_order_map,
                       args,
                       flag_kwargs,
                       args_num,
                       "generated_string_tensor");
  paddle::Tensor src_tensor;
  if (kw_order_map["value"] <= args_num) {
    src_tensor =
        CastPyArg2Tensor(PyTuple_GET_ITEM(args, kw_order_map["value"] - 1),
                         kw_order_map["value"] - 1);
  } else {
    if (flag_kwargs && kws_map["value"] != nullptr) {
      src_tensor = CastPyArg2Tensor(kws_map["value"], 0);
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "The first expected keyword argument is {value: Tensor}, "
          "but could not parse the first argument {value: Tensor} "
          "successfully. "
          "Please check your input first and make sure you are on the right "
          "way."));
    }
  }
  InitStringTensorWithStringTensor(py_tensor_ptr, src_tensor, place, act_name);
}

PyDoc_STRVAR(  // NOLINT
    TensorDoc,
    R"DOC(Tensor($self, /, value, place, persistable, zero_copy, name, stop_gradient, dims, dtype, type)
--

Tensor is the basic data structure in PaddlePaddle. There are some ways to create a Tensor:

- Use the existing ``data`` to create a Tensor, please refer to :ref:`api_paddle_to_tensor`.
- Create a Tensor with a specified ``shape``, please refer to :ref:`api_paddle_ones`,
  :ref:`api_paddle_zeros`, :ref:`api_paddle_full`.
- Create a Tensor with the same ``shape`` and ``dtype`` as other Tensor, please refer to
  :ref:`api_paddle_ones_like`, :ref:`api_paddle_zeros_like`, :ref:`api_paddle_full_like`.
)DOC");

/** We should have init function with signature:
 * 1.
 * def __init__ ()
 * 2.
 * def __init__ (
 * ** dtype: paddle::framework::proto::VarType::Type,
 * ** dims: vector<int>,
 * ** name: std::string,
 * ** type: paddle::framework::proto::VarType::LodTensor,
 * ** persistable: bool)
 * 3. (multi-place)
 * (should have at least one parameter, one parameter equals to case 4, zero
 * parameter equals to case 1)
 * def __init__ (
 * ** value: ndarray,
 * ** place: paddle::platform::Place,
 * ** persistable: bool,
 * ** zero_copy: bool,
 * ** name: std::string,
 * ** stop_gradient: bool,
 * ** dist_attr: phi::distributed::TensorDistAttr)
 * 4.
 * def __init__ (
 * ** value: ndarray)
 * 5.
 * def __init__ (
 * ** tensor: Tensor)
 * 6. (multi-place)
 * (should have at least one parameter, one parameter equals to case 5, zero
 * parameter equals to case 1.)
 * def __init__ (
 * ** tensor: Tensor,
 * ** place: paddle::platform::Place,
 * ** name: std::string,
 * ** dist_attr: phi::distributed::TensorDistAttr)
 * 7. (multi-place) (should have at least one parameter, one parameter similar
 * to case 5, zero parameter equals to case 1.)
 * def __init__ (
 * ** tensor: FrameworkTensor,
 * ** place: paddle::platform::Place,
 * ** name: std::string)
 *  **/
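// Illustrative Python-side calls for the cases above (a sketch only; it
// assumes the type is exposed to Python as paddle.Tensor, which is not
// determined in this file, and that numpy is imported as np):
//   t = paddle.Tensor()                                  # case 1
//   t = paddle.Tensor(np.ones([2, 3]))                   # case 4
//   t = paddle.Tensor(np.ones([2, 3]), some_place,
//                     True, False, "x", False)           # case 3
//   t = paddle.Tensor(other_tensor)                      # case 5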
int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
  EAGER_TRY
  // set a flag to record whether kwargs are used
  bool flag_kwargs = false;
  if (kwargs) flag_kwargs = true;

  // all kwargs
  PyObject* kw_zero_copy = nullptr;
  PyObject* kw_persistable = nullptr;
  PyObject* kw_stop_gradient = nullptr;

  PyObject* kw_value = nullptr;  // receive PyArray or Tensor
  PyObject* kw_place = nullptr;
  PyObject* kw_name = nullptr;
  PyObject* kw_dims = nullptr;
  PyObject* kw_dtype = nullptr;
  PyObject* kw_type = nullptr;
  PyObject* kw_dist_attr = nullptr;

  // the keywords argument
  static char* kwlist[] = {const_cast<char*>("value"),  // NOLINT
                           const_cast<char*>("place"),
                           const_cast<char*>("persistable"),
                           const_cast<char*>("zero_copy"),
                           const_cast<char*>("name"),
                           const_cast<char*>("stop_gradient"),
                           const_cast<char*>("dims"),
                           const_cast<char*>("dtype"),
                           const_cast<char*>("type"),
                           const_cast<char*>("dist_attr"),
                           nullptr};

  // 'O' Store a Python object (without any conversion) in a C object pointer,
  // '|' Indicates that the remaining arguments in the Python argument list are
  // optional.
  // PyArg_ParseTupleAndKeywords can parse the parameters of a function that
  // takes both positional and keyword parameters into local variables,
  // which covers case2, case3, case4, case5, case6, and case7.
  bool flag_ = PyArg_ParseTupleAndKeywords(args,
                                           kwargs,
                                           "|OOOOOOOOOO",
                                           kwlist,
                                           &kw_value,
                                           &kw_place,
                                           &kw_persistable,
                                           &kw_zero_copy,
                                           &kw_name,
                                           &kw_stop_gradient,
                                           &kw_dims,
                                           &kw_dtype,
                                           &kw_type,
                                           &kw_dist_attr);

  // helper map
  std::unordered_map<std::string, PyObject*> kws_map{
      {"value", kw_value},
      {"place", kw_place},
      {"persistable", kw_persistable},
      {"zero_copy", kw_zero_copy},
      {"name", kw_name},
      {"stop_gradient", kw_stop_gradient},
      {"dims", kw_dims},
      {"dtype", kw_dtype},
      {"type", kw_type},
      {"dist_attr", kw_dist_attr}};

  PADDLE_ENFORCE_EQ(flag_,
                    true,
                    paddle::platform::errors::PreconditionNotMet(
                        "Could not parse args and kwargs successfully, "
                        "please check your input first and make "
                        "sure you are on the right way. "
                        "The expected arguments are as follows: ("
                        "value, place, persistable, zero_copy, "
                        "name, stop_gradient, dims, dtype, type, dist_attr)"));

  PADDLE_ENFORCE_NOT_NULL(
      self,
      paddle::platform::errors::Fatal(
          "Calling __init__ of Eager Tensor without __new__ is "
          "forbidden. Please check your code and make sure you create the "
          "eager tensor with __new__ before initializing it."));

  auto py_tensor_ptr = reinterpret_cast<TensorObject*>(self);

  Py_ssize_t args_num = PyTuple_Size(args);
  VLOG(6) << " args_num: " << args_num;

  // args_num == 0 means that there are no positional arguments.
  if (args_num == (Py_ssize_t)0) {
    if (!flag_kwargs) {
      // case 1
      VLOG(6) << "Calling case1's initializer.";
      EmptyTensorInitializer(
          py_tensor_ptr,
          egr::Controller::Instance().GenerateUniqueName("generated_tensor"),
          egr::Controller::Instance().GetExpectedPlace());
      return 0;
    } else {  // no positional args, all arguments are kwargs
      if (kw_value != nullptr) {
        if (pybind11::detail::npy_api::get().PyArray_Check_(kw_value)) {
          VLOG(6) << "Calling case3's or case4's initializer";
          AutoInitTensorByPyArray(
              py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
          return 0;
        } else if (PyObject_TypeCheck(kw_value, p_tensor_type)) {
          VLOG(6) << "Calling case5's or case6's initializer";
          AutoInitTensorByTensor(
              py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
          return 0;
        } else if (PyObject_TypeCheck(kw_value, g_framework_tensor_pytype)) {
          VLOG(6) << "Calling case7's initializer.";
          AutoInitTensorByTensor(py_tensor_ptr,
                                 kws_map,
                                 args,
                                 flag_kwargs,
                                 args_num,
                                 /* false means not init by egr tensor*/ false);
          return 0;
        } else {
          PADDLE_THROW(platform::errors::InvalidArgument(
              "Could not parse the first keyword argument successfully, "
              "the first keyword argument is value, but it should be PyArray "
              "or Tensor or phi::DenseTensor. "
              "Please check your input first and make sure you are on the "
              "right way."));
        }
      } else if (kw_dtype != nullptr &&
                 PyObject_TypeCheck(kw_dtype, g_vartype_pytype)) {
        VLOG(6) << "Calling case2's initializer";

        PADDLE_ENFORCE_NOT_NULL(
            kw_dims,
            paddle::platform::errors::InvalidArgument(
                "Calling __init__ of Eager Tensor with NULL dims is "
                "forbidden. Please check your code and make sure you new a "
                "dims before calling this constructor."));

        PADDLE_ENFORCE_NOT_NULL(
            kw_name,
            paddle::platform::errors::InvalidArgument(
                "Calling __init__ of Eager Tensor with NULL name is "
                "forbidden. Please check your code and make sure you new a "
                "name before calling this constructor."));

        PADDLE_ENFORCE_NOT_NULL(
            kw_dtype,
            paddle::platform::errors::InvalidArgument(
                "Calling __init__ of Eager Tensor with NULL dtype is "
                "forbidden. Please check your code and make sure you new a "
                "dtype before calling this constructor."));

        PADDLE_ENFORCE_NOT_NULL(
            kw_persistable,
            paddle::platform::errors::InvalidArgument(
                "Calling __init__ of Eager Tensor with NULL persistable is "
                "forbidden. Please check your code and make sure you new a "
                "persistable before calling this constructor."));

        paddle::framework::proto::VarType::Type dtype =
            CastPyArg2ProtoType(kw_dtype, 0);
        std::vector<int> dims = CastPyArg2VectorOfInt(kw_dims, 0);

        std::string act_name = "";
        if (kw_name == Py_None) {
          act_name = egr::Controller::Instance().GenerateUniqueName(
              "generated_tensor");
        } else {
          act_name = CastPyArg2AttrString(kw_name, 0);
        }

        paddle::framework::proto::VarType::Type var_type =
            CastPyArg2ProtoType(kw_type, 0);
        bool persistable = CastPyArg2AttrBoolean(kw_persistable, 0);

        EmptyTensorInitializer(py_tensor_ptr,
                               act_name,
                               egr::Controller::Instance().GetExpectedPlace(),
                               persistable,
                               /* stop_gradient */ -1,
                               dtype,
                               dims,
                               var_type);

        return 0;
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "This initializer supports constructing a Tensor from a numpy "
            "value or a tensor (Tensor or phi::DenseTensor) "
            "with python kwargs, and also supports dtype to init an empty "
            "Tensor. "
            "Please check your input first and make sure you call an existing "
            "constructor."));
      }
    }
  } else if (args_num == (Py_ssize_t)1 || args_num == (Py_ssize_t)2 ||
             args_num == (Py_ssize_t)3) {
C
co63oc 已提交
940
    // 1 to 3 position args, remaining arguments are kwargs
941 942 943
    PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
    if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
      VLOG(6) << "Calling case3's or case4's initializer.";
944 945
      AutoInitTensorByPyArray(
          py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
946
      return 0;
947
    } else if (PyObject_TypeCheck(arg0_ptr, p_tensor_type)) {
948
      VLOG(6) << "Calling case5's or case6's initializer.";
949 950
      AutoInitTensorByTensor(
          py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
951
      return 0;
952
    } else if (PyObject_TypeCheck(arg0_ptr, g_framework_tensor_pytype)) {
953
      VLOG(6) << "Calling case7's initializer.";
954 955 956 957
      AutoInitTensorByTensor(py_tensor_ptr,
                             kws_map,
                             args,
                             flag_kwargs,
958 959
                             args_num,
                             /* false means not init by egr tensor*/ false);
960 961 962
      return 0;
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
963
          "We support construct Tensor from numpy value "
964
          "or tensor(Tensor or phi::DenseTensor) "
965
          "with python args and kwargs by this initializer, "
966
          "but the first argument should be PyArray or Tensor or "
967
          "phi::DenseTensor. "
968 969
          "Please check your input first and make sure you call the existed "
          "constructor."));
970
    }
971
  } else if (args_num == (Py_ssize_t)4) {
    // 4 positional args, remaining arguments are kwargs
    PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
    if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
      VLOG(6) << "Calling case3's or case4's initializer.";
      AutoInitTensorByPyArray(
          py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
      return 0;
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Incompatible constructor arguments: "
          "there are 4 positional args and the remaining arguments are "
          "kwargs, but the first positional arg should be PyArray. "
          "Please check your code and make sure the first positional arg is "
          "PyArray."));
    }
  } else if (args_num == (Py_ssize_t)5) {
    if (!flag_kwargs) {
      PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
      if (PyObject_TypeCheck(arg0_ptr, g_vartype_pytype)) {
        VLOG(6) << "Calling case2's initializer.";
        paddle::framework::proto::VarType::Type dtype =
            CastPyArg2ProtoType(PyTuple_GET_ITEM(args, 0), 0);
        std::vector<int> dims =
            CastPyArg2VectorOfInt(PyTuple_GET_ITEM(args, 1), 1);
        std::string act_name = "";
        PyObject* name_obj = PyTuple_GET_ITEM(args, 2);
        if (name_obj == Py_None) {
          act_name = egr::Controller::Instance().GenerateUniqueName(
              "generated_tensor");
        } else {
          act_name = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 2), 2);
        }
        paddle::framework::proto::VarType::Type var_type =
            CastPyArg2ProtoType(PyTuple_GET_ITEM(args, 3), 3);
        bool persistable = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
        EmptyTensorInitializer(py_tensor_ptr,
                               act_name,
                               egr::Controller::Instance().GetExpectedPlace(),
                               persistable,
                               -1,
                               dtype,
                               dims,
                               var_type);
        return 0;
      } else if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
        VLOG(6) << "Calling case3's initializer.";
        AutoInitTensorByPyArray(
            py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
        return 0;
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Incompatible constructor arguments: "
            "there are only 5 positional args, "
            "but the first positional arg should be PyArray or dtype. "
            "Please check your code and make sure you call an existing "
            "constructor."));
      }
    } else {  // five positional args, remaining arguments are kwargs
      PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
      if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
        VLOG(6) << "Calling case3's or case4's initializer";
        AutoInitTensorByPyArray(
            py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
        return 0;
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Incompatible constructor arguments: "
            "there are 5 positional args and the remaining arguments are "
            "kwargs, but the first positional arg should be PyArray. "
            "Please check your code and make sure the first positional arg is "
            "PyArray."));
      }
    }
  } else if (args_num == (Py_ssize_t)6) {
    if (!flag_kwargs) {
      // case 3
      VLOG(6) << "Calling case3's initializer.";
      AutoInitTensorByPyArray(
          py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
      return 0;
    } else {  // six positional args plus remaining kwargs is not a valid
              // combination
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Incompatible constructor arguments: "
          "there are 6 positional args and the remaining arguments are "
          "kwargs. "
          "Please check your code and make sure the first positional arg is "
          "PyArray."));
    }
  } else {
    PADDLE_THROW(platform::errors::Fatal(
        "Could not find the expected number of args; please check your call "
        "and make sure you call an existing constructor."));
  }

  return -1;
  EAGER_CATCH_AND_THROW_RETURN_NEG
}

/** We should have init function with signature:
 * 1.
 * def __init__ ()
 *
 * 2.
 * def __init__ (
 * ** dims: vector<int>,
 * ** name: std::string)
 *
 * 3.
 * (should have at least one parameter, one parameter equals to case 4, zero
 * parameter equals to case 1)
 * def __init__ (
 * ** value: ndarray,
 * ** zero_copy: bool,
 * ** name: std::string)
 *
 * 4.
 * def __init__ (
 * ** value: ndarray)
 *
 * 5.
 * def __init__ (
 * ** tensor: Tensor)
 *
 * 6.
 * (should have at least one parameter, one parameter equals to case 5, zero
 * parameter equals to case 1.)
 * def __init__ (
 * ** tensor: Tensor,
 * ** name: std::string)
 * **/
int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
  // set a flag to record whether kwargs are used
  bool flag_kwargs = false;
  if (kwargs) flag_kwargs = true;

  // all kwargs
  PyObject* kw_zero_copy = nullptr;

  PyObject* kw_value = nullptr;  // receive PyArray or Tensor
  PyObject* kw_name = nullptr;
  PyObject* kw_dims = nullptr;

  // the keywords argument
  static char* kwlist[] = {const_cast<char*>("value"),  // NOLINT
                           const_cast<char*>("zero_copy"),
                           const_cast<char*>("name"),
                           const_cast<char*>("dims"),
                           nullptr};
  // 'O' Store a Python object (without any conversion) in a C object pointer,
  // '|' Indicates that the remaining arguments in the Python argument list are
  // optional.
  // PyArg_ParseTupleAndKeywords can parse the parameters of a function that
  // takes both positional and keyword parameters into local variables,
  // which covers case1, case2, case3, case4, case5, and case6.
  bool flag_ = PyArg_ParseTupleAndKeywords(args,
                                           kwargs,
                                           "|OOOO",
                                           kwlist,
                                           &kw_value,
                                           &kw_zero_copy,
                                           &kw_name,
                                           &kw_dims);

  // helper map
  std::unordered_map<std::string, PyObject*> kws_map{
      {"value", kw_value},
      {"zero_copy", kw_zero_copy},
      {"name", kw_name},
      {"dims", kw_dims}};

  PADDLE_ENFORCE_EQ(flag_,
                    true,
                    paddle::platform::errors::PreconditionNotMet(
                        "Could not parse args and kwargs successfully, "
                        "please check your input first and make "
                        "sure you are on the right way. "
                        "The expected arguments are as follows: ("
                        "value, zero_copy, name, dims)"));

  PADDLE_ENFORCE_NOT_NULL(
      self,
      paddle::platform::errors::Fatal(
          "Calling __init__ of Eager Tensor without __new__ is "
          "forbidden. Please check your code and make sure you create the "
          "eager tensor with __new__ before initializing it."));

  auto py_tensor_ptr = reinterpret_cast<TensorObject*>(self);

  Py_ssize_t args_num = PyTuple_Size(args);
  VLOG(6) << " args_num: " << args_num;
  // args_num == 0 means that there are no positional arguments.
  if (args_num == (Py_ssize_t)0) {
    if (!flag_kwargs) {
      // case 1
      VLOG(6) << "Calling case1's string initializer.";
      EmptyStringTensorInitializer(
          py_tensor_ptr,
          egr::Controller::Instance().GenerateUniqueName(
              "generated_string_tensor"),
          egr::Controller::Instance().GetExpectedPlace());
      return 0;
    } else {
      if (kw_value != nullptr) {
        if (pybind11::detail::npy_api::get().PyArray_Check_(kw_value)) {
          VLOG(6) << "Calling case3's or case4's string initializer";
          AutoInitStringTensorByPyArray(
              py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
          return 0;
        } else if (PyObject_TypeCheck(kw_value, p_string_tensor_type)) {
          VLOG(6) << "Calling case5's or case6's string initializer";
          AutoInitStringTensorByStringTensor(
              py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
          return 0;
        } else {
          PADDLE_THROW(platform::errors::InvalidArgument(
              "Could not parse the first keyword argument successfully, "
              "the first keyword argument is value, but it should be PyArray "
              "or StringTensor. "
              "Please check your input first and make sure you are on the "
              "right way."));
        }
      } else if (kw_dims != nullptr) {
        VLOG(6) << "Calling case2's string initializer.";
        std::unordered_map<std::string, Py_ssize_t> kw_order_map{{"dims", 1},
                                                                 {"name", 2}};

        std::vector<int> dims = CastPyArg2VectorOfInt(kw_dims, 0);
        std::string act_name = ParseName(kws_map,
                                         kw_order_map,
                                         args,
                                         flag_kwargs,
                                         args_num,
                                         "generated_string_tensor");
        EmptyStringTensorInitializer(
            py_tensor_ptr,
            act_name,
            egr::Controller::Instance().GetExpectedPlace(),
            dims);
        return 0;
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "This initializer supports constructing a Tensor from a numpy "
            "value or a StringTensor with python kwargs, and also supports "
            "dims to init an empty StringTensor. "
            "Please check your input first and make sure you call an existing "
            "constructor."));
      }
    }
  } else if (args_num == (Py_ssize_t)1) {  // case 3 ~ 6
C
co63oc 已提交
1221
    // 1 position args, remaining arguments are kwargs
J
Jack Zhou 已提交
1222 1223 1224
    PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
    if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
      VLOG(6) << "Calling case3's or case4's string initializer.";
1225 1226
      AutoInitStringTensorByPyArray(
          py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
J
Jack Zhou 已提交
1227
      return 0;
1228
    } else if (PyObject_TypeCheck(arg0_ptr, p_string_tensor_type)) {
J
Jack Zhou 已提交
1229
      VLOG(6) << "Calling case5's or case6's string initializer.";
      AutoInitStringTensorByStringTensor(
          py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
      return 0;
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Could not parse the first keyword argument successfully, "
          "the first keyword argument is value, but it should be PyArray "
          "or StringTensor."
          "Please check your input first and make sure you are on the "
          "right way."));
    }
  } else if (args_num == (Py_ssize_t)2) {  // case 2
    // 2 positional args
    if (!flag_kwargs) {
      PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
      if (PyObject_TypeCheck(arg0_ptr, p_string_tensor_type)) {
        VLOG(6) << "Calling case6's string initializer.";
        AutoInitStringTensorByStringTensor(
            py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
        return 0;
      } else if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
        VLOG(6) << "Calling case3's string initializer.";
        AutoInitStringTensorByPyArray(
            py_tensor_ptr, kws_map, args, flag_kwargs, args_num);
        return 0;
      } else {
        VLOG(6) << "Calling case2's string initializer.";
        std::vector<int> dims = CastPyArg2VectorOfInt(arg0_ptr, 0);
        std::string act_name = "";
        PyObject* name_obj = PyTuple_GET_ITEM(args, 1);
        if (name_obj == Py_None) {
          act_name = egr::Controller::Instance().GenerateUniqueName(
              "generated_string_tensor");
        } else {
          act_name = CastPyArg2AttrString(PyTuple_GET_ITEM(args, 1), 1);
        }
        EmptyStringTensorInitializer(
            py_tensor_ptr,
            act_name,
            egr::Controller::Instance().GetExpectedPlace(),
            dims);
        return 0;
      }
    } else {
      PADDLE_THROW(platform::errors::Fatal(
          "Can't not find expected num of args, please check your call, and "
          "make sure u call the existed constructor."));
    }
  }
  return 1;
}
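
// A minimal usage sketch of the constructor cases handled above. The module
// path is an assumption for illustration only; it depends on where the
// submodule built in BindEagerStringTensor below is mounted:
//   core.eager.StringTensor()                     # case 1: empty tensor
//   core.eager.StringTensor([2, 3], "x")          # case 2: dims + optional name
//   core.eager.StringTensor(np_str_array)         # case 3/4: from a numpy array
//   core.eager.StringTensor(other_string_tensor)  # case 5/6: from a StringTensor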

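// Append a nullptr-terminated PyMethodDef table to `vector`, keeping exactly
// one nullptr sentinel at the end so repeated calls can merge several tables,
// e.g. in BindEager below:
//   AddPyMethodDefs(&methods, variable_methods);       // v..., nullptr
//   AddPyMethodDefs(&methods, math_op_patch_methods);  // v..., m..., nullptr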
void AddPyMethodDefs(std::vector<PyMethodDef>* vector, PyMethodDef* methods) {
  if (!vector->empty()) {
    // remove nullptr terminator
    vector->pop_back();
  }
  while (true) {
    vector->push_back(*methods);
    if (!methods->ml_name) {
      break;
    }
    methods++;
  }
}

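// Shared tp_dealloc for Tensor and StringTensor: clear outstanding weak
// references first, invoke the C++ destructor explicitly (the tensor was
// placement-new'ed in TensorNew), then release the object via tp_free.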
static void TensorDealloc(TensorObject* self) {
  if (self->weakrefs != nullptr)
    PyObject_ClearWeakRefs(reinterpret_cast<PyObject*>(self));
  self->tensor.~Tensor();
  Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self));
}

extern struct PyGetSetDef variable_properties[];                // NOLINT
extern struct PyGetSetDef string_tensor_variable_properties[];  // NOLINT

extern PyMethodDef variable_methods[];                // NOLINT
extern PyMethodDef math_op_patch_methods[];           // NOLINT
extern PyMethodDef string_tensor_variable_methods[];  // NOLINT

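// Slot tables wired into the eager Tensor/StringTensor types below; as
// file-scope globals they start zero-initialized.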
PyNumberMethods number_methods;
PySequenceMethods sequence_methods;
PyMappingMethods mapping_methods;

void BindEager(pybind11::module* module) {
  auto m = module->def_submodule("eager");

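  // Merge the base variable methods and the math-op patch methods into one
  // nullptr-terminated table; `methods` is static so the buffer handed to
  // tp_methods stays valid for the lifetime of the process.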
  static std::vector<PyMethodDef> methods;
  AddPyMethodDefs(&methods, variable_methods);
  AddPyMethodDefs(&methods, math_op_patch_methods);

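  // Tensor is built as a heap-allocated type (via PyType_Type.tp_alloc);
  // heap types carry their own ht_name/ht_qualname, which are set here
  // before the type is readied.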
  auto heap_type = reinterpret_cast<PyHeapTypeObject*>(
      PyType_Type.tp_alloc(&PyType_Type, 0));
  heap_type->ht_name = ToPyObject("Tensor");
  heap_type->ht_qualname = ToPyObject("Tensor");
  auto type = &heap_type->ht_type;
  type->tp_name = "Tensor";
  type->tp_basicsize = sizeof(TensorObject);
  type->tp_dealloc = (destructor)TensorDealloc;
  type->tp_as_number = &number_methods;
  type->tp_as_sequence = &sequence_methods;
  type->tp_as_mapping = &mapping_methods;
  type->tp_methods = methods.data();
  type->tp_getset = variable_properties;
  type->tp_init = TensorInit;
  type->tp_new = TensorNew;
  type->tp_doc = TensorDoc;
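  // Expose the weakref list stored in TensorObject so Python-level weak
  // references to Tensor work; TensorDealloc clears them during destruction.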
  type->tp_weaklistoffset = offsetof(TensorObject, weakrefs);
  Py_INCREF(&PyBaseObject_Type);
  type->tp_base = reinterpret_cast<PyTypeObject*>(&PyBaseObject_Type);
  type->tp_flags |=
      Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
#if PY_VERSION_HEX >= 0x03050000
  type->tp_as_async = &heap_type->as_async;
#endif
  p_tensor_type = type;

  if (PyType_Ready(type) < 0) {
    PADDLE_THROW(platform::errors::Fatal(
        "Init Paddle error in BindEager(PyType_Ready)."));
    return;
  }

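  // PyModule_AddObject steals a reference to `type` on success, so take an
  // extra one first; it is dropped again on the failure path below.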
  Py_INCREF(type);
  if (PyModule_AddObject(m.ptr(), "Tensor", reinterpret_cast<PyObject*>(type)) <
      0) {
    Py_DECREF(type);
    Py_DECREF(m.ptr());
    PADDLE_THROW(platform::errors::Fatal(
        "Init Paddle error in BindEager(PyModule_AddObject)."));
    return;
  }

  BindFunctions(m.ptr());
  BindEagerPyLayer(m.ptr());
  BindEagerOpFunctions(&m);
}
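
// A minimal usage sketch, assuming the submodule ends up reachable as
// paddle.fluid.core.eager (the actual import path depends on the module
// passed to BindEager):
//   from paddle.fluid import core
//   t = core.eager.Tensor()  # allocated by TensorNew, set up by TensorInit
//   del t                    # released through TensorDealloc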

void BindEagerStringTensor(pybind11::module* module) {
  auto m = module->def_submodule("eager");

  auto heap_type = reinterpret_cast<PyHeapTypeObject*>(
      PyType_Type.tp_alloc(&PyType_Type, 0));
  heap_type->ht_name = ToPyObject("StringTensor");
  heap_type->ht_qualname = ToPyObject("StringTensor");
  auto type = &heap_type->ht_type;
  type->tp_name = "StringTensor";
  type->tp_basicsize = sizeof(TensorObject);
  type->tp_dealloc = (destructor)TensorDealloc;
  type->tp_as_number = &number_methods;
  type->tp_as_sequence = &sequence_methods;
  type->tp_as_mapping = &mapping_methods;
  type->tp_methods = string_tensor_variable_methods;
  type->tp_getset = string_tensor_variable_properties;
  type->tp_init = StringTensorInit;
  type->tp_new = TensorNew;
  Py_INCREF(&PyBaseObject_Type);
  type->tp_base = reinterpret_cast<PyTypeObject*>(&PyBaseObject_Type);
  type->tp_flags |=
      Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
#if PY_VERSION_HEX >= 0x03050000
  type->tp_as_async = &heap_type->as_async;
#endif
  p_string_tensor_type = type;

  if (PyType_Ready(type) < 0) {
    PADDLE_THROW(platform::errors::Fatal(
        "Init Paddle error in BindEager(PyType_Ready)."));
    return;
  }

  Py_INCREF(type);
  if (PyModule_AddObject(
          m.ptr(), "StringTensor", reinterpret_cast<PyObject*>(type)) < 0) {
    Py_DECREF(type);
    Py_DECREF(m.ptr());
    PADDLE_THROW(platform::errors::Fatal(
        "Init Paddle error in BindEagerStringTensor(PyModule_AddObject)."));
    return;
  }
}
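
// Note: Tensor and StringTensor share TensorNew and TensorDealloc (both wrap
// a TensorObject); they differ in tp_init (TensorInit vs. StringTensorInit)
// and in the method/property tables attached above.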

}  // namespace pybind
}  // namespace paddle