/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// disable numpy compile error
#include <Python.h>

#include <string>
#include <vector>

#include "pybind11/numpy.h"
#include "pybind11/pybind11.h"

#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"
namespace paddle {
namespace pybind {

extern void InitTensorWithNumpyValue(TensorObject* self,
                                     const pybind11::object& array,
                                     bool zero_copy);

extern PyTypeObject* p_tensor_type;

44 45 46
static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args,
                                     PyObject* kwargs) {
  EAGER_TRY
47
  PADDLE_ENFORCE_EQ(
48
      self->tensor.initialized(), true,
49 50 51
      platform::errors::InvalidArgument(
          "Tensor data of %s is Empty that indicates we have null tensor for "
          "now, please check if it has no data and initialize it first.",
52 53 54
          self->tensor.name()));
  auto tensor_dims = self->tensor.shape();
  auto numpy_dtype = TensorDtype2NumpyDtype(self->tensor.type());
55
  auto sizeof_dtype = paddle::framework::DataTypeSize(self->tensor.type());
56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71
  Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
  Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
  size_t numel = 1;
  for (int i = tensor_dims.size() - 1; i >= 0; --i) {
    py_dims[i] = static_cast<size_t>(tensor_dims[i]);
    py_strides[i] = sizeof_dtype * numel;
    numel *= py_dims[i];
  }
  auto& api = pybind11::detail::npy_api::get();
  PyObject* array = api.PyArray_NewFromDescr_(
      api.PyArray_Type_, api.PyArray_DescrFromType_(numpy_dtype),
      tensor_dims.size(), py_dims, py_strides, nullptr,
      pybind11::detail::npy_api::NPY_ARRAY_ALIGNED_ |
          pybind11::detail::npy_api::NPY_ARRAY_WRITEABLE_,
      nullptr);

72
  if (self->tensor.is_cpu()) {
73
    auto dense_tensor =
74
        std::dynamic_pointer_cast<phi::DenseTensor>(self->tensor.impl());
75 76 77 78 79 80
    platform::CPUPlace place;
    // deep copy
    paddle::memory::Copy(place, reinterpret_cast<void*>(
                                    pybind11::detail::array_proxy(array)->data),
                         place, dense_tensor->data(), sizeof_dtype * numel);
#if defined(PADDLE_WITH_CUDA)
81
  } else if (self->tensor.is_cuda()) {
82
    auto dense_tensor =
83
        std::dynamic_pointer_cast<phi::DenseTensor>(self->tensor.impl());
84 85 86

    paddle::platform::GpuMemcpySync(
        pybind11::detail::array_proxy(array)->data, dense_tensor->data(),
87 88
        paddle::framework::DataTypeSize(dense_tensor->dtype()) *
            dense_tensor->numel(),
89 90 91 92 93 94 95 96 97 98 99 100 101
        cudaMemcpyDeviceToHost);
#endif
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Tensor.numpy() only support cpu tensor."));
    Py_INCREF(Py_None);
    return Py_None;
  }

  return array;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

102 103 104 105
static PyObject* tensor_method__is_initialized(TensorObject* self,
                                               PyObject* args,
                                               PyObject* kwargs) {
  EAGER_TRY
106
  return ToPyObject(self->tensor.initialized());
107 108 109
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

110 111 112
static PyObject* tensor_method__copy_to(TensorObject* self, PyObject* args,
                                        PyObject* kwargs) {
  EAGER_TRY
113 114 115
  bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0);
  auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
  auto cp_tensor =
116
      self->tensor.copy_to(phi::TransToPtenBackend(place), blocking);
117 118 119
  egr::EagerUtils::autograd_meta(&cp_tensor)->SetStopGradient(true);
  egr::EagerUtils::autograd_meta(&cp_tensor)
      ->SetPersistable(
120
          egr::EagerUtils::autograd_meta(&(self->tensor))->Persistable());
121 122 123 124
  return ToPyObject(cp_tensor);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

125 126 127 128
static PyObject* tensor_method_reconstruct_from_(TensorObject* self,
                                                 PyObject* args,
                                                 PyObject* kwargs) {
  EAGER_TRY
129 130 131
  paddle::experimental::Tensor src_tensor =
      CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
  std::string orig_name = self->tensor.name();
132 133
  VLOG(6) << "Start Reconstructing Tensor from" << src_tensor.name() << " to "
          << orig_name;
134
  self->tensor = src_tensor;
135 136

  // Recover source name
137
  self->tensor.set_name(orig_name);
138 139

  VLOG(6) << "Finished Reconstructing Tensor from" << src_tensor.name()
140
          << " to " << self->tensor.name();
141 142 143 144 145
  Py_INCREF(Py_None);
  return Py_None;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

146 147 148
static PyObject* tensor_method_copy_(TensorObject* self, PyObject* args,
                                     PyObject* kwargs) {
  EAGER_TRY
149 150
  paddle::experimental::Tensor src_tensor =
      CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
151
  bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 1), 1);
152
  VLOG(6) << "Start Copy Tensor " << src_tensor.name() << " to "
153 154 155
          << self->tensor.name();
  if (!self->tensor.defined()) {
    egr::EagerUtils::autograd_meta(&(self->tensor))
156 157
        ->SetStopGradient(
            egr::EagerUtils::autograd_meta(&(src_tensor))->StopGradient());
158
    egr::EagerUtils::autograd_meta(&(self->tensor))
159 160 161 162
        ->SetPersistable(
            egr::EagerUtils::autograd_meta(&(src_tensor))->Persistable());
  }

163
  self->tensor.copy_(src_tensor, blocking);
164

165
  VLOG(6) << "Finish Copy Tensor " << src_tensor.name() << " to "
166
          << self->tensor.name();
167 168 169 170 171
  Py_INCREF(Py_None);
  return Py_None;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

172 173
static PyObject* tensor_retain_grads(TensorObject* self, PyObject* args,
                                     PyObject* kwargs) {
174
  EAGER_TRY
175
  if (egr::Controller::Instance().HasGrad()) {
176
    auto meta = egr::EagerUtils::autograd_meta(&(self->tensor));
177
    if (!meta->GetMutableGradNode()) {
178
      VLOG(6) << "Make grad node of tensor: " << self->tensor.name()
179 180 181
              << "become accumulation node";
      meta->SetGradNode(std::make_shared<egr::GradNodeAccumulation>());
    }
182
    egr::egr_utils_api::RetainGradForTensor(self->tensor);
183
  }
184 185 186 187 188
  Py_INCREF(Py_None);
  return Py_None;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

189 190 191
static PyObject* tensor__clear_gradient(TensorObject* self, PyObject* args,
                                        PyObject* kwargs) {
  EAGER_TRY
192
  VLOG(4) << "ClearGradient " << self->tensor.name();
193

194 195
  paddle::experimental::Tensor* grad;
  if (egr::egr_utils_api::IsLeafTensor(self->tensor)) {
196 197
    // Add RetainGrad as PostHook to AccumulationNode
    std::shared_ptr<egr::GradNodeBase> grad_node =
198
        egr::EagerUtils::grad_node(self->tensor);
199 200 201 202 203 204 205 206 207
    PADDLE_ENFORCE(
        grad_node.get() != nullptr,
        paddle::platform::errors::Fatal("Detected NULL grad_node"
                                        "Leaf tensor should have had grad_node "
                                        "with type: GradNodeAccumulation"));
    auto accumulation_grad_node =
        std::dynamic_pointer_cast<egr::GradNodeAccumulation>(grad_node);
    grad = accumulation_grad_node->Grad();
  } else {
208
    auto meta = egr::EagerUtils::unsafe_autograd_meta(self->tensor);
209
    grad = meta->MutableGrad();
210 211
  }

212
  if (grad->initialized()) {
213
    VLOG(4) << "Gradient of " << self->tensor.name()
214 215
            << " is initialized, will be released.";
    auto dense_tensor =
216
        std::dynamic_pointer_cast<phi::DenseTensor>(grad->impl());
217
    dense_tensor->MoveMemoryHolder();
218 219 220 221 222 223
  }
  Py_INCREF(Py_None);
  return Py_None;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

224 225
static PyObject* tensor__zero_grads(TensorObject* self, PyObject* args,
                                    PyObject* kwargs) {
226
  EAGER_TRY
227
  VLOG(4) << "ZeroGrads " << self->tensor.name();
228

229
  if (egr::egr_utils_api::IsLeafTensor(self->tensor)) {
230 231
    // Add RetainGrad as PostHook to AccumulationNode
    std::shared_ptr<egr::GradNodeBase> grad_node =
232
        egr::EagerUtils::grad_node(self->tensor);
233 234 235 236 237 238 239
    PADDLE_ENFORCE(
        grad_node.get() != nullptr,
        paddle::platform::errors::Fatal("Detected NULL grad_node"
                                        "Leaf tensor should have had grad_node "
                                        "with type: GradNodeAccumulation"));
    auto accumulation_grad_node =
        std::dynamic_pointer_cast<egr::GradNodeAccumulation>(grad_node);
240
    if (accumulation_grad_node->Grad()->initialized()) {
241 242 243
      accumulation_grad_node->Grad()->set_impl(
          paddle::experimental::zeros_like(*(accumulation_grad_node->Grad()))
              .impl());
244
    }
245
  } else {
246
    auto meta = egr::EagerUtils::unsafe_autograd_meta(self->tensor);
247
    if (meta->MutableGrad()->initialized()) {
248 249
      meta->MutableGrad()->set_impl(
          paddle::experimental::zeros_like(*(meta->MutableGrad())).impl());
250
    }
251 252 253 254 255 256 257
  }

  Py_INCREF(Py_None);
  return Py_None;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

258 259 260
static PyObject* tensor__share_buffer_to(TensorObject* self, PyObject* args,
                                         PyObject* kwargs) {
  EAGER_TRY
261 262 263
  paddle::experimental::Tensor* dst_ptr =
      &(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
  PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
264 265 266
                    platform::errors::InvalidArgument(
                        "Tensor %s has not been initialized! please initialize "
                        "src tensor before share_buffer_with to other.",
267
                        self->tensor.name()));
268
  auto* src_tensor =
269
      static_cast<paddle::framework::Tensor*>(self->tensor.impl().get());
270 271 272 273
  auto dst_tensor =
      static_cast<paddle::framework::Tensor*>(dst_ptr->impl().get());
  dst_tensor->ShareDataWith(*src_tensor);
  dst_tensor->ShareDataTypeWith(*src_tensor);
274 275 276 277 278
  Py_INCREF(Py_None);
  return Py_None;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

279 280 281 282
static PyObject* tensor__is_shared_buffer_with(TensorObject* self,
                                               PyObject* args,
                                               PyObject* kwargs) {
  EAGER_TRY
283 284 285
  paddle::experimental::Tensor* dst_ptr =
      &(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
  PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
286 287 288
                    platform::errors::InvalidArgument(
                        "Tensor %s has not been initialized! please initialize "
                        "src tensor before share_buffer_with to other.",
289
                        self->tensor.name()));
290
  bool res = false;
291
  if (!self->tensor.defined() || !dst_ptr->defined()) {
292 293 294
    return ToPyObject(res);
  }
  auto* self_ptr =
295
      static_cast<paddle::framework::Tensor*>(self->tensor.impl().get());
296 297 298 299 300 301 302
  auto dst_tensor =
      static_cast<paddle::framework::Tensor*>(dst_ptr->impl().get());
  res = dst_tensor->IsSharedBufferWith(*self_ptr);
  return ToPyObject(res);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

303 304 305 306
static PyObject* tensor__share_underline_tensor_to(TensorObject* self,
                                                   PyObject* args,
                                                   PyObject* kwargs) {
  EAGER_TRY
307 308 309
  paddle::experimental::Tensor* src_ptr =
      &(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
  PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
310 311 312
                    platform::errors::InvalidArgument(
                        "Tensor %s has not been initialized! please initialize "
                        "src tensor before share_buffer_with to other.",
313 314
                        self->tensor.name()));
  src_ptr->set_impl(self->tensor.impl());
315 316 317 318 319
  Py_INCREF(Py_None);
  return Py_None;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

320 321 322 323
static PyObject* tensor__is_shared_underline_tensor_with(TensorObject* self,
                                                         PyObject* args,
                                                         PyObject* kwargs) {
  EAGER_TRY
324 325
  paddle::experimental::Tensor src_tensor =
      CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
326 327 328 329 330 331
  PADDLE_ENFORCE_EQ(src_tensor.initialized(), true,
                    platform::errors::InvalidArgument(
                        "Tensor %s has not been initialized! please initialize "
                        "src tensor before share_buffer_with to other.",
                        src_tensor.name()));
  bool res = false;
332
  if (!self->tensor.defined() || !src_tensor.defined()) {
333 334
    return ToPyObject(res);
  }
335
  res = (self->tensor.impl().get() == src_tensor.impl().get());
336 337 338 339
  return ToPyObject(res);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

340 341 342
static PyObject* tensor_method_detach(TensorObject* self, PyObject* args,
                                      PyObject* kwargs) {
  EAGER_TRY
343
  PADDLE_ENFORCE_EQ(
344
      self->tensor.initialized(), true,
345
      platform::errors::InvalidArgument("Tensor %s has not been initialized!",
346
                                        self->tensor.name()));
347

348
  PyObject* obj = p_tensor_type->tp_alloc(p_tensor_type, 0);
349
  if (obj) {
350 351 352 353 354 355
    auto v = reinterpret_cast<TensorObject*>(obj);
    new (&(v->tensor)) paddle::experimental::Tensor();
    v->tensor.set_impl(self->tensor.impl());
    v->tensor.set_name(egr::Controller::Instance().GenerateUniqueName());
    auto autograd_meta_src = egr::EagerUtils::autograd_meta(&(self->tensor));
    auto autograd_meta = egr::EagerUtils::autograd_meta(&(v->tensor));
356 357 358 359 360 361 362 363 364 365
    autograd_meta->SetPersistable(autograd_meta_src->Persistable());
  } else {
    PADDLE_THROW(platform::errors::Fatal(
        "tp_alloc return null, can not new a PyObject."));
  }

  return obj;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

366 367 368 369
static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
                                                    PyObject* args,
                                                    PyObject* kwargs) {
  EAGER_TRY
370 371 372
  if (self->tensor.is_dense_tensor()) {
    auto* tensor =
        static_cast<paddle::framework::LoDTensor*>(self->tensor.impl().get());
373 374 375 376 377 378 379 380 381
    VLOG(6) << "tensor: " << tensor->IsInitialized();
    return ToPyObject(tensor);
  } else {
    Py_IncRef(Py_None);
    return Py_None;
  }
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

382
// NOTE(wuweilong): Set value and not change self's original place
383 384
static PyObject* tensor_method_set_value(TensorObject* self, PyObject* args,
                                         PyObject* kwargs) {
385
  EAGER_TRY
386
  VLOG(4) << "Value " << self->tensor.name();
387 388
  pybind11::object numpy_value =
      pybind11::object(pybind11::handle(PyTuple_GET_ITEM(args, 0)), true);
389
  InitTensorWithNumpyValue(self, numpy_value, false);
390 391 392 393 394
  Py_INCREF(Py_None);
  return Py_None;
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

395
PyMethodDef variable_methods[] = {
396
    {"numpy", (PyCFunction)(void (*)(void))tensor_method_numpy,
397 398
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"_is_initialized",
399
     (PyCFunction)(void (*)(void))tensor_method__is_initialized,
400
     METH_VARARGS | METH_KEYWORDS, NULL},
401
    {"_copy_to", (PyCFunction)(void (*)(void))tensor_method__copy_to,
402
     METH_VARARGS | METH_KEYWORDS, NULL},
403
    {"copy_", (PyCFunction)(void (*)(void))tensor_method_copy_,
404
     METH_VARARGS | METH_KEYWORDS, NULL},
405
    {"reconstruct_from_",
406
     (PyCFunction)(void (*)(void))tensor_method_reconstruct_from_,
407
     METH_VARARGS | METH_KEYWORDS, NULL},
408
    {"retain_grads", (PyCFunction)(void (*)(void))tensor_retain_grads,
409
     METH_VARARGS | METH_KEYWORDS, NULL},
410
    {"_clear_gradient", (PyCFunction)(void (*)(void))tensor__clear_gradient,
411
     METH_VARARGS | METH_KEYWORDS, NULL},
412
    {"_zero_grads", (PyCFunction)(void (*)(void))tensor__zero_grads,
413
     METH_VARARGS | METH_KEYWORDS, NULL},
414
    {"_share_buffer_to", (PyCFunction)(void (*)(void))tensor__share_buffer_to,
415
     METH_VARARGS | METH_KEYWORDS, NULL},
416
    {"_is_shared_buffer_with",
417
     (PyCFunction)(void (*)(void))tensor__is_shared_buffer_with,
418
     METH_VARARGS | METH_KEYWORDS, NULL},
419
    {"_share_underline_tensor_to",
420
     (PyCFunction)(void (*)(void))tensor__share_underline_tensor_to,
421 422
     METH_VARARGS | METH_KEYWORDS, NULL},
    {"_is_shared_underline_tensor_with",
423
     (PyCFunction)(void (*)(void))tensor__is_shared_underline_tensor_with,
424
     METH_VARARGS | METH_KEYWORDS, NULL},
425
    {"detach", (PyCFunction)(void (*)(void))tensor_method_detach,
426
     METH_VARARGS | METH_KEYWORDS, NULL},
427
    {"get_tensor",
428
     (PyCFunction)(void (*)(void))tensor_method_get_underline_tensor,
429
     METH_VARARGS | METH_KEYWORDS, NULL},
430
    {"_set_value", (PyCFunction)(void (*)(void))tensor_method_set_value,
431
     METH_VARARGS | METH_KEYWORDS, NULL},
432 433 434 435
    {NULL, NULL, 0, NULL}};

}  // namespace pybind
}  // namespace paddle