/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// disable numpy compile error
#include <Python.h>
// Avoid a problem with copysign defined in pyconfig.h on Windows.
#ifdef copysign
#undef copysign
#endif

#include <sstream>
#include <string>
#include <vector>

#include "paddle/fluid/eager/accumulation/accumulation_node.h"
#include "paddle/fluid/eager/api/all.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/imperative/op_base.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/pybind/eager.h"
#include "paddle/fluid/pybind/eager_utils.h"
#include "paddle/fluid/pybind/exception.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"

#pragma GCC diagnostic ignored "-Wwrite-strings"

namespace paddle {
namespace pybind {

extern PyTypeObject* p_tensor_type;

PyObject* tensor_properties_get_name(TensorObject* self, void* closure) {
  EAGER_TRY
  // NOTE(dev): [why not use egr::Controller::Instance::GenerateUniqueName()?]
  // Because Controller must hold a tracer, but 'tensor.name' may be called
  // anywhere, such as in static graph mode under @to_static, where the tracer
  // is None.
  static egr::UniqueNameGenerator name_generator;
  if (self->tensor.name().empty()) {
    self->tensor.set_name(name_generator.Generate());
  }
  return ToPyObject(self->tensor.name());
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
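
// A minimal sketch of the lazy-naming pattern above: a function-local static
// generator hands out process-unique names on first access. The prefix and
// format below are illustrative assumptions, not egr::UniqueNameGenerator's
// actual output.
//
//   #include <atomic>
//   #include <cstdint>
//   #include <string>
//
//   std::string GenerateUniqueNameSketch(const std::string& prefix = "generated") {
//     static std::atomic<uint64_t> counter{0};
//     return prefix + "_" + std::to_string(counter.fetch_add(1));
//   }
//
// As in the getter, callers see a stable name: the first access assigns it,
// and later accesses return the cached value.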

PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
  EAGER_TRY
  if (!self->tensor.defined()) {
    // keep consistent with old dygraph behavior
    return ToPyObject(paddle::framework::proto::VarType::LOD_TENSOR);
  }
  if (self->tensor.is_dense_tensor()) {
    return ToPyObject(paddle::framework::proto::VarType::LOD_TENSOR);
  } else if (self->tensor.is_selected_rows()) {
    return ToPyObject(paddle::framework::proto::VarType::SELECTED_ROWS);
  } else if (egr::IsVariableCompatTensor(self->tensor)) {
    return ToPyObject(static_cast<paddle::framework::proto::VarType::Type>(
        static_cast<const egr::VariableCompatTensor*>(self->tensor.impl().get())
            ->Type()));
  } else {
    RETURN_PY_NONE
  }
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
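
// Dispatch summary for the getter above (read directly from its branches):
//
//   undefined tensor     -> VarType::LOD_TENSOR (legacy dygraph default)
//   DenseTensor          -> VarType::LOD_TENSOR
//   SelectedRows         -> VarType::SELECTED_ROWS
//   VariableCompatTensor -> its own Type()
//   anything else        -> None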

PyObject* tensor_properties_is_leaf(TensorObject* self, void* closure) {
  EAGER_TRY
  return ToPyObject(egr::EagerUtils::IsLeafTensor(self->tensor));
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

int tensor_properties_set_name(TensorObject* self,
                               PyObject* value,
                               void* closure) {
  EAGER_TRY
  self->tensor.set_name(CastPyArg2AttrString(value, 0));
  return 0;
  EAGER_CATCH_AND_THROW_RETURN_NEG
}

PyObject* tensor_properties_get_stop_gradient(TensorObject* self,
                                              void* closure) {
  EAGER_TRY
  auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
  return ToPyObject(meta->StopGradient());
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

PyObject* tensor_properties_get_grad(TensorObject* self, void* closure) {
  EAGER_TRY
  VLOG(6) << "Get grad for tensor: " << self->tensor.name();
  auto meta = egr::EagerUtils::nullable_autograd_meta(self->tensor);
  // meta may be nullptr here, so guard the debug log before dereferencing it.
  if (meta) {
    VLOG(6) << meta << " initialized: " << meta->Grad().initialized();
  }
  if (meta && meta->Grad().initialized()) {
    return ToPyObject(meta->Grad());
  } else {
    RETURN_PY_NONE
  }
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

int tensor_properties_set_grad(TensorObject* self,
                               PyObject* value,
                               void* closure) {
  EAGER_TRY
  auto src = CastPyArg2Tensor(value, 0);
  PADDLE_ENFORCE(
      egr::EagerUtils::IsLeafTensor(self->tensor),
      paddle::platform::errors::Fatal("Only leaf Tensor can be set grad."));

  paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor);
  PADDLE_ENFORCE(grad != nullptr,
                 paddle::platform::errors::Fatal(
                     "Detected nullptr grad. Please check if you have "
                     "manually cleared the grad inside autograd_meta."));
  grad->copy_(src, self->tensor.place(), true);
  return 0;
  EAGER_CATCH_AND_THROW_RETURN_NEG
}

int tensor_properties_set_stop_gradient(TensorObject* self,
                                        PyObject* value,
                                        void* closure) {
  EAGER_TRY
  auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
  meta->SetStopGradient(CastPyArg2AttrBoolean(value, 0));
  if (!meta->GradNode()) {
    meta->SetGradNode(std::make_shared<egr::GradNodeAccumulation>(meta));
  }
  return 0;
  EAGER_CATCH_AND_THROW_RETURN_NEG
}
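
// The node-creation step above follows a common lazy-initialization idiom:
// toggling stop_gradient guarantees the tensor has a grad node to accumulate
// into. A generic sketch of the idiom (names are illustrative):
//
//   #include <memory>
//
//   template <typename Meta, typename Node>
//   void EnsureGradNodeSketch(Meta* meta) {
//     if (!meta->GradNode()) {
//       meta->SetGradNode(std::make_shared<Node>(meta));
//     }
//   }
//
// Instantiated with egr::AutogradMeta and egr::GradNodeAccumulation, this
// mirrors the body of tensor_properties_set_stop_gradient.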

PyObject* tensor_properties_get_persistable(TensorObject* self, void* closure) {
  EAGER_TRY
  auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
  return ToPyObject(meta->Persistable());
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

int tensor_properties_set_persistable(TensorObject* self,
                                      PyObject* value,
                                      void* closure) {
  EAGER_TRY
  auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
  meta->SetPersistable(CastPyArg2AttrBoolean(value, 0));
  return 0;
  EAGER_CATCH_AND_THROW_RETURN_NEG
}

PyObject* tensor_properties_get_dist_attr(TensorObject* self, void* closure) {
  EAGER_TRY
  if (self->tensor.is_dist_tensor()) {
#ifdef PADDLE_WITH_DISTRIBUTE
    phi::distributed::auto_parallel::DistTensor* dist_tensor =
        static_cast<phi::distributed::auto_parallel::DistTensor*>(
            self->tensor.impl().get());
    return ToPyObject(dist_tensor->dist_attr().get());
#else
    RETURN_PY_NONE
#endif
  } else {
    RETURN_PY_NONE
  }
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

PyObject* tensor_properties_get_shape(TensorObject* self, void* closure) {
  EAGER_TRY
  std::vector<int64_t> value;
  if (!self->tensor.defined()) {
    return ToPyObject(value);
  }
  if (egr::IsVariableCompatTensor(self->tensor)) {
    auto* var_tensor = static_cast<const egr::VariableCompatTensor*>(
        self->tensor.impl().get());
    if (var_tensor->IsType<paddle::framework::Vocab>()) {
      value.emplace_back(static_cast<int64_t>(
          var_tensor->Get<paddle::framework::Vocab>().size()));
    } else if (var_tensor->IsType<paddle::framework::Strings>()) {
      value.emplace_back(static_cast<int64_t>(
          var_tensor->Get<paddle::framework::Strings>().size()));
    } else {
      PADDLE_THROW(paddle::platform::errors::Unavailable(
          "VariableCompatTensor only supports getting shape from Vocab or "
          "Strings."));
    }
  } else {
    auto ddim = self->tensor.shape();
    size_t rank = static_cast<size_t>(ddim.size());
    value.resize(rank);
    for (size_t i = 0; i < rank; i++) {
      value[i] = ddim[i];
    }
  }
  if (!egr::IsVariableCompatTensor(self->tensor)) {
    auto desired_layout =
        paddle::imperative::LayoutAutoTune::Instance().GetDesiredLayout();
    auto default_layout =
        paddle::imperative::LayoutAutoTune::Instance().GetDefaultLayout();
    bool change_dim =
        (desired_layout != default_layout &&
         self->tensor.layout() == desired_layout && value.size() == 4);
    VLOG(6) << "eager_properties 'Shape' method, layout autotune "
            << " desired_layout: " << desired_layout
            << " default_layout: " << default_layout
            << " tensor layout: " << self->tensor.layout()
            << " tensor's shape size is : " << value.size();
    std::vector<int64_t> dims = value;
    if (change_dim && phi::DataLayoutToString(desired_layout) == "NCHW") {
      // NCHW -> NHWC
      VLOG(6) << "layout autotune get Shape from NCHW -> NHWC " << value[0]
              << " " << value[1] << " " << value[2] << " " << value[3] << " to "
              << dims[0] << " " << dims[2] << " " << dims[3] << " " << dims[1];
      value[0] = dims[0];
      value[1] = dims[2];
      value[2] = dims[3];
      value[3] = dims[1];
    } else if (change_dim &&
               phi::DataLayoutToString(desired_layout) == "NHWC") {
      // NHWC -> NCHW
      VLOG(6) << "layout autotune get Shape from NHWC -> NCHW " << value[0]
              << " " << value[1] << " " << value[2] << " " << value[3] << " to "
              << dims[0] << " " << dims[3] << " " << dims[1] << " " << dims[2]
              << " " << dims[1];
      value[0] = dims[0];
      value[1] = dims[3];
      value[2] = dims[1];
      value[3] = dims[2];
    }
  }

  return ToPyObject(value);
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
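
// A self-contained sketch of the 4-D layout permutation performed above
// (illustrative only; it mirrors the two index shuffles used by layout
// autotune):
//
//   #include <array>
//   #include <cstdint>
//
//   // (N, C, H, W) -> (N, H, W, C)
//   std::array<int64_t, 4> NCHWToNHWC(const std::array<int64_t, 4>& d) {
//     return {d[0], d[2], d[3], d[1]};
//   }
//
//   // (N, H, W, C) -> (N, C, H, W)
//   std::array<int64_t, 4> NHWCToNCHW(const std::array<int64_t, 4>& d) {
//     return {d[0], d[3], d[1], d[2]};
//   }
//
// The getter only rewrites the reported shape; the underlying storage stays
// in whatever layout autotune chose.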

PyObject* tensor_properties_get_layout(TensorObject* self, void* closure) {
  EAGER_TRY
  std::string layout = "";
  if (!self->tensor.defined()) {
    return ToPyObject(layout);
  }

  if (egr::IsVariableCompatTensor(self->tensor)) {
    VLOG(3) << "VariableCompatTensor does not support `layout` method.";
    return ToPyObject(layout);
  }
  return ToPyObject(phi::DataLayoutToString(self->tensor.layout()));
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

PyObject* tensor_properties_get_place(TensorObject* self, void* closure) {
  EAGER_TRY
  return ToPyObject(self->tensor.place());
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

PyObject* tensor_properties_get_place_str(TensorObject* self, void* closure) {
  EAGER_TRY
  std::stringstream ostr;
  ostr << self->tensor.place();
  return ToPyObject(ostr.str());
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

PyObject* tensor_properties_get_dtype(TensorObject* self, void* closure) {
  EAGER_TRY
  if (!self->tensor.defined()) {
    // keep consistent with old dygraph behavior
    return ToPyObject(framework::proto::VarType::FP32);
  }
  if (egr::IsVariableCompatTensor(self->tensor)) {
    auto* var_tensor = static_cast<const egr::VariableCompatTensor*>(
        self->tensor.impl().get());
    if (var_tensor->IsType<paddle::framework::Vocab>()) {
      return ToPyObject(framework::proto::VarType::RAW);
    } else if (var_tensor->IsType<paddle::framework::Strings>()) {
      return ToPyObject(framework::proto::VarType::STRING);
    } else {
      PADDLE_THROW(paddle::platform::errors::Unavailable(
          "VariableCompatTensor only supports getting dtype from Vocab or "
          "Strings."));
    }
  } else {
    return ToPyObject(
        paddle::framework::TransToProtoVarType(self->tensor.type()));
  }
  EAGER_CATCH_AND_THROW_RETURN_NULL
}

PyObject* tensor_properties_get_grad_fn(TensorObject* self, void* closure) {
  EAGER_TRY
  if (!self->tensor.defined()) {
    // Undefined tensors carry no autograd history, so return None.
    Py_INCREF(Py_None);
    return Py_None;
  }

  // Fetch the autograd meta; it may be nullptr for tensors that were never
  // touched by autograd.
  auto meta = egr::EagerUtils::nullable_autograd_meta(self->tensor);

  if (meta) {
    auto grad_node = meta->GradNode();
    if (!grad_node) {
      Py_INCREF(Py_None);
      return Py_None;
    }
    // Wrap the GradNode in a Python object; the conversion depends on the
    // structure of GradNode.
    PyObject* py_grad_node = ToPyObject(grad_node);
    return py_grad_node;
  } else {
    // No autograd meta: nothing produced this tensor, so there is no grad_fn.
    Py_INCREF(Py_None);
    return Py_None;
  }
  EAGER_CATCH_AND_THROW_RETURN_NULL
}
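
// Aside: the Py_INCREF(Py_None)/return pairs above are the standard CPython
// idiom for returning None from a C function. CPython ships a convenience
// macro that expands to essentially:
//
//   #define Py_RETURN_NONE return Py_INCREF(Py_None), Py_None
//
// The RETURN_PY_NONE macro used elsewhere in this file presumably wraps the
// same pattern.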

struct PyGetSetDef variable_properties[] = {
    {"grad",
     (getter)tensor_properties_get_grad,
     (setter)tensor_properties_set_grad,
     nullptr,
     nullptr},
    {"name",
     (getter)tensor_properties_get_name,
     (setter)tensor_properties_set_name,
     nullptr,
     nullptr},
    {"stop_gradient",
     (getter)tensor_properties_get_stop_gradient,
     (setter)tensor_properties_set_stop_gradient,
     nullptr,
     nullptr},
    {"persistable",
     (getter)tensor_properties_get_persistable,
     (setter)tensor_properties_set_persistable,
     nullptr,
     nullptr},
    {"shape", (getter)tensor_properties_get_shape, nullptr, nullptr, nullptr},
    {"layout", (getter)tensor_properties_get_layout, nullptr, nullptr, nullptr},
    // {"is_leaf", (getter)tensor_properties_get_is_leaf, nullptr,
    // nullptr,
    //  nullptr},
    {"place", (getter)tensor_properties_get_place, nullptr, nullptr, nullptr},
    {"dist_attr",
     (getter)tensor_properties_get_dist_attr,
     nullptr,
     nullptr,
     nullptr},
    {"_place_str",
     (getter)tensor_properties_get_place_str,
     nullptr,
     nullptr,
     nullptr},
    {"dtype", (getter)tensor_properties_get_dtype, nullptr, nullptr, nullptr},
    {"type", (getter)tensor_properties_get_type, nullptr, nullptr, nullptr},
    {"is_leaf", (getter)tensor_properties_is_leaf, nullptr, nullptr, nullptr},
    {"grad_fn",
     (getter)tensor_properties_get_grad_fn,
     nullptr,
     nullptr,
     nullptr},
    {nullptr, nullptr, nullptr, nullptr, nullptr}};
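
// How a PyGetSetDef table like this is typically consumed (a sketch; the
// actual hookup for p_tensor_type happens elsewhere in the pybind layer, so
// the type name and init function below are assumptions):
//
//   static PyTypeObject tensor_type = {
//       PyVarObject_HEAD_INIT(nullptr, 0) "libpaddle.Tensor", /* tp_name */
//       /* ... remaining slots default to zero ... */
//   };
//
//   void InitTensorTypeSketch() {
//     tensor_type.tp_getset = variable_properties;  // attach getters/setters
//     PyType_Ready(&tensor_type);                   // finalize the type
//   }
//
// After PyType_Ready, attribute access such as `tensor.shape` dispatches
// through the matching getter in this table.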

// variable_properties for core.eager.StringTensor
struct PyGetSetDef string_tensor_variable_properties[] = {
    {"name",
     (getter)tensor_properties_get_name,
     (setter)tensor_properties_set_name,
     nullptr,
     nullptr},
    {"shape", (getter)tensor_properties_get_shape, nullptr, nullptr, nullptr},
    {"layout", (getter)tensor_properties_get_layout, nullptr, nullptr, nullptr},
    {"place", (getter)tensor_properties_get_place, nullptr, nullptr, nullptr},
    {"_place_str",
     (getter)tensor_properties_get_place_str,
     nullptr,
     nullptr,
     nullptr},
    {nullptr, nullptr, nullptr, nullptr, nullptr}};

}  // namespace pybind
}  // namespace paddle