From 5c95e5c864d7f492096f95c030d469c7e38fcbbb Mon Sep 17 00:00:00 2001
From: OccupyMars2025 <31559413+OccupyMars2025@users.noreply.github.com>
Date: Tue, 6 Sep 2022 14:25:29 +0800
Subject: [PATCH] take some notes about sparse API (#45720)

---
 paddle/fluid/pybind/eager_functions.cc |  6 +++---
 paddle/fluid/pybind/eager_utils.cc     | 10 +++++-----
 paddle/phi/api/include/tensor.h        |  2 +-
 paddle/phi/core/dense_tensor.h         |  2 +-
 paddle/phi/core/tensor_base.h          |  6 +++---
 5 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc
index 9596551136c..16a5cff031d 100644
--- a/paddle/fluid/pybind/eager_functions.cc
+++ b/paddle/fluid/pybind/eager_functions.cc
@@ -523,8 +523,8 @@ static PyObject* eager_api_sparse_coo_tensor(PyObject* self,
       std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_indices.impl());
   auto dense_elements =
       std::dynamic_pointer_cast<phi::DenseTensor>(non_zero_elements.impl());
-  // TODO(zhangkaihuo): After create SparseTensor, call coalesced() to sort and
-  // merge duplicate indices
+  // TODO(zhangkaihuo): After creating SparseCooTensor, call coalesced() to sort
+  // and merge duplicate indices
   std::shared_ptr<phi::SparseCooTensor> coo_tensor =
       std::make_shared<phi::SparseCooTensor>(
           *dense_indices, *dense_elements, phi::make_ddim(dense_shape));
@@ -537,7 +537,7 @@ static PyObject* eager_api_sparse_coo_tensor(PyObject* self,
   autograd_meta->SetStopGradient(static_cast<bool>(stop_gradient));
   if (!autograd_meta->GetMutableGradNode()) {
     VLOG(3) << "Tensor(" << name
-            << ") have not GradNode, add GradNodeAccumulation for it.";
+            << ") doesn't have GradNode, add GradNodeAccumulation to it.";
     autograd_meta->SetGradNode(
         std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
   }
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 62a28868bb8..c50a80f64dd 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -92,7 +92,7 @@ int TensorDtype2NumpyDtype(phi::DataType dtype) {
 }
 
 bool PyObject_CheckLongOrConvertToLong(PyObject** obj) {
-  if ((PyLong_Check(*obj) && !PyBool_Check(*obj))) {
+  if (PyLong_Check(*obj) && !PyBool_Check(*obj)) {
     return true;
   }
 
@@ -129,7 +129,7 @@ bool PyObject_CheckStr(PyObject* obj) { return PyUnicode_Check(obj); }
 bool CastPyArg2AttrBoolean(PyObject* obj, ssize_t arg_pos) {
   if (obj == Py_None) {
     return false;  // To be compatible with QA integration testing. Some
-                   // test case pass in None.
+                   // test cases pass in None.
   } else if (obj == Py_True) {
     return true;
   } else if (obj == Py_False) {
@@ -305,7 +305,7 @@ std::vector<int> CastPyArg2VectorOfInt(PyObject* obj, size_t arg_pos) {
     Py_ssize_t len = PyList_Size(obj);
     PyObject* item = nullptr;
     for (Py_ssize_t i = 0; i < len; i++) {
-      item = PyList_GetItem(obj, i);
+      item = PyList_GET_ITEM(obj, i);
       if (PyObject_CheckLongOrConvertToLong(&item)) {
         result.emplace_back(static_cast<int>(PyLong_AsLong(item)));
       } else {
@@ -321,13 +321,13 @@ std::vector<int> CastPyArg2VectorOfInt(PyObject* obj, size_t arg_pos) {
     Py_ssize_t len = PyTuple_Size(obj);
     PyObject* item = nullptr;
     for (Py_ssize_t i = 0; i < len; i++) {
-      item = PyTuple_GetItem(obj, i);
+      item = PyTuple_GET_ITEM(obj, i);
       if (PyObject_CheckLongOrConvertToLong(&item)) {
         result.emplace_back(static_cast<int>(PyLong_AsLong(item)));
       } else {
         PADDLE_THROW(platform::errors::InvalidArgument(
             "argument (position %d) must be "
-            "list of bool, but got %s at pos %d",
+            "list of int, but got %s at pos %d",
             arg_pos + 1,
             reinterpret_cast<PyTypeObject*>(item->ob_type)->tp_name,
             i));
diff --git a/paddle/phi/api/include/tensor.h b/paddle/phi/api/include/tensor.h
index 667ef281b99..67cedaf6710 100644
--- a/paddle/phi/api/include/tensor.h
+++ b/paddle/phi/api/include/tensor.h
@@ -574,7 +574,7 @@ class PADDLE_API Tensor final {
    * unified to Tensor, but Tensor itself is heterogeneous.
    *
    * Tensor can generally be represented by void* and size_t, place.
-   * This is suitable for most scenarios including CPU, GPU, HIP, CPU, etc.,
+   * This is suitable for most scenarios including CPU, GPU, HIP, NPU, etc.,
    * but there are a few cases where this definition cannot be described,
    * such as the Tensor representation in third-party lib such as Metal,
    * OpenCL, etc., as well as some special Tensor implementations, including
diff --git a/paddle/phi/core/dense_tensor.h b/paddle/phi/core/dense_tensor.h
index d16a019c7ab..e9a6be66b98 100644
--- a/paddle/phi/core/dense_tensor.h
+++ b/paddle/phi/core/dense_tensor.h
@@ -29,7 +29,7 @@ namespace phi {
 
 class DenseTensorUtils;
 
-/// \brief The Dense tensor store values in a contiguous sequential block
+/// \brief The Dense tensor stores values in a contiguous sequential block
 /// of memory where all values are represented. Tensors or multi-dimensional
 /// arrays are used in math operators.
 /// During the entire life cycle of a DenseTensor, its device type and key
diff --git a/paddle/phi/core/tensor_base.h b/paddle/phi/core/tensor_base.h
index 96594bcb4e9..3dc0e455a63 100644
--- a/paddle/phi/core/tensor_base.h
+++ b/paddle/phi/core/tensor_base.h
@@ -55,12 +55,12 @@ class TensorBase {
   virtual bool valid() const = 0;
 
   /// \brief Test whether the storage is allocated.
-  /// return Whether the storage is allocated.
+  /// \return Whether the storage is allocated.
   virtual bool initialized() const = 0;
 
   // TODO(Aurelius84): This interface is under intermediate state now.
   // We will remove DataType argument in the future. Please DO NOT
-  // rely on Datatype to much when design and implement other feature.
+  // rely on Datatype too much when designing and implementing other features.
 
   /// \brief Allocate memory with requested size from allocator.
   /// \return The mutable data pointer value of type T.
@@ -70,7 +70,7 @@ class TensorBase {
 
   /// \brief Return the type information of the derived class to support
   /// safely downcast in non-rtti environment.
-  /// return The type information of the derived class.
+  /// \return The type information of the derived class.
   TypeInfo<TensorBase> type_info() const { return type_info_; }
 
  private:
-- 
GitLab
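
Note on the coalesce TODO in eager_functions.cc: coalescing a COO sparse tensor means sorting the non-zero indices and merging (summing) values that share the same coordinate, so later kernels can assume unique, ordered indices. The following is only a minimal standalone C++ sketch of that merge step for a 1-D COO tensor with made-up data; it does not use Paddle's SparseCooTensor or its coalesced() implementation.

// Standalone sketch: coalesce a 1-D COO tensor by sorting coordinates and
// summing values that share a coordinate. Illustrative data and names only.
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

int main() {
  // Hypothetical un-coalesced input: coordinate 2 appears twice.
  std::vector<int64_t> indices = {2, 0, 2, 5};
  std::vector<float> values = {1.0f, 3.0f, 4.0f, 2.0f};

  // std::map keeps keys ordered, so accumulation also sorts the coordinates.
  std::map<int64_t, float> coalesced;
  for (std::size_t i = 0; i < indices.size(); ++i) {
    coalesced[indices[i]] += values[i];
  }

  // Prints: 0 -> 3, 2 -> 5, 5 -> 2 (the duplicates at coordinate 2 merged).
  for (const auto& kv : coalesced) {
    std::cout << kv.first << " -> " << kv.second << "\n";
  }
  return 0;
}

For an N-dimensional COO layout the same idea applies, with the coordinate key becoming a tuple compared lexicographically.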