diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc index 9596551136c201e8c525576b29744f3df3b3d3cb..16a5cff031d65af64f0f69708ba1811ad008887d 100644 --- a/paddle/fluid/pybind/eager_functions.cc +++ b/paddle/fluid/pybind/eager_functions.cc @@ -523,8 +523,8 @@ static PyObject* eager_api_sparse_coo_tensor(PyObject* self, std::dynamic_pointer_cast(non_zero_indices.impl()); auto dense_elements = std::dynamic_pointer_cast(non_zero_elements.impl()); - // TODO(zhangkaihuo): After create SparseTensor, call coalesced() to sort and - // merge duplicate indices + // TODO(zhangkaihuo): After creating SparseCooTensor, call coalesced() to sort + // and merge duplicate indices std::shared_ptr coo_tensor = std::make_shared( *dense_indices, *dense_elements, phi::make_ddim(dense_shape)); @@ -537,7 +537,7 @@ static PyObject* eager_api_sparse_coo_tensor(PyObject* self, autograd_meta->SetStopGradient(static_cast(stop_gradient)); if (!autograd_meta->GetMutableGradNode()) { VLOG(3) << "Tensor(" << name - << ") have not GradNode, add GradNodeAccumulation for it."; + << ") doesn't have GradNode, add GradNodeAccumulation to it."; autograd_meta->SetGradNode( std::make_shared(autograd_meta)); } diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc index 62a28868bb8dfa0b9404ddb148d8819f8d63038c..c50a80f64dd591e02a346dbae5ed5d49e7e01491 100644 --- a/paddle/fluid/pybind/eager_utils.cc +++ b/paddle/fluid/pybind/eager_utils.cc @@ -92,7 +92,7 @@ int TensorDtype2NumpyDtype(phi::DataType dtype) { } bool PyObject_CheckLongOrConvertToLong(PyObject** obj) { - if ((PyLong_Check(*obj) && !PyBool_Check(*obj))) { + if (PyLong_Check(*obj) && !PyBool_Check(*obj)) { return true; } @@ -129,7 +129,7 @@ bool PyObject_CheckStr(PyObject* obj) { return PyUnicode_Check(obj); } bool CastPyArg2AttrBoolean(PyObject* obj, ssize_t arg_pos) { if (obj == Py_None) { return false; // To be compatible with QA integration testing. 
Some - // test case pass in None. + // test cases pass in None. } else if (obj == Py_True) { return true; } else if (obj == Py_False) { @@ -305,7 +305,7 @@ std::vector CastPyArg2VectorOfInt(PyObject* obj, size_t arg_pos) { Py_ssize_t len = PyList_Size(obj); PyObject* item = nullptr; for (Py_ssize_t i = 0; i < len; i++) { - item = PyList_GetItem(obj, i); + item = PyList_GET_ITEM(obj, i); if (PyObject_CheckLongOrConvertToLong(&item)) { result.emplace_back(static_cast(PyLong_AsLong(item))); } else { @@ -321,13 +321,13 @@ std::vector CastPyArg2VectorOfInt(PyObject* obj, size_t arg_pos) { Py_ssize_t len = PyTuple_Size(obj); PyObject* item = nullptr; for (Py_ssize_t i = 0; i < len; i++) { - item = PyTuple_GetItem(obj, i); + item = PyTuple_GET_ITEM(obj, i); if (PyObject_CheckLongOrConvertToLong(&item)) { result.emplace_back(static_cast(PyLong_AsLong(item))); } else { PADDLE_THROW(platform::errors::InvalidArgument( "argument (position %d) must be " - "list of bool, but got %s at pos %d", + "list of int, but got %s at pos %d", arg_pos + 1, reinterpret_cast(item->ob_type)->tp_name, i)); diff --git a/paddle/phi/api/include/tensor.h b/paddle/phi/api/include/tensor.h index 667ef281b99027c7f84a1bf50cee8596cc57039f..67cedaf6710abd5d743a27336ece06c627b64072 100644 --- a/paddle/phi/api/include/tensor.h +++ b/paddle/phi/api/include/tensor.h @@ -574,7 +574,7 @@ class PADDLE_API Tensor final { * unified to Tensor, but Tensor itself is heterogeneous. * * Tensor can generally be represented by void* and size_t, place. 
- * This is suitable for most scenarios including CPU, GPU, HIP, CPU, etc., + * This is suitable for most scenarios including CPU, GPU, HIP, NPU, etc., * but there are a few cases where this definition cannot be described, * such as the Tensor representation in third-party lib such as Metal, * OpenCL, etc., as well as some special Tensor implementations, including diff --git a/paddle/phi/core/dense_tensor.h b/paddle/phi/core/dense_tensor.h index d16a019c7ab0df756d3db372b77787b498e22aee..e9a6be66b98caba1acc39ad428fb281900c44fe8 100644 --- a/paddle/phi/core/dense_tensor.h +++ b/paddle/phi/core/dense_tensor.h @@ -29,7 +29,7 @@ namespace phi { class DenseTensorUtils; -/// \brief The Dense tensor store values in a contiguous sequential block +/// \brief The Dense tensor stores values in a contiguous sequential block /// of memory where all values are represented. Tensors or multi-dimensional /// arrays are used in math operators. /// During the entire life cycle of a DenseTensor, its device type and key diff --git a/paddle/phi/core/tensor_base.h b/paddle/phi/core/tensor_base.h index 96594bcb4e95b01a236320ffa624e18933512ff6..3dc0e455a6358eddf59d7f13ee8f4aa894aa8351 100644 --- a/paddle/phi/core/tensor_base.h +++ b/paddle/phi/core/tensor_base.h @@ -55,12 +55,12 @@ class TensorBase { virtual bool valid() const = 0; /// \brief Test whether the storage is allocated. - /// return Whether the storage is allocated. + /// \return Whether the storage is allocated. virtual bool initialized() const = 0; // TODO(Aurelius84): This interface is under intermediate state now. // We will remove DataType argument in the future. Please DO NOT - // rely on Datatype to much when design and implement other feature. + // rely on DataType too much when designing and implementing other features. /// \brief Allocate memory with requested size from allocator. /// \return The mutable data pointer value of type T. 
@@ -70,7 +70,7 @@ class TensorBase { /// \brief Return the type information of the derived class to support /// safely downcast in non-rtti environment. - /// return The type information of the derived class. + /// \return The type information of the derived class. TypeInfo type_info() const { return type_info_; } private: