diff --git a/paddle/phi/core/allocator.h b/paddle/phi/core/allocator.h
index 849fc1548c7ec91a78899777aefa8aa58d61b3df..1d89fd1b4aa88b83d5b23159ebed0ba45290e762 100644
--- a/paddle/phi/core/allocator.h
+++ b/paddle/phi/core/allocator.h
@@ -70,7 +70,7 @@ class Allocation {
   // the AlignedAllocator will always allocate memory as size + kAlignment.
   // The raw pointer might not aligned, so an offset might be added to raw
   // the pointer. The size of this allocation will be
-  // `size + kAlignemnt - offset`.
+  // `size + kAlignment - offset`.
   size_t size() const noexcept { return size_; }

   void* operator->() const noexcept { return ptr_; }
diff --git a/paddle/phi/core/dense_tensor.cc b/paddle/phi/core/dense_tensor.cc
index 3116093a7884d7a37ff3ccbc79b94f1410193234..2c8f36f6c34ae70aaf0ecee659136654b03b7983 100644
--- a/paddle/phi/core/dense_tensor.cc
+++ b/paddle/phi/core/dense_tensor.cc
@@ -106,7 +106,7 @@ void* DenseTensor::AllocateFrom(Allocator* allocator,
       phi::errors::InvalidArgument(
           "Required allocator shall not be nullptr, but received nullptr."));
   if (this->dtype() != dtype) {
-    VLOG(10) << "change data type in mutbale_data, target dtype - " << dtype;
+    VLOG(10) << "change data type in mutable_data, target dtype - " << dtype;
     meta_.dtype = dtype;
   }

diff --git a/paddle/phi/core/enforce.h b/paddle/phi/core/enforce.h
index 3f17b34b2bc60e96b776a883128e7ceb86b9fac3..6b98fd048859570bad8febcf6242f9c8a6ea2b34 100644
--- a/paddle/phi/core/enforce.h
+++ b/paddle/phi/core/enforce.h
@@ -351,7 +351,7 @@ struct EnforceNotMet : public std::exception {
   // Complete error message
   // e.g. InvalidArgumentError: ***
   std::string err_str_;
-  // Simple errror message used when no C++ stack and python compile stack
+  // Simple error message used when no C++ stack and python compile stack
   // e.g. (InvalidArgument) ***
   std::string simple_err_str_;
 };
diff --git a/paddle/phi/core/errors.h b/paddle/phi/core/errors.h
index d1365de5196252d5060a8e87ed772aca40902c3c..b66cd5fd18f0ac3bac0a0db9d509c15fcb6accc4 100644
--- a/paddle/phi/core/errors.h
+++ b/paddle/phi/core/errors.h
@@ -66,7 +66,7 @@ enum ErrorCode {
   EXECUTION_TIMEOUT = 8,

   // Operation is not implemented or not supported/enabled in this service.
-  // Error type string: "UnimpelmentedError"
+  // Error type string: "UnimplementedError"
   UNIMPLEMENTED = 9,

   // The service is currently unavailable.
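For context on the `size + kAlignment - offset` formula fixed in the allocator.h hunk: the AlignedAllocator over-allocates by `kAlignment` bytes and bumps the raw pointer forward until it is aligned, so the usable capacity shrinks by the bump. A minimal sketch of that arithmetic, assuming a hypothetical `kAlignment` value and helper name (not the actual AlignedAllocator implementation):

```cpp
#include <cstddef>
#include <cstdint>

// Assumed value for illustration only.
constexpr size_t kAlignment = 64;

// Given a raw pointer to (requested + kAlignment) bytes, shift it forward by
// `offset` so it is kAlignment-aligned; the remaining usable capacity is then
// (requested + kAlignment) - offset, matching the comment's formula.
void* AlignRawPointer(void* raw, size_t requested, size_t* usable) {
  auto addr = reinterpret_cast<uintptr_t>(raw);
  size_t offset = (kAlignment - addr % kAlignment) % kAlignment;
  *usable = requested + kAlignment - offset;
  return reinterpret_cast<void*>(addr + offset);
}
```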
diff --git a/paddle/phi/core/flags.cc b/paddle/phi/core/flags.cc
index 59c2901d8f4f1f50d211bc91b10dd7b301e8f09d..5d62c6733516f7d61b13c17e5c4650ed98fbc24e 100644
--- a/paddle/phi/core/flags.cc
+++ b/paddle/phi/core/flags.cc
@@ -1050,10 +1050,10 @@ DEFINE_int32(record_pool_max_size,
 DEFINE_int32(slotpool_thread_num, 1, "SlotRecordDataset slot pool thread num");
 DEFINE_bool(enable_slotpool_wait_release,
             false,
-            "enable slotrecord obejct wait release, default false");
+            "enable slotrecord object wait release, default false");
 DEFINE_bool(enable_slotrecord_reset_shrink,
             false,
-            "enable slotrecord obejct reset shrink memory, default false");
+            "enable slotrecord object reset shrink memory, default false");
 DEFINE_bool(enable_ins_parser_file,
             false,
             "enable parser ins file, default false");
@@ -1067,7 +1067,7 @@ PHI_DEFINE_EXPORTED_double(gpugraph_hbm_table_load_factor,
 PHI_DEFINE_EXPORTED_bool(
     gpugraph_enable_gpu_direct_access,
     false,
-    "enable direct access bwtween multi gpu cards, default false");
+    "enable direct access between multi gpu cards, default false");
 PHI_DEFINE_EXPORTED_bool(
     gpugraph_enable_segment_merge_grads,
     false,
@@ -1140,12 +1140,12 @@ PHI_DEFINE_EXPORTED_int32(search_cache_max_number,
                           "search_cache_max_number.");

 /**
- * Preformance related FLAG
+ * Performance related FLAG
  * Name: einsum_opt
  * Since Version: 2.3.0
  * Value Range: bool, default=false
  * Example:
- * Note: If True, EinsumOp will be optimimzed by innercache reuse, which
+ * Note: If True, EinsumOp will be optimized by innercache reuse, which
  * uses more gpu memory.
  */
 PHI_DEFINE_EXPORTED_bool(
@@ -1166,7 +1166,7 @@ PHI_DEFINE_EXPORTED_bool(
  */
 PHI_DEFINE_EXPORTED_string(jit_engine_type,
                            "Predictor",
-                           "Choose default funciton type in JitLayer.");
+                           "Choose default function type in JitLayer.");

 /**
 * Custom Device NPU related FLAG
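The `DEFINE_bool` / `PHI_DEFINE_EXPORTED_bool` declarations whose descriptions are corrected above follow the gflags pattern: the macro registers a flag and creates a `FLAGS_<name>` global initialized to the given default. A minimal standalone sketch of that pattern with a hypothetical `demo_opt` flag, using plain gflags rather than Paddle's exported-flag macros:

```cpp
#include <gflags/gflags.h>
#include <iostream>

// Hypothetical demo flag; the name and help text are illustrative only.
DEFINE_bool(demo_opt, false, "enable the demo optimization, default false");

int main(int argc, char* argv[]) {
  // Parses command-line flags such as `./app --demo_opt=true` and makes
  // the value available through the FLAGS_demo_opt global.
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  if (FLAGS_demo_opt) {
    std::cout << "demo optimization enabled\n";
  }
  return 0;
}
```

In Paddle itself, exported flags such as `einsum_opt` are typically toggled through environment variables of the same name, e.g. `FLAGS_einsum_opt=true`.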