From d89e0367289ff05eabfa695a272a59c77461bf70 Mon Sep 17 00:00:00 2001 From: co63oc Date: Tue, 23 May 2023 14:59:22 +0800 Subject: [PATCH] Fix typos (#53960) --- paddle/fluid/framework/async_executor.cc | 4 +- paddle/fluid/framework/attribute_checker.h | 4 +- paddle/fluid/framework/block_desc.cc | 2 +- .../framework/ir/coalesce_grad_tensor_pass.cc | 4 +- .../framework/ir/constant_folding_pass.cc | 4 +- .../framework/ir/conv_bn_fuse_pass_tester.cc | 2 +- .../fuse_momentum_op_pass.cc | 2 +- .../fuse_sgd_op_pass.cc | 2 +- .../ir/fusion_group/cuda_resources.h | 2 +- .../framework/ir/fusion_group/operation.cc | 2 +- .../operators/collective/allreduce_op.cc | 2 +- .../fluid/operators/collective/barrier_op.cc | 2 +- .../collective/c_comm_init_all_op.cc | 2 +- .../collective/c_comm_init_multitrainer_op.cc | 2 +- .../operators/collective/c_comm_init_op.cc | 6 +-- .../operators/collective/c_embedding_op.cc | 4 +- .../operators/collective/c_gen_bkcl_id_op.cc | 2 +- .../operators/collective/c_gen_nccl_id_op.cc | 2 +- .../operators/collective/gen_bkcl_id_op.cc | 6 +-- .../operators/collective/gen_nccl_id_op.cc | 6 +-- .../operators/collective/global_gather_op.cc | 2 +- .../operators/collective/global_scatter_op.cc | 2 +- paddle/fluid/platform/gen_comm_id_helper.cc | 2 +- paddle/fluid/platform/init.cc | 4 +- paddle/fluid/pybind/cuda_streams_py.cc | 6 +-- paddle/fluid/pybind/custom_device_py.cc | 2 +- paddle/fluid/pybind/eager.cc | 16 ++++---- .../eager_legacy_op_function_generator.cc | 8 ++-- paddle/fluid/pybind/eager_method.cc | 4 +- paddle/fluid/pybind/eager_properties.cc | 4 +- paddle/fluid/pybind/eager_py_layer.cc | 4 +- paddle/fluid/pybind/eager_utils.cc | 2 +- paddle/fluid/pybind/imperative.cc | 12 +++--- paddle/fluid/pybind/jit.cc | 2 +- paddle/fluid/pybind/parallel_executor.cc | 38 +++++++++---------- paddle/fluid/pybind/protobuf.cc | 8 ++-- paddle/fluid/pybind/reader_py.cc | 2 +- paddle/ir/builtin_attribute_storage.h | 2 +- paddle/ir/ir_context.cc | 12 +++--- paddle/ir/operation.cc | 2 +- paddle/ir/operation.h | 2 +- paddle/ir/storage_manager.cc | 2 +- paddle/ir/type_base.h | 2 +- paddle/ir/value_impl.h | 2 +- python/paddle/audio/backends/wave_backend.py | 4 +- python/paddle/audio/datasets/dataset.py | 2 +- python/paddle/autograd/autograd.py | 8 ++-- python/paddle/batch.py | 2 +- .../auto_parallel/operators/dist_dropout.py | 6 +-- .../operators/dist_fused_dropout_add.py | 4 +- 50 files changed, 115 insertions(+), 115 deletions(-) diff --git a/paddle/fluid/framework/async_executor.cc b/paddle/fluid/framework/async_executor.cc index f1670342202..2c9d549838f 100644 --- a/paddle/fluid/framework/async_executor.cc +++ b/paddle/fluid/framework/async_executor.cc @@ -113,14 +113,14 @@ void AsyncExecutor::RunFromFile(const ProgramDesc& main_program, } /* - readerDesc: protobuf description for reader initlization + readerDesc: protobuf description for reader initialization argument: class_name, batch_size, use_slot, queue_size, buffer_size, padding_index reader: 1) each thread has a reader, reader will read input data and put it into input queue - 2) each reader has a Next() iterface, that can fetch an instance + 2) each reader has a Next() interface, that can fetch an instance from the input queue */ // todo: should be factory method for creating datafeed diff --git a/paddle/fluid/framework/attribute_checker.h b/paddle/fluid/framework/attribute_checker.h index 2e5e7bf8939..fbea00bc0f8 100644 --- a/paddle/fluid/framework/attribute_checker.h +++ b/paddle/fluid/framework/attribute_checker.h @@ -44,7 +44,7 
@@ class EqualGreaterThanChecker { PADDLE_ENFORCE_GE( value, lower_bound_, - platform::errors::OutOfRange("Check for attribute valur equal or " + platform::errors::OutOfRange("Check for attribute value equal or " "greater than a certain value failed.")); } @@ -92,7 +92,7 @@ class TypedAttrVarInfoChecker { true, platform::errors::InvalidArgument( "Required dtype of Attribute(%s) shall be " - "int32|int64, but recevied %s.", + "int32|int64, but received %s.", var_desc->Name(), dtype)); } diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc index a972f69b152..911d5079c7d 100644 --- a/paddle/fluid/framework/block_desc.cc +++ b/paddle/fluid/framework/block_desc.cc @@ -316,7 +316,7 @@ void BlockDesc::MoveFrom(BlockDesc *block) { // NOTE(GhostScreaming): don't use program->proto()->blocks_size(), // previous assignment of new Variable in vars_ use std::move, // which makes 'var_ptr' which holded by 'block' a nullptr. - // block->Program()->proto() will calls Flush() at firtst, + // block->Program()->proto() will calls Flush() at first, // a null var_ptr will cause segmentation fault. int block_size = static_cast(program->Size()); for (int i = 0; i < block_size; ++i) { diff --git a/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc b/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc index 9f9d8b2bac2..cede39503b3 100644 --- a/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc +++ b/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc @@ -53,7 +53,7 @@ namespace ir { static constexpr double kMB = 1048576.0; // SetFuseParameterGroupsSize and SetFuseParameterMemorySize are used in unit -// test, because it is invalid that seting 'FLAGS_fuse_parameter_memory_size' +// test, because it is invalid that setting 'FLAGS_fuse_parameter_memory_size' // and 'FLAGS_fuse_parameter_groups_size' in unit test. void SetFuseParameterGroupsSize(int group_size) { FLAGS_fuse_parameter_groups_size = group_size; @@ -567,7 +567,7 @@ class CoalesceGradTensorPass : public ir::Pass { // coalesce_tensor op needs to be executed again after the execution // of DropScope(). - // we can make fused_output persistable, so the memeory is not cleared + // we can make fused_output persistable, so the memory is not cleared // and coalesce_tensor op do nothing if the inputs are already continue. 
result->Get(details::kProgramDescs).emplace_back(); diff --git a/paddle/fluid/framework/ir/constant_folding_pass.cc b/paddle/fluid/framework/ir/constant_folding_pass.cc index 9e3d1d5c08c..04451424644 100644 --- a/paddle/fluid/framework/ir/constant_folding_pass.cc +++ b/paddle/fluid/framework/ir/constant_folding_pass.cc @@ -62,7 +62,7 @@ void ConstantFoldingPass::ApplyImpl(ir::Graph *graph) const { PADDLE_ENFORCE_NOT_NULL( scope, platform::errors::Fatal( - "scope must not be null when applying constant floding.")); + "scope must not be null when applying constant folding.")); std::vector blacklist{"feed", "matrix_multiply", "save"}; @@ -75,7 +75,7 @@ void ConstantFoldingPass::ApplyImpl(ir::Graph *graph) const { continue; bool input_persis = true; - // map is used to record how many time a name string occures in the whole + // map is used to record how many time a name string occurs in the whole // graph's nodes std::unordered_map map; for (auto in_node : op_node->inputs) { diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_bn_fuse_pass_tester.cc index 021d372c2c8..7cd069eea91 100644 --- a/paddle/fluid/framework/ir/conv_bn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass_tester.cc @@ -54,7 +54,7 @@ void TestMain(const std::string& conv_type) { // ------------------------------------------------------------------ // (in, filters, bias_0) conv -> conv_out // (conv_out, scale, - // bias_1, mean, varaince) batch_norm -> (...) + // bias_1, mean, variance) batch_norm -> (...) Layers layers; auto* in = layers.data("in", {1, 3, 20, 20}); auto* filters = layers.data("filters", {3, 3, 2, 2}, true); diff --git a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc index bab16feeb8b..4038f39fc53 100644 --- a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc +++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc @@ -44,7 +44,7 @@ class FuseMomentumOpPass : public FuseOptimizerOpPass { PADDLE_ENFORCE_GT( momentum_ops.size(), static_cast(0), - platform::errors::InvalidArgument("Momentum ops must not be empyt.")); + platform::errors::InvalidArgument("Momentum ops must not be empty.")); // Check attributions // NOTE: If new attribution is added, the following code maybe need change. diff --git a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc index e47b8248d63..e56679bd125 100644 --- a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc +++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc @@ -43,7 +43,7 @@ class FuseSgdOpPass : public FuseOptimizerOpPass { PADDLE_ENFORCE_GT( sgd_ops.size(), static_cast(0), - platform::errors::InvalidArgument("SGD ops must not be empyt.")); + platform::errors::InvalidArgument("SGD ops must not be empty.")); // NOTE: fused_var is only exist in scope, so the graph doesn't have // fused_var node. 
diff --git a/paddle/fluid/framework/ir/fusion_group/cuda_resources.h b/paddle/fluid/framework/ir/fusion_group/cuda_resources.h index 67838b4e3c7..c9f8e560e02 100644 --- a/paddle/fluid/framework/ir/fusion_group/cuda_resources.h +++ b/paddle/fluid/framework/ir/fusion_group/cuda_resources.h @@ -35,7 +35,7 @@ __device__ inline double Sqrt(double x) { return sqrt(x); } )"; -// List some bulit-in functions of __half implemented in cuda_fp16.hpp +// List some built-in functions of __half implemented in cuda_fp16.hpp static constexpr char predefined_cuda_functions_fp16[] = R"( #define __HALF_TO_US(var) *(reinterpret_cast(&(var))) #define __HALF_TO_CUS(var) *(reinterpret_cast(&(var))) diff --git a/paddle/fluid/framework/ir/fusion_group/operation.cc b/paddle/fluid/framework/ir/fusion_group/operation.cc index 98279e73c1a..12699189fce 100644 --- a/paddle/fluid/framework/ir/fusion_group/operation.cc +++ b/paddle/fluid/framework/ir/fusion_group/operation.cc @@ -134,7 +134,7 @@ void OperationMap::InsertUnaryElementwiseOperations() { // cast: // out = static_cast(x) - // TODO(wangchaochaohu): This is not the compelete definition of + // TODO(wangchaochaohu): This is not the complete definition of // cast Op, We need refine it later. insert_handler("cast", "${0}", {}); diff --git a/paddle/fluid/operators/collective/allreduce_op.cc b/paddle/fluid/operators/collective/allreduce_op.cc index 91ca5105471..40305cc2106 100644 --- a/paddle/fluid/operators/collective/allreduce_op.cc +++ b/paddle/fluid/operators/collective/allreduce_op.cc @@ -39,7 +39,7 @@ class AllReduceDelOpMaker : public framework::OpProtoAndCheckerMaker { void Make() { AddInput("X", "(Tensor), tensor to be allreduced."); AddOutput("Out", "(Tensor) the result of allreduced."); - AddAttr("reduce_type", "(int) determin the reduce type.") + AddAttr("reduce_type", "(int) determine the reduce type.") .SetDefault(0); AddAttr( "sync_mode", diff --git a/paddle/fluid/operators/collective/barrier_op.cc b/paddle/fluid/operators/collective/barrier_op.cc index c9066980473..039bd7789ad 100644 --- a/paddle/fluid/operators/collective/barrier_op.cc +++ b/paddle/fluid/operators/collective/barrier_op.cc @@ -33,7 +33,7 @@ class BarrierOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("ring_id", "(int default 0) communication ring id.") .SetDefault(0); AddComment(R"DOC( -Barrier Operator - Barrier among all pariticapitors.)DOC"); +Barrier Operator - Barrier among all participators.)DOC"); } }; diff --git a/paddle/fluid/operators/collective/c_comm_init_all_op.cc b/paddle/fluid/operators/collective/c_comm_init_all_op.cc index 58f1c10d6c6..8c1a0db5714 100644 --- a/paddle/fluid/operators/collective/c_comm_init_all_op.cc +++ b/paddle/fluid/operators/collective/c_comm_init_all_op.cc @@ -124,7 +124,7 @@ class CCommInitAllOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( CCommInitAll operator -Initialize all collective communicatoin context +Initialize all collective communication context )DOC"); AddAttr>( "devices", diff --git a/paddle/fluid/operators/collective/c_comm_init_multitrainer_op.cc b/paddle/fluid/operators/collective/c_comm_init_multitrainer_op.cc index 83d42c25365..655053708de 100644 --- a/paddle/fluid/operators/collective/c_comm_init_multitrainer_op.cc +++ b/paddle/fluid/operators/collective/c_comm_init_multitrainer_op.cc @@ -79,7 +79,7 @@ class CCommInitMultiTrainerOp : public framework::OperatorBase { class CCommInitMultiTrainerOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("X", 
"Raw variable contains a NCCL UniqueId instaces."); + AddInput("X", "Raw variable contains a NCCL UniqueId instances."); AddComment(R"DOC( CCommInitMultiTrainer operator diff --git a/paddle/fluid/operators/collective/c_comm_init_op.cc b/paddle/fluid/operators/collective/c_comm_init_op.cc index 5a22ad716e1..e3bf8a63805 100644 --- a/paddle/fluid/operators/collective/c_comm_init_op.cc +++ b/paddle/fluid/operators/collective/c_comm_init_op.cc @@ -115,17 +115,17 @@ class CCommInitOp : public framework::OperatorBase { class CCommInitOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("X", "Raw variable contains a NCCL UniqueId instaces."); + AddInput("X", "Raw variable contains a NCCL UniqueId instances."); AddComment(R"DOC( CCommInit operator -Initialize collective communicatoin context within this trainer +Initialize collective communication context within this trainer )DOC"); AddAttr("nranks", "(int) The number of ranks of distributed trainers"); AddAttr("rank", "(int) The rank of the trainer in distributed training."); AddAttr("device_id", - "(int) The deivce_id on which to initialize the communicator." + "(int) The device_id on which to initialize the communicator." "Now, you only have to set this attr manually for pipeline " "training. Otherwise, make it as default.") .SetDefault(-1); diff --git a/paddle/fluid/operators/collective/c_embedding_op.cc b/paddle/fluid/operators/collective/c_embedding_op.cc index 2efd5b46bdc..7eb21ae9a46 100644 --- a/paddle/fluid/operators/collective/c_embedding_op.cc +++ b/paddle/fluid/operators/collective/c_embedding_op.cc @@ -58,7 +58,7 @@ class CEmbeddingOp : public framework::OperatorWithKernel { (height > 0 && width > 0 && start_idx >= 0), true, platform::errors::InvalidArgument( - "height:%ld width:%ld start_idx:%ld must not have negtive values", + "height:%ld width:%ld start_idx:%ld must not have negative values", height, width, start_idx)); @@ -142,7 +142,7 @@ class CEmbeddingOpGrad : public framework::OperatorWithKernel { (height > 0 && width > 0 && start_idx >= 0), true, platform::errors::InvalidArgument( - "height:%ld width:%ld start_idx:%ld must not have negtive values", + "height:%ld width:%ld start_idx:%ld must not have negative values", height, width, start_idx)); diff --git a/paddle/fluid/operators/collective/c_gen_bkcl_id_op.cc b/paddle/fluid/operators/collective/c_gen_bkcl_id_op.cc index 5c7a0e2d325..24cddfb0638 100644 --- a/paddle/fluid/operators/collective/c_gen_bkcl_id_op.cc +++ b/paddle/fluid/operators/collective/c_gen_bkcl_id_op.cc @@ -89,7 +89,7 @@ class CGenBKCLIdOp : public framework::OperatorBase { class CGenBKCLIdOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddOutput("Out", "Raw variable contains a BKCL UniqueId instaces."); + AddOutput("Out", "Raw variable contains a BKCL UniqueId instances."); AddComment(R"DOC( CGenBKCLId operator diff --git a/paddle/fluid/operators/collective/c_gen_nccl_id_op.cc b/paddle/fluid/operators/collective/c_gen_nccl_id_op.cc index 67d15138b7a..8158f709490 100644 --- a/paddle/fluid/operators/collective/c_gen_nccl_id_op.cc +++ b/paddle/fluid/operators/collective/c_gen_nccl_id_op.cc @@ -103,7 +103,7 @@ class CGenNCCLIdOp : public framework::OperatorBase { class CGenNCCLIdOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddOutput("Out", "Raw variable contains a NCCL UniqueId instaces."); + AddOutput("Out", "Raw variable contains a NCCL UniqueId instances."); AddComment(R"DOC( CGenNCCLId 
operator diff --git a/paddle/fluid/operators/collective/gen_bkcl_id_op.cc b/paddle/fluid/operators/collective/gen_bkcl_id_op.cc index 725a2841218..581e6183fe7 100644 --- a/paddle/fluid/operators/collective/gen_bkcl_id_op.cc +++ b/paddle/fluid/operators/collective/gen_bkcl_id_op.cc @@ -165,7 +165,7 @@ class GenBKCLIdOp : public framework::OperatorBase { class GenBKCLIdOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddOutput("BKCLID", "Raw variable contains a BKCL UniqueId instaces."); + AddOutput("BKCLID", "Raw variable contains a BKCL UniqueId instances."); AddComment(R"DOC( GenBKCLId operator @@ -186,11 +186,11 @@ For trainer 1~n: start a gRPC server to get the UniqueId, once got, stop the ser .SetDefault(1); AddAttr("use_hierarchical_allreduce", "(bool default false) " - "Wheter to use hierarchical allreduce.") + "Whether to use hierarchical allreduce.") .SetDefault(false); AddAttr("hierarchical_allreduce_inter_nranks", "(int default 1) " - "Wheter to use hierarchical allreduce.") + "Whether to use hierarchical allreduce.") .SetDefault(-1); } }; diff --git a/paddle/fluid/operators/collective/gen_nccl_id_op.cc b/paddle/fluid/operators/collective/gen_nccl_id_op.cc index d6eb9116268..3c71bf45068 100644 --- a/paddle/fluid/operators/collective/gen_nccl_id_op.cc +++ b/paddle/fluid/operators/collective/gen_nccl_id_op.cc @@ -222,7 +222,7 @@ class GenNCCLIdOp : public framework::OperatorBase { class GenNCCLIdOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddOutput("NCCLID", "Raw variable contains a NCCL UniqueId instaces."); + AddOutput("NCCLID", "Raw variable contains a NCCL UniqueId instances."); AddComment(R"DOC( GenNCCLId operator @@ -243,11 +243,11 @@ For trainer 1~n: start a gRPC server to get the UniqueId, once got, stop the ser .SetDefault(1); AddAttr("use_hierarchical_allreduce", "(bool default false) " - "Wheter to use hierarchical allreduce.") + "Whether to use hierarchical allreduce.") .SetDefault(false); AddAttr("hierarchical_allreduce_inter_nranks", "(int default 1) " - "Wheter to use hierarchical allreduce.") + "Whether to use hierarchical allreduce.") .SetDefault(-1); } }; diff --git a/paddle/fluid/operators/collective/global_gather_op.cc b/paddle/fluid/operators/collective/global_gather_op.cc index 370701dbb8e..e97a1a81f31 100644 --- a/paddle/fluid/operators/collective/global_gather_op.cc +++ b/paddle/fluid/operators/collective/global_gather_op.cc @@ -77,7 +77,7 @@ class GlobalGatherOpMaker : public framework::OpProtoAndCheckerMaker { .SetDefault(false); AddComment(R"DOC( Global Gather Operator -Gather data in X to n_expert * world_size exeperts according to +Gather data in X to n_expert * world_size experts according to local_count and receive tensors from n_expert * world_size experts according to global_count. )DOC"); diff --git a/paddle/fluid/operators/collective/global_scatter_op.cc b/paddle/fluid/operators/collective/global_scatter_op.cc index e29ca9ab371..d4652b4885c 100644 --- a/paddle/fluid/operators/collective/global_scatter_op.cc +++ b/paddle/fluid/operators/collective/global_scatter_op.cc @@ -81,7 +81,7 @@ class GlobalScatterOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Global Scatter Operator Scatter data in X which has been put together belong to one expert -to n_expert * world_size exeperts according to local_count +to n_expert * world_size experts according to local_count and receive tensors from n_expert * world_size experts according to global_count. 
)DOC"); diff --git a/paddle/fluid/platform/gen_comm_id_helper.cc b/paddle/fluid/platform/gen_comm_id_helper.cc index 0237d28e52c..365c44fc9ab 100644 --- a/paddle/fluid/platform/gen_comm_id_helper.cc +++ b/paddle/fluid/platform/gen_comm_id_helper.cc @@ -314,7 +314,7 @@ static int ConnectAddr(const std::string& ep, const CommHead head) { CHECK_SYS_CALL(SocketSend(sock, phead, sizeof(head)), "send"); ret_val = SocketRecv(sock, buffer, sizeof(head)); if (ret_val > 0 && memcmp(buffer, phead, sizeof(head)) == 0) { - // recv same message from recver, indicating that the link is correct + // recv same message from receiver, indicating that the link is correct break; // accept client } else { VLOG(3) << "socket read failed with ret_val=" << ret_val; diff --git a/paddle/fluid/platform/init.cc b/paddle/fluid/platform/init.cc index 3fa02f82a6b..81714cf9eb2 100644 --- a/paddle/fluid/platform/init.cc +++ b/paddle/fluid/platform/init.cc @@ -304,9 +304,9 @@ void SignalHandle(const char *data, int size) { signal_info.replace(start_pos, useless_substr.length(), ""); *signal_msg_dunmer_ptr << " [SignalInfo: " << signal_info << "]\n"; - // NOTE3: Final singal error message print. + // NOTE3: Final signal error message print. // Here does not throw an exception, - // otherwise it will casue "terminate called recursively" + // otherwise it will cause "terminate called recursively" std::ostringstream sout; sout << "\n\n--------------------------------------\n"; sout << "C++ Traceback (most recent call last):"; diff --git a/paddle/fluid/pybind/cuda_streams_py.cc b/paddle/fluid/pybind/cuda_streams_py.cc index c3273eaeff1..2b8969e1b81 100644 --- a/paddle/fluid/pybind/cuda_streams_py.cc +++ b/paddle/fluid/pybind/cuda_streams_py.cc @@ -209,7 +209,7 @@ void BindCudaStream(py::module *m_ptr) { Default: None. Returns: - The recored event. + The record event. Examples: .. code-block:: python @@ -263,7 +263,7 @@ void BindCudaStream(py::module *m_ptr) { auto place_tmp = platform::CUDAPlace(curr_device_id); new (&self) phi::CUDAStream(place_tmp, priority - 2, stream_flag); } else { - // seting priority 1(high) and 2(normal) correspond to the actual + // setting priority 1(high) and 2(normal) correspond to the actual // cuda stream priority -1 and 0. new (&self) phi::CUDAStream(*place, priority - 2, stream_flag); } @@ -295,7 +295,7 @@ void BindCudaStream(py::module *m_ptr) { } auto stream_flag = phi::CUDAStream::StreamFlag::kStreamNonBlocking; - // seting priority 1(high) and 2(normal) correspond to the actual + // setting priority 1(high) and 2(normal) correspond to the actual // cuda stream priority -1 and 0. new (&self) phi::CUDAStream( platform::CUDAPlace(device), priority - 2, stream_flag); diff --git a/paddle/fluid/pybind/custom_device_py.cc b/paddle/fluid/pybind/custom_device_py.cc index d138115c45e..42addb0445c 100644 --- a/paddle/fluid/pybind/custom_device_py.cc +++ b/paddle/fluid/pybind/custom_device_py.cc @@ -313,7 +313,7 @@ void BindCustomDevicePy(py::module *m_ptr) { Default: None. Returns: - The recored event. + The record event. Examples: .. 
code-block:: python diff --git a/paddle/fluid/pybind/eager.cc b/paddle/fluid/pybind/eager.cc index 3029cee9e0d..c27ad828961 100644 --- a/paddle/fluid/pybind/eager.cc +++ b/paddle/fluid/pybind/eager.cc @@ -763,7 +763,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { } } else if (args_num == (Py_ssize_t)1 || args_num == (Py_ssize_t)2 || args_num == (Py_ssize_t)3) { - // 1 to 3 position args, remainting arguments are kwargs + // 1 to 3 position args, remaining arguments are kwargs PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0); if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) { VLOG(6) << "Calling case3's or case4's initializer."; @@ -798,7 +798,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { "constructor.")); } } else if (args_num == (Py_ssize_t)4) { - // 4 position args, remainting arguments are kwargs + // 4 position args, remaining arguments are kwargs PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0); if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) { VLOG(6) << "Calling case3's or case4's initializer."; @@ -808,7 +808,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { } else { PADDLE_THROW(platform::errors::InvalidArgument( "Incompatible constructor arguments, " - "there are 4 position args and remainting arguments arg kwargs," + "there are 4 position args and remaining arguments arg kwargs," "but the first position args should be PyArray. " "Please check your code and make sure the first position args is " "PyArray.")); @@ -856,7 +856,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { "Please check your code and make sure you call the existed " "constructor.")); } - } else { // five position args, remainting arguments are kwargs + } else { // five position args, remaining arguments are kwargs PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0); if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) { VLOG(6) << "Calling case3's or case4's initializer"; @@ -866,7 +866,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { } else { PADDLE_THROW(platform::errors::InvalidArgument( "Incompatible constructor arguments, " - "there are 5 position args and remainting arguments are kwargs," + "there are 5 position args and remaining arguments are kwargs," "but the first position args should be PyArray. " "Please check your code and make sure the first position args is " "PyArray.")); @@ -879,11 +879,11 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { AutoInitTensorByPyArray( py_tensor_ptr, kws_map, args, flag_kwargs, args_num); return 0; - } else { // six position args, remainting arguments are kwargs, but this + } else { // six position args, remaining arguments are kwargs, but this // is not a right way PADDLE_THROW(platform::errors::InvalidArgument( "Incompatible constructor arguments, " - "there are 6 position args and the remainting arguments are kwargs. " + "there are 6 position args and the remaining arguments are kwargs. 
" "Please check your code and make sure the first position args is " "PyArray.")); } @@ -1050,7 +1050,7 @@ int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { } } } else if (args_num == (Py_ssize_t)1) { // case 3 ~ 6 - // 1 position args, remainting arguments are kwargs + // 1 position args, remaining arguments are kwargs PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0); if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) { VLOG(6) << "Calling case3's or case4's string initializer."; diff --git a/paddle/fluid/pybind/eager_legacy_op_function_generator.cc b/paddle/fluid/pybind/eager_legacy_op_function_generator.cc index 083b4dc259f..c1ebc1b998b 100644 --- a/paddle/fluid/pybind/eager_legacy_op_function_generator.cc +++ b/paddle/fluid/pybind/eager_legacy_op_function_generator.cc @@ -131,7 +131,7 @@ static PyObject * %s(PyObject *self, PyObject *args, PyObject *kwargs) const char* PYBIND_ITEM_TEMPLATE = R"( {"%s", (PyCFunction)(void(*)(void))%s, METH_VARARGS | METH_KEYWORDS, "C++ interface function for %s in dygraph."},)"; -// These operators will skip automatical code generatrion and +// These operators will skip automatical code generation and // need to be handwritten in CUSTOM_HANDWRITE_OP_FUNC_FILE std::unordered_set CUSTOM_HANDWRITE_OPS_SET = {"run_program"}; @@ -216,7 +216,7 @@ std::string GenerateOpFunctionsBody( for (auto& output : op_proto->outputs()) { auto& out_name = output.name(); - // skip those dispensable oututs + // skip those dispensable outputs if (output.dispensable() && !FindOutsMap(op_type, out_name)) { continue; } @@ -341,7 +341,7 @@ std::string GenerateOpFunctionsBody( function_args = paddle::string::Sprintf(FUNCTION_ARGS, input_args); } - // generate op funtcion body + // generate op function body auto op_function_str = paddle::string::Sprintf(OP_FUNCTION_TEMPLATE, func_name, ins_cast_str, @@ -410,7 +410,7 @@ GenerateOpFunctions() { continue; } auto& op_type = op_proto->type(); - // Skip operators that will be handwriten in CUSTOM_HANDWRITE_OP_FUNC_FILE. + // Skip operators that will be handwritten in CUSTOM_HANDWRITE_OP_FUNC_FILE. if (CUSTOM_HANDWRITE_OPS_SET.count(op_type)) { continue; } diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc index cc2f21c418f..1d61c63c66d 100644 --- a/paddle/fluid/pybind/eager_method.cc +++ b/paddle/fluid/pybind/eager_method.cc @@ -295,7 +295,7 @@ static PyObject* tensor_method_numpy(TensorObject* self, VLOG(6) << "Getting DenseTensor's numpy value"; auto dense_tensor = std::dynamic_pointer_cast(self->tensor.impl()); - // TODO(qili93): temporary for ascned npu performance to be removed along + // TODO(qili93): temporary for ascend npu performance to be removed along // with npu_identity op paddle::Tensor temp_tensor(std::make_shared()); if (dense_tensor->storage_properties_initialized()) { @@ -422,7 +422,7 @@ static void IncreaseTensorReferenceCountUntilCopyComplete( // Note(dev): This is an empty callback, the only way is to "reference" // inner memory Holder, so it will not be destructed until the kernels // launched at current stream of given place is finished, such as - // CUDAPinned Mem -> CUDA by cudamemcpyAsync. + // CUDAPinned Mem -> CUDA by cudaMemcpyAsync. 
auto callback = [tensor, place_]() { VLOG(3) << "Run callback of Tensor:" << tensor.name() << " at place " << place_; diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc index 26b43442664..b05250676b1 100644 --- a/paddle/fluid/pybind/eager_properties.cc +++ b/paddle/fluid/pybind/eager_properties.cc @@ -41,8 +41,8 @@ extern PyTypeObject* p_tensor_type; PyObject* tensor_properties_get_name(TensorObject* self, void* closure) { EAGER_TRY - // NOTE(dev): [why not use egr::Controller::Instance::GernerateUniqueName()?] - // Beacause Controller must holder a tracer, but 'tensor.name' maybe called + // NOTE(dev): [why not use egr::Controller::Instance::GenerateUniqueName()?] + // Because Controller must holder a tracer, but 'tensor.name' maybe called // everywhere such as static graph mode in @to_static, which means tracer is // None. static egr::UniqueNameGenerator name_generator; diff --git a/paddle/fluid/pybind/eager_py_layer.cc b/paddle/fluid/pybind/eager_py_layer.cc index e28f9ac1c54..d7e1770e54c 100644 --- a/paddle/fluid/pybind/eager_py_layer.cc +++ b/paddle/fluid/pybind/eager_py_layer.cc @@ -137,7 +137,7 @@ PyObject* pylayer_method_apply(PyObject* cls, PyObject_GetAttrString(cls, "_backward_function"); if (!backward_function) { PADDLE_THROW(paddle::platform::errors::InvalidArgument( - "Get _backward_function faild.")); + "Get _backward_function failed.")); } PyLayerObject* ctx = reinterpret_cast( PyObject_CallFunctionObjArgs(backward_function, nullptr)); @@ -255,7 +255,7 @@ PyObject* pylayer_method_apply(PyObject* cls, auto forward_fn = PyObject_GetAttrString(cls, "forward"); if (!forward_fn) { PADDLE_THROW(paddle::platform::errors::InvalidArgument( - "Get forward function faild.")); + "Get forward function failed.")); } bool trace_backward = egr::Controller::Instance().HasGrad(); egr::Controller::Instance().SetHasGrad(false); diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc index 59bc745aedc..a78831efc3b 100644 --- a/paddle/fluid/pybind/eager_utils.cc +++ b/paddle/fluid/pybind/eager_utils.cc @@ -1362,7 +1362,7 @@ paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj, return paddle::experimental::Scalar(value); } else { PADDLE_THROW(platform::errors::InvalidArgument( - "%s(): argument (position %d) is numpy.ndarry, the inner elements " + "%s(): argument (position %d) is numpy.ndarray, the inner elements " "must be " "numpy.float32/float64 now, but got %s", op_type, diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc index 7373d19c68d..f6776a3f182 100644 --- a/paddle/fluid/pybind/imperative.cc +++ b/paddle/fluid/pybind/imperative.cc @@ -481,7 +481,7 @@ static void VarBaseCopy(std::shared_ptr &src, // NOLINT } } else { PADDLE_THROW(platform::errors::InvalidArgument( - "The destion Tensor(%s) can not copy when it is not empty.", + "The destination Tensor(%s) can not copy when it is not empty.", dst.Name())); } } @@ -538,7 +538,7 @@ void BindImperative(py::module *m_ptr) { "lists with different lengths.\n * Check the reader " "function passed to 'set_(sample/sample_list/batch)" "_generator' to locate the data causes this issue.")); - // 2. construcct LoDTensor + // 2. 
construct LoDTensor phi::DenseTensor t; SetTensorFromPyArray( &t, array, platform::CPUPlace(), true); @@ -578,7 +578,7 @@ void BindImperative(py::module *m_ptr) { "lists with different lengths.\n * Check the reader " "function passed to 'set_(sample/sample_list/batch)" "_generator' to locate the data causes this issue.")); - // 2. construcct LoDTensor + // 2. construct LoDTensor phi::DenseTensor t; SetTensorFromPyArray( &t, array, platform::CPUPlace(), true); @@ -787,7 +787,7 @@ void BindImperative(py::module *m_ptr) { // inplace operator for the VarBase self. self->BumpInplaceVersion(); - // 1. Check argumnets + // 1. Check arguments bool parse_index = true; // Check whether _index can be parsed. @@ -1512,7 +1512,7 @@ void BindImperative(py::module *m_ptr) { } else { PADDLE_THROW(platform::errors::Unimplemented( "Imperative SelectedRows allreduce is not supported when " - "paddle is compiled with NCCL verison lower than v2.2.12. " + "paddle is compiled with NCCL version lower than v2.2.12. " "You can set is_sparse=False for the Layer containing " "this argument, such as Embedding(is_sparse=False).")); } @@ -1586,7 +1586,7 @@ void BindImperative(py::module *m_ptr) { This hook will be called every time the gradient of current Tensor has been fully calculated. There are two differences with `_register_grad_hook`: - 1. This backward hook will be executed after the gradient accumulation completed across batchs, + 1. This backward hook will be executed after the gradient accumulation completed across batches, but the hook registered by `_register_grad_hook` will be executed the gradient accumulation completed in current batch. 2. This backward hook function should have the following signature: diff --git a/paddle/fluid/pybind/jit.cc b/paddle/fluid/pybind/jit.cc index b0de61b1172..fe4961f27df 100644 --- a/paddle/fluid/pybind/jit.cc +++ b/paddle/fluid/pybind/jit.cc @@ -193,7 +193,7 @@ static PyObject *custom_eval_frame_shim(PyFrameObject *frame, int throw_flag) { static PyObject *set_eval_frame(PyObject *new_callback, PyThreadState *tstate) { // Change the eval frame callback and return the old one - // - None: disables: diable custom callback. + // - None: disables: disable custom callback. // - Python callable(): enables custom callback. 
// NOTE: Cache is not supported now PyObject *old_callback = eval_frame_callback_get(); diff --git a/paddle/fluid/pybind/parallel_executor.cc b/paddle/fluid/pybind/parallel_executor.cc index 3f20a2498f8..9771ba74671 100644 --- a/paddle/fluid/pybind/parallel_executor.cc +++ b/paddle/fluid/pybind/parallel_executor.cc @@ -301,7 +301,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.reduce_ = strategy; }, @@ -332,7 +332,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.gradient_scale_ = strategy; }, @@ -384,7 +384,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.debug_graphviz_path_ = path; }, @@ -412,7 +412,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.enable_sequential_execution_ = b; }, @@ -439,7 +439,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.remove_unnecessary_lock_ = b; }, @@ -515,7 +515,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, " + "BuildStrategy has been finalized, " "cannot be configured again.")); self.build_cinn_pass_ = b; }, @@ -544,7 +544,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.fuse_elewise_add_act_ops_ = b; }, @@ -570,7 +570,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.fuse_gemm_epilogue_ = b; }, @@ -596,7 +596,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.fuse_adamw_ = b; }, @@ -618,7 +618,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.fused_attention_ = b; }, @@ -644,7 +644,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT 
PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.fused_feedforward_ = b; }, @@ -670,7 +670,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.sequential_run_ = b; }, @@ -695,7 +695,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.fuse_bn_act_ops_ = b; }, @@ -721,7 +721,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.fuse_bn_add_act_ops_ = b; }, @@ -747,7 +747,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.enable_auto_fusion_ = b; }, @@ -776,7 +776,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.fuse_relu_depthwise_conv_ = b; }, @@ -807,7 +807,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, " + "BuildStrategy has been finalized, " "cannot be configured again.")); self.fuse_broadcast_ops_ = b; }, @@ -839,7 +839,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, " + "BuildStrategy has been finalized, " "cannot be configured again.")); self.fuse_all_optimizer_ops_ = b; }) @@ -850,7 +850,7 @@ void BindParallelExecutor(pybind11::module &m) { // NOLINT PADDLE_ENFORCE_NE(self.IsFinalized(), true, platform::errors::PreconditionNotMet( - "BuildStrategy has been finlaized, cannot be " + "BuildStrategy has been finalized, cannot be " "configured again.")); self.sync_batch_norm_ = b; }, diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc index b8003a7cc19..10aa31be42e 100644 --- a/paddle/fluid/pybind/protobuf.cc +++ b/paddle/fluid/pybind/protobuf.cc @@ -434,8 +434,8 @@ void BindOpDesc(pybind11::module *m) { .def("set_serialized_attr", [](pd::OpDesc &self, const std::string &name, - const pybind11::bytes &seriralized) { - std::string ser(seriralized); + const pybind11::bytes &serialized) { + std::string ser(serialized); self.SetAttr(name, ser); }) .def("_block_attr_id", &pd::OpDesc::GetBlockAttrId) @@ -484,8 +484,8 @@ void BindOpDesc(pybind11::module *m) { return self.to(); case phi::DataType::COMPLEX64: case phi::DataType::COMPLEX128: - // to paddle's complex to avoid ambiguious - // when converting bfloat16 or float16 to std::copmplex + // to paddle's complex to 
avoid ambiguous + // when converting bfloat16 or float16 to std::complex return static_cast>( self.to>()); default: diff --git a/paddle/fluid/pybind/reader_py.cc b/paddle/fluid/pybind/reader_py.cc index 0640a070cbc..f8154e4de8c 100644 --- a/paddle/fluid/pybind/reader_py.cc +++ b/paddle/fluid/pybind/reader_py.cc @@ -320,7 +320,7 @@ class MultiDeviceFeedReader { PADDLE_ENFORCE_EQ(status, Status::kSuccess, platform::errors::NotFound( - "The function executed sucessfully, but " + "The function executed successfully, but " "the result status is not Status::kSuccess")); } diff --git a/paddle/ir/builtin_attribute_storage.h b/paddle/ir/builtin_attribute_storage.h index a61b2d561df..d91c4c44f63 100644 --- a/paddle/ir/builtin_attribute_storage.h +++ b/paddle/ir/builtin_attribute_storage.h @@ -46,7 +46,7 @@ namespace ir { }; /// -/// \brief Define Parameteric AttributeStorage for StrAttribute. +/// \brief Define Parametric AttributeStorage for StrAttribute. /// struct StrAttributeStorage : public AttributeStorage { using ParamKey = std::string; diff --git a/paddle/ir/ir_context.cc b/paddle/ir/ir_context.cc index 03907599690..ac13097ec25 100644 --- a/paddle/ir/ir_context.cc +++ b/paddle/ir/ir_context.cc @@ -66,7 +66,7 @@ class IrContextImpl { std::lock_guard guard(registed_abstract_types_lock_); auto iter = registed_abstract_types_.find(type_id); if (iter != registed_abstract_types_.end()) { - VLOG(4) << "Fonund a cached abstract_type of: [TypeId_hash=" + VLOG(4) << "Found a cached abstract_type of: [TypeId_hash=" << std::hash()(type_id) << ", AbstractType_ptr=" << iter->second << "]."; return iter->second; @@ -89,7 +89,7 @@ class IrContextImpl { std::lock_guard guard(registed_abstract_attributes_lock_); auto iter = registed_abstract_attributes_.find(type_id); if (iter != registed_abstract_attributes_.end()) { - VLOG(4) << "Fonund a cached abstract_attribute of: [TypeId_hash=" + VLOG(4) << "Found a cached abstract_attribute of: [TypeId_hash=" << std::hash()(type_id) << ", AbstractAttribute_ptr=" << iter->second << "]."; return iter->second; @@ -110,7 +110,7 @@ class IrContextImpl { std::lock_guard guard(registed_op_infos_lock_); auto iter = registed_op_infos_.find(name); if (iter != registed_op_infos_.end()) { - VLOG(4) << "Fonund a cached operation of: [name=" << name + VLOG(4) << "Found a cached operation of: [name=" << name << ", OpInfoImpl ptr=" << iter->second << "]."; return iter->second; } @@ -129,11 +129,11 @@ class IrContextImpl { std::lock_guard guard(registed_dialect_lock_); auto iter = registed_dialect_.find(name); if (iter != registed_dialect_.end()) { - VLOG(4) << "Fonund a cached dialect of: [name=" << name + VLOG(4) << "Found a cached dialect of: [name=" << name << ", dialect_ptr=" << iter->second << "]."; return iter->second; } - LOG(WARNING) << "No cache fonund dialect of: [name=" << name << "]."; + LOG(WARNING) << "No cache found dialect of: [name=" << name << "]."; return nullptr; } @@ -156,7 +156,7 @@ class IrContextImpl { // AttributeStorage uniquer and cache instances. StorageManager registed_attribute_storage_manager_; - // The dialcet registered in the context. + // The dialect registered in the context. std::unordered_map registed_dialect_; ir::SpinLock registed_dialect_lock_; diff --git a/paddle/ir/operation.cc b/paddle/ir/operation.cc index a9e844af47d..f1f1a341104 100644 --- a/paddle/ir/operation.cc +++ b/paddle/ir/operation.cc @@ -118,7 +118,7 @@ void Operation::destroy() { } reinterpret_cast(base_ptr)->~Operation(); base_ptr += sizeof(Operation); - // 2.3. 
Deconstruct OpOpOerand. + // 2.3. Deconstruct OpOperand. for (size_t idx = 0; idx < num_operands_; idx++) { reinterpret_cast(base_ptr)->~OpOperandImpl(); base_ptr += sizeof(detail::OpOperandImpl); diff --git a/paddle/ir/operation.h b/paddle/ir/operation.h index 0b7da942d6a..01b1966a099 100644 --- a/paddle/ir/operation.h +++ b/paddle/ir/operation.h @@ -39,7 +39,7 @@ class alignas(8) Operation final { static Operation *create(const OperationArgument &op_argument); /// - /// \brief Destroy the operation objects and free memeory by create(). + /// \brief Destroy the operation objects and free memory by create(). /// void destroy(); diff --git a/paddle/ir/storage_manager.cc b/paddle/ir/storage_manager.cc index a1fe0c1d3e0..3dab38a49a3 100644 --- a/paddle/ir/storage_manager.cc +++ b/paddle/ir/storage_manager.cc @@ -72,7 +72,7 @@ StorageManager::StorageBase *StorageManager::GetParametricStorageImpl( std::function equal_func, std::function constructor) { std::lock_guard guard(parametric_instance_lock_); - VLOG(4) << "Try to get a parameteretric storage of: [TypeId_hash=" + VLOG(4) << "Try to get a parametric storage of: [TypeId_hash=" << std::hash()(type_id) << ", param_hash=" << hash_value << "]."; if (parametric_instance_.find(type_id) == parametric_instance_.end()) diff --git a/paddle/ir/type_base.h b/paddle/ir/type_base.h index 3ecde46d2b6..752b7e4b9b9 100644 --- a/paddle/ir/type_base.h +++ b/paddle/ir/type_base.h @@ -77,7 +77,7 @@ class AbstractType { private: /// /// \brief The constructor is set to private and provides the user with the - /// get method to obtain and manage the AstractType. + /// get method to obtain and manage the AbstractType. /// /// \param type_id The type id of the AbstractType. /// \param dialect The Dialect which the type registered to. diff --git a/paddle/ir/value_impl.h b/paddle/ir/value_impl.h index 145c937b0cf..e06e086145c 100644 --- a/paddle/ir/value_impl.h +++ b/paddle/ir/value_impl.h @@ -55,7 +55,7 @@ class OpOperandImpl { }; /// -/// \brief ValueImpl is the base class of all drived Value classes such as +/// \brief ValueImpl is the base class of all derived Value classes such as /// OpResultImpl. This class defines all the information and usage interface in /// the IR Value. Each Value include three attributes: /// (1) type: ir::Type; (2) UD-chain of value: OpOperandImpl*, first operand diff --git a/python/paddle/audio/backends/wave_backend.py b/python/paddle/audio/backends/wave_backend.py index 4a358f787c0..956820acbf9 100644 --- a/python/paddle/audio/backends/wave_backend.py +++ b/python/paddle/audio/backends/wave_backend.py @@ -180,7 +180,7 @@ def save( filepath: saved path src: the audio tensor sample_rate: the number of samples of audio per second. - channels_first: src channel infomation + channels_first: src channel information if True, means input tensor is (channels, time) if False, means input tensor is (time, channels) encoding: audio encoding format, wave_backend only support PCM16 now. 
@@ -216,7 +216,7 @@ def save( # only support PCM16 if bits_per_sample not in (None, 16): - raise ValueError("Invalid bits_per_sample, only supprt 16 bit") + raise ValueError("Invalid bits_per_sample, only support 16 bit") sample_width = int(bits_per_sample / 8) # 2 diff --git a/python/paddle/audio/datasets/dataset.py b/python/paddle/audio/datasets/dataset.py index 95a3419840a..888821e466f 100644 --- a/python/paddle/audio/datasets/dataset.py +++ b/python/paddle/audio/datasets/dataset.py @@ -44,7 +44,7 @@ class AudioClassificationDataset(paddle.io.Dataset): files (:obj:`List[str]`): A list of absolute path of audio files. labels (:obj:`List[int]`): Labels of audio files. feat_type (:obj:`str`, `optional`, defaults to `raw`): - It identifies the feature type that user wants to extrace of an audio file. + It identifies the feature type that user wants to extract an audio file. """ super().__init__() diff --git a/python/paddle/autograd/autograd.py b/python/paddle/autograd/autograd.py index 96f8ca1bbfa..7ad3f182c15 100644 --- a/python/paddle/autograd/autograd.py +++ b/python/paddle/autograd/autograd.py @@ -187,7 +187,7 @@ class Hessian(Jacobian): class _Jacobian: """The base class for computing Jacobian matrix. - ``_Jacobian`` implementes the core logic of multidimensional index and lazy + ``_Jacobian`` implements the core logic of multidimensional index and lazy evaluation for Jacobian matrix, subclass only need to overwrite following methods: @@ -436,7 +436,7 @@ def _multi_index(indexes, shape): index.start + shape[i] if index.start < 0 else index.start, index.stop + shape[i] if index.stop < 0 else index.stop, # Negative step means index backward, no need to convert to - # positive interger. + # positive integer. index.step, ) ) @@ -690,9 +690,9 @@ def _grad_for_jacobian(ys, xs, v=None): inputs. """ if paddle.in_dynamic_mode(): - # paddle.grad returns a list though the inputs is a signle Tensor. The + # paddle.grad returns a list though the inputs is a single Tensor. The # follow code snippet fixes the problem by return the first element of - # xs_grad when the xs is a signle Tensor. + # xs_grad when the xs is a single Tensor. xs_grad = paddle.grad(ys, xs, v, create_graph=True, allow_unused=True) if ( isinstance(xs, paddle.fluid.framework.Variable) diff --git a/python/paddle/batch.py b/python/paddle/batch.py index 13ba5a00c0c..958166bc149 100644 --- a/python/paddle/batch.py +++ b/python/paddle/batch.py @@ -67,7 +67,7 @@ def batch(reader, batch_size, drop_last=False): batch_size = int(batch_size) if batch_size <= 0: raise ValueError( - "batch_size should be a positive integeral value, " + "batch_size should be a positive integer value, " "but got batch_size={}".format(batch_size) ) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_dropout.py b/python/paddle/distributed/auto_parallel/operators/dist_dropout.py index e43870b2688..dde852e613e 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_dropout.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_dropout.py @@ -82,7 +82,7 @@ class DistributedDropoutImpl0(DistributedElementwiseImpl0): and src_op.attr("seed") ): _logger.info( - "Auto Parallel Random Control Skiped Since manul seed is set by user: {}".format( + "Auto Parallel Random Control Skipped Since manul seed is set by user: {}".format( src_op ) ) @@ -90,7 +90,7 @@ class DistributedDropoutImpl0(DistributedElementwiseImpl0): pass # NOTE Adopt for recompute # If user already set seed, We should not modify it. 
But if the seed is added by recompute pass, it should be under control. - # TODO in future recompute pass should happen after parallel partitione. and remove this at that time. + # TODO in future recompute pass should happen after parallel partition. and remove this at that time. elif len(kwargs['Seed']) > 0 or len(src_op.input("Seed")) > 0: seed_var_name = kwargs['Seed'][0] if seed_var_name.startswith('rc_seed'): @@ -115,7 +115,7 @@ class DistributedDropoutImpl0(DistributedElementwiseImpl0): pre_op._set_attr("force_cpu", True) else: _logger.info( - "Auto Parallel Random Control Skiped Since manul seed is set by user: {}".format( + "Auto Parallel Random Control Skipped Since manul seed is set by user: {}".format( src_op ) ) diff --git a/python/paddle/distributed/auto_parallel/operators/dist_fused_dropout_add.py b/python/paddle/distributed/auto_parallel/operators/dist_fused_dropout_add.py index 8bb46b6e0a9..12612540a9a 100644 --- a/python/paddle/distributed/auto_parallel/operators/dist_fused_dropout_add.py +++ b/python/paddle/distributed/auto_parallel/operators/dist_fused_dropout_add.py @@ -87,7 +87,7 @@ class DistributedDropoutImpl0(DistributedElementwiseImpl0): and src_op.attr("seed") ): _logger.info( - "Auto Parallel Random Control Skiped Since manul seed is set by user: {}".format( + "Auto Parallel Random Control Skipped Since manul seed is set by user: {}".format( src_op ) ) @@ -120,7 +120,7 @@ class DistributedDropoutImpl0(DistributedElementwiseImpl0): pre_op._set_attr("force_cpu", True) else: _logger.info( - "Auto Parallel Random Control Skiped Since manul seed is set by user: {}".format( + "Auto Parallel Random Control Skipped Since manul seed is set by user: {}".format( src_op ) ) -- GitLab
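Two of the Python docstrings touched by this patch describe behavior that is easy to misread; the short sketches below illustrate them. They are illustrative only: the tensor value, the toy reader, and the batch size are made up, and the only details taken from the patch itself are the call signatures visible in the hunks for python/paddle/autograd/autograd.py (paddle.grad) and python/paddle/batch.py (batch(reader, batch_size, drop_last=False)).

The autograd.py hunk notes that paddle.grad returns a list even when the input is a single Tensor, which is why _grad_for_jacobian unwraps xs_grad[0]; a minimal sketch of that behavior:

import paddle

x = paddle.to_tensor(3.0, stop_gradient=False)
y = x * x

# paddle.grad always wraps its result in a list, even for a single input
# Tensor, so callers that expect a Tensor must take the first element.
grads = paddle.grad(outputs=y, inputs=x)
print(type(grads))  # <class 'list'>
print(grads[0])     # dy/dx = 2 * x = 6.0

The batch.py hunk documents batch(reader, batch_size, drop_last=False), which wraps a sample-level reader into a batch-level one and raises ValueError for a non-positive batch_size; a minimal usage sketch with a toy reader:

import paddle

# Toy sample-level reader: yields one single-element sample per iteration.
def sample_reader():
    for i in range(10):
        yield [i]

# Each iteration of the wrapped reader now yields up to 4 samples;
# drop_last=True discards the final incomplete batch ([[8], [9]]).
batched = paddle.batch(sample_reader, batch_size=4, drop_last=True)

for samples in batched():
    print(samples)  # [[0], [1], [2], [3]] then [[4], [5], [6], [7]]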