From 39a1ab692d94ef0722b2db946479e6226fb3eb26 Mon Sep 17 00:00:00 2001
From: chenxujun
Date: Wed, 8 Mar 2023 14:22:43 +0800
Subject: [PATCH] Fix typos (#51338)

---
 paddle/fluid/distributed/collective/process_group_nccl.cc | 2 +-
 paddle/fluid/distributed/collective/reducer.cc | 6 +++---
 .../fluid/distributed/fleet_executor/compute_interceptor.cc | 2 +-
 paddle/fluid/distributed/index_dataset/index_wrapper.cc | 4 ++--
 paddle/fluid/distributed/ps.proto | 2 +-
 .../distributed/ps/service/communicator/communicator.cc | 4 ++--
 .../distributed/ps/service/communicator/communicator.h | 4 ++--
 7 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/paddle/fluid/distributed/collective/process_group_nccl.cc b/paddle/fluid/distributed/collective/process_group_nccl.cc
index 9f9fa42589a..22a6b97c1e0 100644
--- a/paddle/fluid/distributed/collective/process_group_nccl.cc
+++ b/paddle/fluid/distributed/collective/process_group_nccl.cc
@@ -129,7 +129,7 @@ ncclComm_t ProcessGroupNCCL::NCCLComm(const Place& place) const {
       iter,
       place_to_comm_ctx_.end(),
       phi::errors::NotFound(
-          "Cannot find the NCCL commmunicator in this process group."));
+          "Cannot find the NCCL communicator in this process group."));
   return iter->second->nccl_comm();
 }
 
diff --git a/paddle/fluid/distributed/collective/reducer.cc b/paddle/fluid/distributed/collective/reducer.cc
index 9bc230b1650..f10f6ffd5df 100644
--- a/paddle/fluid/distributed/collective/reducer.cc
+++ b/paddle/fluid/distributed/collective/reducer.cc
@@ -803,7 +803,7 @@ void EagerReducer::MarkVarReady(const size_t var_index,
           "parameters participate in the backward calculation "
           "again at a later time (e.g. after the forward function, "
           "the loss calculation uses the unused "
-          "paramters of the forward and trigger backward), "
+          "parameters of the forward and trigger backward), "
           "its gradient will be wrong.";
 
   PADDLE_ENFORCE_EQ(has_marked_unused_vars_,
@@ -868,7 +868,7 @@ void EagerReducer::MarkVarReady(const size_t var_index,
             "parameters without generating gradients during training. "
             "For example, if is_sparese=True is used in Embedding, "
             "the current step of this parameter cannot generate gradient "
-            "because of stop_gradient/detatch, where error will occur.",
+            "because of stop_gradient/detach, where error will occur.",
             var_index,
             tensors_[var_index].name()));
 
@@ -996,7 +996,7 @@ void EagerReducer::ProcessUnusedDenseVars() {
 
       // NOTE(haohongxiang): Calling SetFakeEmpty here is to make sure that
      // gradient accumulation can continue normally after clear_gradients()
-      // especiall in cases including complex control flow.
+      // especially in cases including complex control flow.
       std::static_pointer_cast<egr::GradNodeAccumulation>(
           GetGradNodeFromTensor(&tensors_[var_index]))
           ->SetFakeEmpty(false);
diff --git a/paddle/fluid/distributed/fleet_executor/compute_interceptor.cc b/paddle/fluid/distributed/fleet_executor/compute_interceptor.cc
index a6d14f09e10..3e4356d71aa 100644
--- a/paddle/fluid/distributed/fleet_executor/compute_interceptor.cc
+++ b/paddle/fluid/distributed/fleet_executor/compute_interceptor.cc
@@ -192,7 +192,7 @@ void ComputeInterceptor::RunOps() {
         microbatch_scopes_.size(),
         platform::errors::InvalidArgument(
             "Step out of range. There are %ld "
-            "microbatch_scopes, but recevice scope index %ld",
+            "microbatch_scopes, but receive scope index %ld",
             microbatch_scopes_.size(),
             cur_scope_id_));
   }
diff --git a/paddle/fluid/distributed/index_dataset/index_wrapper.cc b/paddle/fluid/distributed/index_dataset/index_wrapper.cc
index dc438b35c8c..8f7e65b3b46 100644
--- a/paddle/fluid/distributed/index_dataset/index_wrapper.cc
+++ b/paddle/fluid/distributed/index_dataset/index_wrapper.cc
@@ -52,7 +52,7 @@ int TreeIndex::Load(const std::string filename) {
         platform::errors::InvalidArgument(
             "Read from file: %s failed. Valid Format is "
             "an integer representing the length of the following string, "
-            "and the string itself.We got an iteger[% d], "
+            "and the string itself.We got an integer[% d], "
             "but the following string's length is [%d].",
             filename,
             num,
@@ -75,7 +75,7 @@ int TreeIndex::Load(const std::string filename) {
 
     // PADDLE_ENFORCE_NE(node.id(), 0,
     //                   platform::errors::InvalidArgument(
-    //                       "Node'id should not be equel to zero."));
+    //                       "Node'id should not be equal to zero."));
     if (node.is_leaf()) {
       id_codes_map_[node.id()] = code;
     }
diff --git a/paddle/fluid/distributed/ps.proto b/paddle/fluid/distributed/ps.proto
index 5d4ab954bbd..27a93a9787f 100644
--- a/paddle/fluid/distributed/ps.proto
+++ b/paddle/fluid/distributed/ps.proto
@@ -81,7 +81,7 @@ message ServerServiceParameter {
   optional string server_class = 1 [ default = "DownpourBrpcPsServer" ];
   optional string client_class = 2 [ default = "DownpourBrpcPsClient" ];
   optional string service_class = 3 [ default = "DownpourPsService"];
-  optional uint32 start_server_port = 4 [ default = 0 ]; //will find a avaliable port from it
+  optional uint32 start_server_port = 4 [ default = 0 ]; //will find a available port from it
   optional uint32 server_thread_num = 5 [ default = 12 ];
 }
 
diff --git a/paddle/fluid/distributed/ps/service/communicator/communicator.cc b/paddle/fluid/distributed/ps/service/communicator/communicator.cc
index 0876e1c7048..b3cf5159c26 100644
--- a/paddle/fluid/distributed/ps/service/communicator/communicator.cc
+++ b/paddle/fluid/distributed/ps/service/communicator/communicator.cc
@@ -174,7 +174,7 @@ void Communicator::RpcSendDenseParam(const std::vector<std::string> &varnames,
     float *w = tensor->mutable_data<float>(place);
     paddle::distributed::Region reg(w, tensor->numel());
     regions.emplace_back(reg);
-    VLOG(1) << "rpc_send_dense_param Var " << t << " talbe_id " << table_id
+    VLOG(1) << "rpc_send_dense_param Var " << t << " table_id " << table_id
             << " Temp_data[0] " << w[0] << " Temp_data[-1] "
             << w[tensor->numel() - 1];
   }
@@ -1514,7 +1514,7 @@ void FLCommunicator::InitBrpcClient(
   if (_worker_ptr.get() == nullptr) {
     VLOG(0) << "fl-ps > FLCommunicator::InitBrpcClient get _worker_ptr";
     _worker_ptr =
-        fleet->worker_ptr_;  // FleetWrapper::InitWorker must be excuted
+        fleet->worker_ptr_;  // FleetWrapper::InitWorker must be executed
                              // before, but no need for Coordinator
   }
   if (coordinator_client_ptr_ == nullptr) {
diff --git a/paddle/fluid/distributed/ps/service/communicator/communicator.h b/paddle/fluid/distributed/ps/service/communicator/communicator.h
index 04ed31f67d2..5247b9a3a35 100644
--- a/paddle/fluid/distributed/ps/service/communicator/communicator.h
+++ b/paddle/fluid/distributed/ps/service/communicator/communicator.h
@@ -277,7 +277,7 @@ class Communicator {
   virtual void RpcRecvSparse(const std::string &varname,
                              int table_id,
                              Scope *scope);
-  // 7. send gloabl step
+  // 7. send global step
   virtual void SendGlobalStep(const CommContext &ctx,
                               int batches,
                               Scope *send_scope);
@@ -572,7 +572,7 @@ class SyncCommunicator : public HalfAsyncCommunicator {
       : HalfAsyncCommunicator(envs) {}
 
   void InitEnvs() {
-    // enfore to recv after send
+    // enforce to recv after send
     independent_recv_ = false;
     min_send_grad_num_before_recv_ = 0;
     max_merge_var_num_ = std::stoi(envs.at("communicator_max_merge_var_num"));
--
GitLab