diff --git a/paddle/fluid/eager/pylayer/py_layer_node.cc b/paddle/fluid/eager/pylayer/py_layer_node.cc
index e601f17d88fd391725cd4355827a479d375c5a92..3be77e5984313b97d66a4118739dc7d90976bf09 100644
--- a/paddle/fluid/eager/pylayer/py_layer_node.cc
+++ b/paddle/fluid/eager/pylayer/py_layer_node.cc
@@ -165,7 +165,7 @@ GradNodePyLayer::operator()(
           this->OutputMeta()[i][0].IsStopGradient(),
           true,
           paddle::platform::errors::InvalidArgument(
-              "%s's backward function should not return empyt at %d position.",
+              "%s's backward function should not return an empty result at position %d.",
               name(),
               i));
       grad_out.push_back({});
diff --git a/paddle/fluid/framework/details/all_reduce_op_handle.cc b/paddle/fluid/framework/details/all_reduce_op_handle.cc
index 4a7b1d602d6ae18fbea24ca7a4aaa5c7889f7b5e..2afeef1efec403ed0ac1d7a845f3f57802983efd 100644
--- a/paddle/fluid/framework/details/all_reduce_op_handle.cc
+++ b/paddle/fluid/framework/details/all_reduce_op_handle.cc
@@ -135,7 +135,7 @@ void AllReduceOpHandle::AllReduceImpl(
     if (i == 0) {
       numel = static_cast<int64_t>(lod_tensor.numel());
-      // only enforce place0, we will enforce other palce numel == place0 numel
+      // only enforce place0; we will enforce that other places' numel == place0's numel
       PADDLE_ENFORCE_GT(
           numel,
           0,
diff --git a/paddle/fluid/framework/details/bind_threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/bind_threaded_ssa_graph_executor.cc
index 577d458ba7da5800607affae6f529bc37e1bc9db..f36f29a5b02177292bd0987da924f60e97b9f378 100644
--- a/paddle/fluid/framework/details/bind_threaded_ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/bind_threaded_ssa_graph_executor.cc
@@ -144,7 +144,7 @@ FetchResultType BindThreadedSSAGraphExecutor::RunMainStream(
   while (cur_count < op_deps->size()) {
     cur_count++;
     auto cur_op = ready_ops->Pop();
-    // when execption, get cur_op == nullptr
+    // when an exception occurs, cur_op == nullptr
     if (cur_op == nullptr) {
       std::lock_guard<std::mutex> lock(mutex_);
       exec_op_count_ = op_deps->size();
diff --git a/paddle/fluid/framework/details/eager_deletion_op_handle.cc b/paddle/fluid/framework/details/eager_deletion_op_handle.cc
index ade3a6f842e04a210087e9c20b53c390e4e29c25..4012263f688cb5cc3875df8f9614045b931bd2d0 100644
--- a/paddle/fluid/framework/details/eager_deletion_op_handle.cc
+++ b/paddle/fluid/framework/details/eager_deletion_op_handle.cc
@@ -59,7 +59,7 @@ EagerDeletionOpHandle::EagerDeletionOpHandle(
 #endif
     PADDLE_ENFORCE_NOT_NULL(
         event_,
-        platform::errors::InvalidArgument("The cuda envet created is NULL."));
+        platform::errors::InvalidArgument("The created CUDA event is NULL."));
   }
 }
 #endif
diff --git a/paddle/fluid/framework/details/fetch_async_op_handle.cc b/paddle/fluid/framework/details/fetch_async_op_handle.cc
index 922dbb488018e76080bece86b608c8220e7c28ca..5088843bb98d3979699fe6c56bfc80a218613aaa 100644
--- a/paddle/fluid/framework/details/fetch_async_op_handle.cc
+++ b/paddle/fluid/framework/details/fetch_async_op_handle.cc
@@ -203,7 +203,7 @@ void FetchAsyncOpHandle::FetchMergedLodTensor(
   }

   // slice and memcpy
-  // for 0D tensor, can't concat eath tensor, stack them. for 1+D tensor, concat
+  // for 0D tensors, can't concat each tensor, so stack them; for 1+D tensors, concat
   // them
   int begin = 0;
   int end = 0;
diff --git a/paddle/fluid/framework/details/multi_devices_helper.h b/paddle/fluid/framework/details/multi_devices_helper.h
index a3a3b993b7ec14fb95f4d59ea6bd86edec13207e..60c3f35a4f7f720c90742e7dfae5f58add5b92d0 100644
--- a/paddle/fluid/framework/details/multi_devices_helper.h
+++ b/paddle/fluid/framework/details/multi_devices_helper.h
@@ -44,8 +44,8 @@ namespace details {
 // all variable in each devices.
 // The outside vector is the device vector. Each element of this vector is a
 // map from variable name to variables. The variables, who have the same name,
-// will have a differsent version. The offset in the
-// `std::vector<VarHandle*>` is the version of varaibles.
+// will have a different version. The offset in the
+// `std::vector<VarHandle*>` is the version of the variable.
 typedef std::vector<std::unordered_map<std::string, std::vector<VarHandle*>>>
     GraphVars;
 constexpr char kGraphVars[] = "vars";
diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h
index c9021c84cdf2de8b04b7ec725356b5585dcddee8..9afe56e4babd45bbb0680981bf7d20317b0e8a9a 100644
--- a/paddle/fluid/framework/details/op_handle_base.h
+++ b/paddle/fluid/framework/details/op_handle_base.h
@@ -154,10 +154,10 @@ class OpHandleBase {
   std::vector<Scope *> local_exec_scopes_;
   bool skip_running_ = false;

-  // NOTE(Aurelius84): Indicate whether scope held in OpHandle is chanageable.
-  // Ophandle's scope noramlly keep same in most cases, except running
+  // NOTE(Aurelius84): Indicates whether the scope held in OpHandle is changeable.
+  // OpHandle's scope normally stays the same, except when running
   // run_program_op from @to_static.
-  // The scope may be chanaged while each training iteration.
+  // The scope may be changed during each training iteration.
   // See https://github.com/PaddlePaddle/Paddle/pull/32283
   bool is_variant_scope_ = false;
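
For context on the comment corrected in fetch_async_op_handle.cc: N per-device 0-D (scalar) tensors have no axis to concatenate along, so they are stacked into a 1-D tensor of length N, while N tensors of rank >= 1 are concatenated along axis 0. Below is a minimal standalone C++ sketch of that shape rule only; it is an illustration, not Paddle code, and MergedFetchShape is a hypothetical helper rather than a Paddle API.

// Sketch of the merged-fetch shape rule described in the fixed comment.
// Assumption: every one of the n per-device tensors shares the same shape.
#include <cstddef>
#include <iostream>
#include <vector>

// Returns the shape of the merged result for n per-device tensors of `shape`.
std::vector<std::size_t> MergedFetchShape(std::size_t n,
                                          const std::vector<std::size_t>& shape) {
  if (shape.empty()) {
    // 0-D case: stack -> one new leading dimension of size n.
    return {n};
  }
  // 1+D case: concat along axis 0 -> the first dimension grows n-fold.
  std::vector<std::size_t> merged = shape;
  merged[0] *= n;
  return merged;
}

int main() {
  // Four devices each fetch a 0-D (scalar) tensor: merged shape is [4].
  for (std::size_t d : MergedFetchShape(4, {})) std::cout << d << ' ';
  std::cout << '\n';
  // Four devices each fetch a [2, 3] tensor: merged shape is [8, 3].
  for (std::size_t d : MergedFetchShape(4, {2, 3})) std::cout << d << ' ';
  std::cout << '\n';
}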