未验证 提交 e34c79c7 编写于 作者: C chenxujun 提交者: GitHub

Fix typos (#51379)

上级 6737226f
......@@ -165,7 +165,7 @@ GradNodePyLayer::operator()(
this->OutputMeta()[i][0].IsStopGradient(),
true,
paddle::platform::errors::InvalidArgument(
"%s's backward function should not return empyt at %d position.",
"%s's backward function should not return empty at %d position.",
name(),
i));
grad_out.push_back({});
......
......@@ -135,7 +135,7 @@ void AllReduceOpHandle::AllReduceImpl(
if (i == 0) {
numel = static_cast<int64_t>(lod_tensor.numel());
// only enforce place0, we will enforce other palce numel == place0 numel
// only enforce place0; we will enforce that every other place's numel == place0's numel
PADDLE_ENFORCE_GT(
numel,
0,
......
......@@ -144,7 +144,7 @@ FetchResultType BindThreadedSSAGraphExecutor::RunMainStream(
while (cur_count < op_deps->size()) {
cur_count++;
auto cur_op = ready_ops->Pop();
// when execption, get cur_op == nullptr
// on exception, cur_op will be nullptr
if (cur_op == nullptr) {
std::lock_guard<std::mutex> lock(mutex_);
exec_op_count_ = op_deps->size();
......
......@@ -59,7 +59,7 @@ EagerDeletionOpHandle::EagerDeletionOpHandle(
#endif
PADDLE_ENFORCE_NOT_NULL(
event_,
platform::errors::InvalidArgument("The cuda envet created is NULL."));
platform::errors::InvalidArgument("The cuda event created is NULL."));
}
}
#endif
......
......@@ -203,7 +203,7 @@ void FetchAsyncOpHandle::FetchMergedLodTensor(
}
// slice and memcpy
// for 0D tensor, can't concat eath tensor, stack them. for 1+D tensor, concat
// for 0D tensor, can't concat each tensor, stack them. for 1+D tensor, concat
// them
int begin = 0;
int end = 0;
......
......@@ -44,8 +44,8 @@ namespace details {
// all variable in each devices.
// The outside vector is the device vector. Each element of this vector is a
// map from variable name to variables. The variables, who have the same name,
// will have a differsent version. The offset in the
// `std::vector<VarHandle*>` is the version of varaibles.
// will have a different version. The offset in the
// `std::vector<VarHandle*>` is the version of variables.
typedef std::vector<std::unordered_map<std::string, std::vector<VarHandle *>>>
GraphVars;
constexpr char kGraphVars[] = "vars";
......
......@@ -154,10 +154,10 @@ class OpHandleBase {
std::vector<Scope *> local_exec_scopes_;
bool skip_running_ = false;
// NOTE(Aurelius84): Indicate whether scope held in OpHandle is chanageable.
// Ophandle's scope noramlly keep same in most cases, except running
// NOTE(Aurelius84): Indicate whether scope held in OpHandle is changeable.
// Ophandle's scope normally keep same in most cases, except running
// run_program_op from @to_static.
// The scope may be chanaged while each training iteration.
// The scope may be changed during each training iteration.
// See https://github.com/PaddlePaddle/Paddle/pull/32283
bool is_variant_scope_ = false;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册