diff --git a/paddle/fluid/eager/to_static/run_program_op_node.h b/paddle/fluid/eager/to_static/run_program_op_node.h
index b4deb4e4ac306fe40040c2af6f0d66eac26dd6be..548af27a8ac4ff621714cc4078c3c3fdda683756 100644
--- a/paddle/fluid/eager/to_static/run_program_op_node.h
+++ b/paddle/fluid/eager/to_static/run_program_op_node.h
@@ -443,11 +443,11 @@ inline void RunProgramAPI(
       VLOG(4) << "don't require any grad, set this scope can reused";
       VLOG(4) << "is_test: " << is_test
               << ", require_any_grad: " << require_any_grad;
-      global_inner_scope->SetCanReuesd(true);
+      global_inner_scope->SetCanReused(true);
       details::GcScope(global_inner_scope);
     } else {
       VLOG(4) << "not test, set this scope can not reused";
-      global_inner_scope->SetCanReuesd(false);
+      global_inner_scope->SetCanReused(false);
     }
   }
@@ -582,7 +582,7 @@ inline void RunProgramGradAPI(
                                            *backward_global_block,
                                            global_inner_scope);
       VLOG(4) << "after backward gc all vars";
-      global_inner_scope->SetCanReuesd(true);
+      global_inner_scope->SetCanReused(true);
       details::GcScope(global_inner_scope);
     }
   }
@@ -599,9 +599,9 @@ class GradNodeRunProgram : public egr::GradNodeBase {
       // Normally out_scope_vec.size() == 1. for safty, we add for-loop here.
       for (size_t i = 0; i < out_scope_vec->size(); ++i) {
         paddle::framework::Scope *global_inner_scope = out_scope_vec->at(i);
-        global_inner_scope->SetCanReuesd(true);
+        global_inner_scope->SetCanReused(true);
         details::GcScope(global_inner_scope);
-        VLOG(4) << "global_inner_scope SetCanReuesd";
+        VLOG(4) << "global_inner_scope SetCanReused";
       }
     }
   }
diff --git a/paddle/fluid/framework/scope.h b/paddle/fluid/framework/scope.h
index b87a294878051ed0a69023747a4cf1b1b1e15384..b0b418c85a303e12563b93183b5f2a373b86d256 100644
--- a/paddle/fluid/framework/scope.h
+++ b/paddle/fluid/framework/scope.h
@@ -122,9 +122,9 @@ class Scope {
   std::string Rename(const std::string& origin_name) const;
 
   // only for dygraph_to_static
-  bool CanReuesd() const { return can_reused_; }
+  bool CanReused() const { return can_reused_; }
 
-  void SetCanReuesd(bool can_reused) { can_reused_ = can_reused; }
+  void SetCanReused(bool can_reused) { can_reused_ = can_reused; }
 
 protected:
   struct KeyHasher {
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 0a01e9e52f5b33d32a4c16491ac5245636d646b0..05eccd45c8fa64822aa2ed433eac3f3fa63f8354 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -1210,7 +1210,7 @@ All parameter, weight, gradient are variables in Paddle.
           Delete all sub-scopes of the current scope.
           )DOC")
       .def("_kids", &Scope::kids)
-      .def_property("_can_reuesd", &Scope::CanReuesd, &Scope::SetCanReuesd);
+      .def_property("_can_reused", &Scope::CanReused, &Scope::SetCanReused);
 
   m.def(
       "Scope",
diff --git a/python/paddle/jit/dy2static/partial_program.py b/python/paddle/jit/dy2static/partial_program.py
index ad2e62b9e04613fb5ecff577816d9cc76f9ec2b2..042977988d4b35ed9e2b8d827acd4ccd060696f5 100644
--- a/python/paddle/jit/dy2static/partial_program.py
+++ b/python/paddle/jit/dy2static/partial_program.py
@@ -259,7 +259,7 @@ class PartialProgramLayer:
             return scope
         else:
             for scope in self._scope_cache[program_id]:
-                if scope._can_reuesd:
+                if scope._can_reused:
                     return scope
             scope = core.Scope()
             self._scope_cache[program_id].append(scope)
diff --git a/python/paddle/nn/functional/distance.py b/python/paddle/nn/functional/distance.py
index e8e209be18a947d7bac5a8718509bae9b8404f33..067a49816dd22fcd16576fd555cf689b2970fec7 100644
--- a/python/paddle/nn/functional/distance.py
+++ b/python/paddle/nn/functional/distance.py
@@ -70,7 +70,7 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None):
     """
     if in_dygraph_mode():
         sub = _C_ops.subtract(x, y)
-        # p_norm op has not uesd epsilon, so change it to the following.
+        # p_norm op has not used epsilon, so change it to the following.
         if epsilon != 0.0:
             epsilon = paddle.fluid.dygraph.base.to_variable(
                 [epsilon], dtype=sub.dtype
diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py
index 5cf69fb42b6c52e0b1dfd0d42926e4a17a81fa90..e1c3d985a68db74de32d4a2a285907f5951d6931 100644
--- a/python/paddle/nn/functional/norm.py
+++ b/python/paddle/nn/functional/norm.py
@@ -127,7 +127,7 @@ def batch_norm(
     """
     Applies Batch Normalization as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift .
 
-    nn.functional.batch_norm is uesd for nn.BatchNorm1D, nn.BatchNorm2D, nn.BatchNorm3D. Please use above API for BatchNorm.
+    nn.functional.batch_norm is used for nn.BatchNorm1D, nn.BatchNorm2D, nn.BatchNorm3D. Please use above API for BatchNorm.
 
     Parameters:
         x(Tesnor): input value. It's data type should be float32, float64.