Commit 82571deb authored by: Y yangyaming

Change `customize_loss_grad` to `use_default_grad_scale`.

Parent b8b2e897
......@@ -58,7 +58,7 @@ ParallelExecutor::ParallelExecutor(
const std::unordered_set<std::string> &bcast_vars,
const ProgramDesc &main_program, const std::string &loss_var_name,
Scope *scope, const std::vector<Scope *> &local_scopes, bool allow_op_delay,
bool customize_scale_loss)
bool use_default_grad_scale)
: member_(new ParallelExecutorPrivate(places)) {
member_->global_scope_ = scope;
......@@ -93,11 +93,11 @@ ParallelExecutor::ParallelExecutor(
#ifdef PADDLE_WITH_CUDA
details::MultiDevSSAGraphBuilder builder(
member_->places_, loss_var_name, params, member_->local_scopes_,
customize_scale_loss, member_->nccl_ctxs_.get());
use_default_grad_scale, member_->nccl_ctxs_.get());
#else
details::MultiDevSSAGraphBuilder builder(member_->places_, loss_var_name,
params, member_->local_scopes_,
customize_scale_loss);
use_default_grad_scale);
#endif
auto graph = builder.Build(main_program);
......
......@@ -40,7 +40,7 @@ class ParallelExecutor {
const ProgramDesc& main_program,
const std::string& loss_var_name, Scope* scope,
const std::vector<Scope*>& local_scopes,
bool allow_op_delay, bool customize_scale_loss);
bool allow_op_delay, bool use_default_grad_scale);
~ParallelExecutor();
......
......@@ -502,11 +502,11 @@ All parameter, weight, gradient are variables in Paddle.
const std::unordered_set<std::string> &bcast_vars,
const ProgramDesc &main_program, const std::string &loss_var_name,
Scope *scope, std::vector<Scope *> &local_scopes,
bool allow_op_delay, bool customize_loss_grad) {
new (&self) ParallelExecutor(num_threads, use_event, places,
params, bcast_vars, main_program,
loss_var_name, scope, local_scopes,
allow_op_delay, customize_loss_grad);
bool allow_op_delay, bool use_default_grad_scale) {
new (&self) ParallelExecutor(
num_threads, use_event, places, params, bcast_vars,
main_program, loss_var_name, scope, local_scopes,
allow_op_delay, use_default_grad_scale);
})
.def("bcast_params", &ParallelExecutor::BCastParamsToGPUs)
// NOTE: even we return a vec<Scope*>* to Python use reference policy.
......
......@@ -30,7 +30,7 @@ class ParallelExecutor(object):
num_threads=None,
allow_op_delay=False,
share_vars_from=None,
customize_loss_grad=False):
use_default_grad_scale=True):
"""
ParallelExecutor can run a program in parallel.
......@@ -46,6 +46,10 @@ class ParallelExecutor(object):
improve performance in some cases, default False.
share_vars_from(ParallelExecutor, default None): If provided,
it will share variables from the specified ParallelExecutor.
use_default_grad_scale(bool, default True): If set True, a default
scale value equal to `1./device_count` will be multiplied to
the gradients. Otherwise, a customized scale value should be
fed to the network.
Returns:
A ParallelExecutor object.
......@@ -124,7 +128,7 @@ class ParallelExecutor(object):
scope,
local_scopes,
allow_op_delay,
customize_loss_grad)
use_default_grad_scale)
self.scope = scope
def run(self, fetch_list, feed=None, feed_dict=None):
......
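For context, below is a minimal usage sketch of the renamed flag based on the Fluid `ParallelExecutor` API of this period. Everything other than the `use_default_grad_scale` keyword (the network layers, the optimizer, and the `use_cuda`/`loss_name` constructor arguments) is an illustrative assumption and not part of this commit.

```python
import paddle.fluid as fluid

# Assumed setup: a tiny classification network, not taken from this commit.
image = fluid.layers.data(name='image', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
prediction = fluid.layers.fc(input=image, size=10, act='softmax')
loss = fluid.layers.mean(
    fluid.layers.cross_entropy(input=prediction, label=label))
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

place = fluid.CUDAPlace(0)
fluid.Executor(place).run(fluid.default_startup_program())

# With use_default_grad_scale=True (the new default), gradients are scaled
# by 1./device_count internally before aggregation.
pe = fluid.ParallelExecutor(use_cuda=True,
                            loss_name=loss.name,
                            use_default_grad_scale=True)

# With use_default_grad_scale=False, the caller is expected to feed a
# customized scale value to the network instead.
```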