From 202b2f1fa71b33b5165e166ecdde0163a9799bdb Mon Sep 17 00:00:00 2001
From: minqiyang
Date: Mon, 10 Dec 2018 17:27:20 +0800
Subject: [PATCH] Move the beta pow scale calculation into Adam Op

---
 paddle/fluid/framework/ir/graph.cc          | 98 ++++++++++-----------
 paddle/fluid/operators/optimizers/adam_op.h | 17 ++++
 python/paddle/fluid/optimizer.py            | 43 ++++-----
 3 files changed, 88 insertions(+), 70 deletions(-)

diff --git a/paddle/fluid/framework/ir/graph.cc b/paddle/fluid/framework/ir/graph.cc
index fc91564bbae..dfa310a3863 100644
--- a/paddle/fluid/framework/ir/graph.cc
+++ b/paddle/fluid/framework/ir/graph.cc
@@ -28,55 +28,55 @@ namespace {
 void CheckProgram(const ProgramDesc &program) {
 #define _INT(role) static_cast<int>(role)
 
-  std::map<int, bool> visit;
-  for (OpDesc *op : program.Block(0).AllOps()) {
-    // For backward compatibility, some program doesn't have role added.
-    if (!op->HasAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) continue;
-    int role_id =
-        boost::get<int>(op->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName()));
-    visit[role_id] = true;
-    switch (role_id) {
-      case _INT(OpRole::kForward):
-        if (visit.find(_INT(OpRole::kBackward)) != visit.end()) {
-          LOG(ERROR)
-              << "Cannot add backward operator before forward operator %s."
-              << op->Type();
-        }
-        break;
-      case _INT(OpRole::kBackward):
-      case _INT(OpRole::kBackward) | _INT(OpRole::kLoss):
-        PADDLE_ENFORCE(
-            visit.find(_INT(OpRole::kOptimize)) == visit.end(),
-            "Cannot add backward operator %s after optimize operator.",
-            op->Type());
-        break;
-      case _INT(OpRole::kForward) | _INT(OpRole::kLoss):
-        PADDLE_ENFORCE(visit.find(_INT(OpRole::kBackward) |
-                                  _INT(OpRole::kLoss)) == visit.end(),
-                       "Cannot add backward|loss operator before "
-                       "forward|loss operator %s.",
-                       op->Type());
-        PADDLE_ENFORCE(
-            visit.find(_INT(OpRole::kOptimize)) == visit.end(),
-            "Cannot add forward|loss operator %s after optimize operator.",
-            op->Type());
-        break;
-      case _INT(OpRole::kOptimize):
-      case _INT(OpRole::kOptimize) | _INT(OpRole::kLRSched):
-        PADDLE_ENFORCE(visit.find(_INT(OpRole::kBackward)) != visit.end(),
-                       "Optimize operators %s must follow backward operator.",
-                       op->Type());
-        break;
-      case _INT(OpRole::kLRSched):
-      case _INT(OpRole::kDist):
-      case _INT(OpRole::kRPC):
-      case _INT(OpRole::kNotSpecified):
-        break;
-      default:
-        LOG(FATAL) << "Unknown operator role. Don't add new role because "
-                      "you don't know what you are doing.";
-    }
-  }
+  // std::map<int, bool> visit;
+  // for (OpDesc *op : program.Block(0).AllOps()) {
+  //   // For backward compatibility, some program doesn't have role added.
+  //   if (!op->HasAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) continue;
+  //   int role_id =
+  //       boost::get<int>(op->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName()));
+  //   visit[role_id] = true;
+  //   switch (role_id) {
+  //     case _INT(OpRole::kForward):
+  //       if (visit.find(_INT(OpRole::kBackward)) != visit.end()) {
+  //         LOG(ERROR)
+  //             << "Cannot add backward operator before forward operator %s."
+  //             << op->Type();
+  //       }
+  //       break;
+  //     case _INT(OpRole::kBackward):
+  //     case _INT(OpRole::kBackward) | _INT(OpRole::kLoss):
+  //       PADDLE_ENFORCE(
+  //           visit.find(_INT(OpRole::kOptimize)) == visit.end(),
+  //           "Cannot add backward operator %s after optimize operator.",
+  //           op->Type());
+  //       break;
+  //     case _INT(OpRole::kForward) | _INT(OpRole::kLoss):
+  //       PADDLE_ENFORCE(visit.find(_INT(OpRole::kBackward) |
+  //                                 _INT(OpRole::kLoss)) == visit.end(),
+  //                      "Cannot add backward|loss operator before "
+  //                      "forward|loss operator %s.",
+  //                      op->Type());
+  //       PADDLE_ENFORCE(
+  //           visit.find(_INT(OpRole::kOptimize)) == visit.end(),
+  //           "Cannot add forward|loss operator %s after optimize operator.",
+  //           op->Type());
+  //       break;
+  //     case _INT(OpRole::kOptimize):
+  //     case _INT(OpRole::kOptimize) | _INT(OpRole::kLRSched):
+  //       PADDLE_ENFORCE(visit.find(_INT(OpRole::kBackward)) != visit.end(),
+  //                      "Optimize operators %s must follow backward operator.",
+  //                      op->Type());
+  //       break;
+  //     case _INT(OpRole::kLRSched):
+  //     case _INT(OpRole::kDist):
+  //     case _INT(OpRole::kRPC):
+  //     case _INT(OpRole::kNotSpecified):
+  //       break;
+  //     default:
+  //       LOG(FATAL) << "Unknown operator role. Don't add new role because "
+  //                     "you don't know what you are doing.";
+  //   }
+  // }
 
 #undef _INT
 }
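The block commented out above is the program-order sanity check in CheckProgram, which required operator roles in block 0 to appear in forward -> backward -> optimize order. As a rough illustration of the rule being disabled, here is a hypothetical Python sketch; the flag values and helper name are illustrative only, not Paddle's real OpRole constants:

    # Hypothetical sketch of the ordering rule CheckProgram enforced; the flag
    # values and helper name are illustrative, not Paddle's real OpRole values.
    FORWARD, BACKWARD, OPTIMIZE, LRSCHED, LOSS = 1, 2, 4, 16, 256


    def check_role_order(role_ids):
        seen = set()
        for role in role_ids:
            if role == FORWARD and BACKWARD in seen:
                raise ValueError("backward operator added before forward operator")
            if role in (BACKWARD, BACKWARD | LOSS) and OPTIMIZE in seen:
                raise ValueError("backward operator added after optimize operator")
            if role == FORWARD | LOSS and ((BACKWARD | LOSS) in seen or OPTIMIZE in seen):
                raise ValueError("forward|loss operator out of order")
            if role in (OPTIMIZE, OPTIMIZE | LRSCHED) and BACKWARD not in seen:
                raise ValueError("optimize operator must follow a backward operator")
            seen.add(role)


    # A well-formed program passes silently.
    check_role_order([FORWARD, FORWARD | LOSS, BACKWARD | LOSS, BACKWARD, OPTIMIZE])

With the check commented out, a program that placed an optimize op before any backward op would no longer be flagged at graph construction time.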
diff --git a/paddle/fluid/operators/optimizers/adam_op.h b/paddle/fluid/operators/optimizers/adam_op.h
index 3455d1ee54e..2205f473f23 100644
--- a/paddle/fluid/operators/optimizers/adam_op.h
+++ b/paddle/fluid/operators/optimizers/adam_op.h
@@ -292,6 +292,23 @@ class AdamOpKernel : public framework::OpKernel<T> {
             static_cast<const DeviceContext&>(ctx.device_context()),
             param.numel());
         for_range(functor);
+
+        auto& dev =
+            *ctx.template device_context<DeviceContext>().eigen_device();
+
+        const LoDTensor* beta1_pow_ptr = ctx.Input<LoDTensor>("Beta1Pow");
+        auto eigen_in_beta1_pow =
+            framework::EigenVector<T>::Flatten(*beta1_pow_ptr);
+        auto eigen_out_beta1_pow = framework::EigenVector<T>::Flatten(
+            *(const_cast<LoDTensor*>(beta1_pow_ptr)));
+        eigen_out_beta1_pow.device(dev) = beta1 * eigen_in_beta1_pow;
+
+        const LoDTensor* beta2_pow_ptr = ctx.Input<LoDTensor>("Beta2Pow");
+        auto eigen_in_beta2_pow =
+            framework::EigenVector<T>::Flatten(*beta2_pow_ptr);
+        auto eigen_out_beta2_pow = framework::EigenVector<T>::Flatten(
+            *(const_cast<LoDTensor*>(beta2_pow_ptr)));
+        eigen_out_beta2_pow.device(dev) = beta2 * eigen_in_beta2_pow;
       } else if (grad_var->IsType<framework::SelectedRows>()) {
         auto& grad =
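The Eigen expressions added above advance the Beta1Pow and Beta2Pow accumulators (beta1^t and beta2^t) in place right after the dense Adam update, so the Python optimizer no longer needs to append a separate scale op per accumulator (see the next file). A minimal NumPy sketch of the step the kernel now performs, assuming the standard Adam formulation used by adam_op; the function name and defaults are illustrative:

    import numpy as np


    def adam_step(param, grad, moment1, moment2, beta1_pow, beta2_pow,
                  lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
        # Standard dense Adam update, mirroring what the kernel computes per element.
        moment1 = beta1 * moment1 + (1 - beta1) * grad
        moment2 = beta2 * moment2 + (1 - beta2) * grad * grad
        # beta1_pow / beta2_pow hold beta1^t and beta2^t for bias correction.
        lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)
        param = param - lr_t * moment1 / (np.sqrt(moment2) + epsilon)
        # With this patch the kernel also advances the accumulators itself,
        # replacing the two "scale" ops the Python optimizer used to append.
        beta1_pow = beta1 * beta1_pow
        beta2_pow = beta2 * beta2_pow
        return param, moment1, moment2, beta1_pow, beta2_pow


    # First step: the pow accumulators start at beta1 and beta2 respectively.
    out = adam_step(np.ones(3), np.full(3, 0.5), np.zeros(3), np.zeros(3), 0.9, 0.999)

The accumulators feed the bias-corrected learning rate, which is why they must be advanced exactly once per application of the op.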
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index da92826d410..1930ac106b2 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -477,7 +477,7 @@ class LarsMomentumOptimizer(Optimizer):
         regularization: A Regularizer, such as
                         fluid.regularizer.L2DecayRegularizer.
         name: A optional name prefix.
-        
+
     Examples:
         .. code-block:: python
 
@@ -739,26 +739,27 @@ class AdamOptimizer(Optimizer):
         """
         assert isinstance(block, framework.Block)
         main_block = block.program.global_block()
-        for param, grad in param_and_grads:
-            if grad is None:
-                continue
-            with param.block.program._optimized_guard(
-                [param, grad]), name_scope("optimizer"):
-                beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
-                                                      param)
-                beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
-                                                      param)
-                main_block.append_op(
-                    type="scale",
-                    inputs={"X": beta1_pow_acc},
-                    outputs={"Out": beta1_pow_acc},
-                    attrs={"scale": self._beta1})
-
-                main_block.append_op(
-                    type="scale",
-                    inputs={"X": beta2_pow_acc},
-                    outputs={"Out": beta2_pow_acc},
-                    attrs={"scale": self._beta2})
+        # for param, grad in param_and_grads:
+
+        #     if grad is None:
+        #         continue
+        #     with param.block.program._optimized_guard(
+        #         [param, grad]), name_scope("optimizer"):
+        #         beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
+        #                                               param)
+        #         beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
+        #                                               param)
+        #         main_block.append_op(
+        #             type="scale",
+        #             inputs={"X": beta1_pow_acc},
+        #             outputs={"Out": beta1_pow_acc},
+        #             attrs={"scale": self._beta1})
+
+        #         main_block.append_op(
+        #             type="scale",
+        #             inputs={"X": beta2_pow_acc},
+        #             outputs={"Out": beta2_pow_acc},
+        #             attrs={"scale": self._beta2})
 
 
 class AdamaxOptimizer(Optimizer):
-- 
GitLab
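Because the accumulator update now happens inside the C++ kernel, the user-facing optimizer API is unchanged; the built program simply contains two fewer scale ops per parameter. A minimal usage sketch, assuming the fluid 1.x layer and optimizer APIs of this branch; the network is a single fc layer so the snippet stays self-contained:

    import paddle.fluid as fluid

    # Build a trivial regression network; shapes are arbitrary.
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    pred = fluid.layers.fc(input=x, size=1)
    loss = fluid.layers.mean(fluid.layers.square_error_cost(input=pred, label=y))

    adam = fluid.optimizer.Adam(learning_rate=0.001, beta1=0.9, beta2=0.999)
    adam.minimize(loss)  # the appended adam ops now scale Beta1Pow/Beta2Pow themselves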