Commit 5822f7f1 authored by minqiyang

Polish code

test=develop
Parent fff44af8
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
syntax = "proto2";
-/* option optimize_for = LITE_RUNTIME; */
+option optimize_for = LITE_RUNTIME;
package paddle.framework.proto;
// Any incompatible changes to ProgramDesc and its dependencies should
......
@@ -192,7 +192,6 @@ std::vector<Variable*> OpBase::ApplyGrad(framework::Scope* scope) {
LOG(ERROR) << "tracer doesn't support yet";
}
}
-VLOG(3) << "op grad output var " << outvar << " is inited";
}
grad_op_desc_->InferShape(*block_);
......
@@ -52,7 +52,7 @@ class Tracer {
const std::vector<VarBase*>& outputs, framework::BlockDesc* block,
const bool stop_gradient) {
framework::OpDesc* op_desc = op->op_desc_;
-LOG(ERROR) << "tracer tracing " << op_desc->Type();
+VLOG(3) << "tracer tracing " << op_desc->Type();
op_desc->InferShape(*block);
op_desc->InferVarType(block);
std::unique_ptr<framework::OperatorBase> op_base =
@@ -61,10 +61,7 @@ class Tracer {
*op->input_vars_ = inputs;
for (VarBase* input : inputs) {
const std::string vname = input->var_desc_->Name();
-LOG(ERROR) << "input: " << vname;
-LOG(ERROR) << "input var: " << input->var_;
framework::Variable* var = root_scope_->Var(vname);
-LOG(ERROR) << "var_ in tracer pointer: " << var;
input->var_ = var;
if (!var->IsInitialized()) {
framework::VarDesc* var_desc = block->FindVar(vname);
@@ -102,7 +99,7 @@ class Tracer {
outputs[i]->pre_op_out_idx_ = i;
}
-LOG(ERROR) << "tracer running " << op_desc->Type();
+VLOG(3) << "tracer running " << op_desc->Type();
op_base->Run(*root_scope_, platform::CPUPlace());
if (!stop_gradient) {
framework::OpDesc* grad_op_desc;
......
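The two tracer hunks above demote the per-op messages from LOG(ERROR) to VLOG(3), so they no longer appear in normal runs and only show up when glog verbosity is raised to 3 or higher. A minimal sketch of turning them back on for debugging, assuming the standard glog environment variables (which must be set before the core library is loaded):

```python
import os

# glog reads these at startup, so set them before importing paddle.fluid.
os.environ["GLOG_v"] = "3"            # show VLOG(n) messages with n <= 3
os.environ["GLOG_logtostderr"] = "1"  # send them to stderr instead of log files

import paddle.fluid as fluid  # noqa: E402 -- import deliberately after env setup
```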
@@ -29,8 +29,6 @@ class SGDOpKernel : public framework::OpKernel<T> {
const auto *param_var = ctx.InputVar("Param");
const auto *grad_var = ctx.InputVar("Grad");
-LOG(ERROR) << "grad_var: " << grad_var;
if (param_var->IsType<framework::LoDTensor>()) {
const auto *param = ctx.Input<framework::Tensor>("Param");
auto *param_out = ctx.Output<framework::Tensor>("ParamOut");
@@ -41,11 +39,8 @@ class SGDOpKernel : public framework::OpKernel<T> {
const auto *grad = ctx.Input<framework::Tensor>("Grad");
auto p = framework::EigenVector<T>::Flatten(*param);
-LOG(ERROR) << "param flattened";
auto g = framework::EigenVector<T>::Flatten(*grad);
-LOG(ERROR) << "grad flattened";
auto o = framework::EigenVector<T>::Flatten(*param_out);
-LOG(ERROR) << "paramout flattened";
auto *lr = learning_rate->data<T>();
o = p - lr[0] * g;
......
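With the logging gone, the dense branch of the SGD kernel reduces to flattening Param, Grad and ParamOut with Eigen and applying o = p - lr[0] * g elementwise, where lr is a one-element learning-rate tensor. A small numpy sketch of the same update (illustrative only, not the Paddle API):

```python
import numpy as np

def sgd_update(param, grad, lr):
    # lr mirrors the kernel's learning_rate->data<T>(): a one-element array
    # whose first entry is the scalar step size.
    return param - lr[0] * grad

p = np.array([1.0, 2.0, 3.0])
g = np.array([0.1, 0.1, 0.1])
print(sgd_update(p, g, np.array([0.5])))  # -> [0.95 1.95 2.95]
```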
@@ -117,19 +117,14 @@ PYBIND11_MODULE(core, m) {
[](imperative::VarBase &self, framework::Scope *scope) {
self.RunBackward(scope);
})
-.def("_grad_var",
-[](const imperative::VarBase &self) {
-LOG(ERROR) << "grad_var_ pointer: " << self.grads_;
-return self.grads_;
-},
-py::return_value_policy::reference)
.def("_grad_name", &imperative::VarBase::GradName)
.def("_grad", &imperative::VarBase::Grad)
.def("_print_var_pointer",
[](const imperative::VarBase &self) {
LOG(ERROR) << self.var_desc_->Name()
<< " print_var pointer: " << self.var_;
})
.def_property("grad_value",
[](const imperative::VarBase &self) { return self.grads_; },
[](imperative::VarBase &self, framework::Variable *grad) {
self.grads_ = grad;
},
py::return_value_policy::reference)
.def_property("value",
[](const imperative::VarBase &self) { return self.var_; },
[](imperative::VarBase &self, framework::Variable *var) {
......
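The pybind hunk above drops the debug-only _grad_var and _print_var_pointer bindings and instead exposes the gradient holder as a read/write grad_value property next to the existing value property. In Python terms, def_property("grad_value", getter, setter) behaves like a plain property over the C++ grads_ pointer; a rough pure-Python analogy (VarBase itself is implemented in C++, so the names below are only illustrative):

```python
class VarBaseAnalogy:
    """Mimics the grad_value binding: reads return the held gradient
    variable, writes replace it."""

    def __init__(self):
        self._grads = None  # stands in for the C++ grads_ pointer

    @property
    def grad_value(self):
        return self._grads

    @grad_value.setter
    def grad_value(self, grad):
        self._grads = grad
```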
@@ -361,6 +361,7 @@ class Variable(object):
self.block.vars[name] = self
self.op = None
self.stop_gradient = stop_gradient
self.is_data = is_data
if _in_imperative_mode():
self._ivar = core.VarBase()
@@ -368,7 +369,6 @@
self._ivar.stop_gradient = stop_gradient
def _numpy(self):
-print("get_variable_tensor", self.desc.name())
scope = _imperative_tracer().get_scope()
tensor = core.get_variable_tensor(scope, self.desc.name())
return np.array(tensor)
@@ -597,8 +597,7 @@ class Operator(object):
type=None,
inputs=None,
outputs=None,
-attrs=None,
-stop_gradient=False):
+attrs=None):
self.block = block
self.desc = desc
# note: not add self.attrs here:
@@ -640,7 +639,6 @@
if inputs is not None:
for in_proto in proto.inputs:
-print("create op: find_name", in_proto.name)
found = find_name(inputs, in_proto.name)
assert found or in_proto.dispensable, "Input {} not found".format(
in_proto.name)
@@ -1178,7 +1176,6 @@ class Block(object):
def create_var(self, *args, **kwargs):
var = Variable(block=self, *args, **kwargs)
if 'initializer' in kwargs:
-print("initializer, ", type(kwargs['initializer']))
kwargs['initializer'](var, self)
return var
@@ -1293,16 +1290,6 @@
"""
op_desc = self.desc.append_op()
op = Operator(block=self, desc=op_desc, *args, **kwargs)
-print("op inputs: ", [v._numpy() for v in op.inputs])
-print("op inputs: ", [v for v in op.inputs])
-import sys
-sys.stdout.flush()
-for v in op.inputs:
-v._ivar._print_var_pointer()
-print("print var pointer end")
-import sys
-sys.stdout.flush()
if _in_imperative_mode():
_imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
[v._ivar for v in op.outputs], self.desc,
@@ -1360,10 +1347,6 @@
_imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
[v._ivar for v in op.outputs], self.desc,
kwargs.get("stop_gradient", False))
-print([v.name for v in op.outputs])
-for v in op.outputs:
-v._ivar._print_var_pointer()
-print("fill_constant end")
self.ops.insert(0, op)
return op
......
@@ -153,7 +153,6 @@ class ConstantInitializer(Initializer):
assert isinstance(var, framework.Variable)
assert isinstance(block, framework.Block)
# Initialization Ops should be prepended and not appended
-print("fill_constant")
op = block._prepend_op(
type="fill_constant",
outputs={"Out": var},
......
@@ -22,6 +22,7 @@ import numpy as np
from .framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
from . import unique_name
from paddle.fluid.imperative import base as imperative_base
from paddle.fluid.imperative.base import to_variable
from paddle.fluid.initializer import Constant, Xavier
from .param_attr import ParamAttr, WeightNormParamAttr
@@ -369,7 +370,10 @@ class LayerHelper(object):
def set_variable_initializer(self, var, initializer):
assert isinstance(var, Variable)
-return self.startup_program.global_block().create_var(
+if imperative_base.enabled():
+initializer(var, self.startup_program.global_block())
+else:
+self.startup_program.global_block().create_var(
name=var.name,
type=var.type,
dtype=var.dtype,
......
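The set_variable_initializer change above separates the eager and deferred initialization paths: when imperative mode is enabled, the initializer op is applied to the variable on the spot (the tracer executes it immediately), otherwise the variable is created in the startup program as before and is only initialized when that program is run. A stripped-down sketch of the control flow (hypothetical helper names, not the fluid API):

```python
def set_variable_initializer(var, initializer, imperative_enabled, startup_block):
    if imperative_enabled:
        # Eager: run the init op against the variable right away.
        initializer(var, startup_block)
    else:
        # Declarative: only record the variable in the startup block; the init
        # op runs when the startup program is executed later.
        return startup_block.create_var(name=var.name, persistable=True)
```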
@@ -132,13 +132,6 @@ def create_global_var(shape,
persistable=persistable,
name=name,
stop_gradient=True)
-print("set_variable_initializer, ", var.name)
-if imperative_base.enabled():
-var = helper.set_variable_initializer(
-var, initializer=Constant(
-value=float(value), force_cpu=force_cpu))
-print("get var", var)
-else:
helper.set_variable_initializer(
var, initializer=Constant(
value=float(value), force_cpu=force_cpu))
......
@@ -109,7 +109,6 @@ class Optimizer(object):
# create learning rate variable for every parameter
param = param_and_grad[0]
param_lr = param.optimize_attr['learning_rate']
-print("param_lr: ", param_lr, self._global_learning_rate()._numpy())
if type(param_lr) == Variable:
return param_lr
else:
@@ -311,15 +310,12 @@
parameters = program.global_block().all_parameters()
params_grads = []
for param in parameters:
# create gradient variable
grad_var = Variable(
block=loss.block,
name=param._ivar._grad_name(),
stop_gradient=True)
grad_var._value = param._ivar._grad_var()
print("create grad var: ", grad_var.name)
print("grad_var value: ", grad_var._numpy())
import sys
sys.stdout.flush()
params_grads.append((param, grad_var))
optimize_ops = self._create_optimization_pass(params_grads, loss,
@@ -381,10 +377,6 @@ class SGDOptimizer(Optimizer):
def _append_optimize_op(self, block, param_and_grad):
assert isinstance(block, framework.Block)
-print("append sgd")
-import sys
-sys.stdout.flush()
# create the optimize op
sgd_op = block.append_op(
type=self.type,
......