Commit a21f4e38 authored by minqiyang

Polish code

test=develop
Parent 8ce198b2
......@@ -199,13 +199,6 @@ framework::LoDTensor& VarBase::GradValue() {
}
std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
VLOG(3) << "ApplyGrad to Op: " << op_desc_->Type();
-  for (auto it : input_vars_) {
-    for (VarBase* var : it.second) {
-      VLOG(3) << "Op Input: " << it.first << " : " << var->var_desc_->Name();
-    }
-  }
if (!grad_op_desc_ && backward_id_ <= 0) {
LOG(WARNING) << "op with no grad: " << op_desc_->Type();
return {};
@@ -256,9 +249,6 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
for (size_t i = 0; i < outputs.size(); ++i) {
framework::Variable* grad = outputs[i];
framework::Variable* orig_grad = origin_outputs[i];
LOG(ERROR) << "Add grad of " << it.first << " " << i << " "
<< orig_grad->GetMutable<framework::LoDTensor>()->mutable_data(
expected_place_);
AddGradTo(grad, orig_grad, expected_place_);
delete grad;
}
......
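In the hunk above, the remaining loop hands each freshly computed gradient to AddGradTo, which accumulates it into the variable's existing gradient buffer before the temporary is deleted. Below is a minimal Python sketch of that accumulate-then-discard idea, using plain numpy arrays instead of Paddle's Variable/LoDTensor types; the dictionary, function name, and parameter name are illustrative only.

```python
import numpy as np

def add_grad_to(new_grad, grad_buffers, name):
    """Accumulate a freshly computed gradient into a persistent buffer."""
    if name not in grad_buffers:
        grad_buffers[name] = np.zeros_like(new_grad)
    grad_buffers[name] += new_grad  # gradients from every consumer op are summed

grad_buffers = {}
add_grad_to(np.ones((2, 2), dtype="float32"), grad_buffers, "fc_0.w_0")
add_grad_to(np.full((2, 2), 0.5, dtype="float32"), grad_buffers, "fc_0.w_0")
# grad_buffers["fc_0.w_0"] now holds 1.5 everywhere; the temporaries can be dropped
```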
@@ -159,7 +159,6 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
} else {
VarBase* var = vars[var_it->second];
if (!var->grads_->var_->IsInitialized()) {
LOG(ERROR) << "Init grad input " << it.first << " " << grad_invar;
InitVar(var->var_, var->grads_->var_,
prepared_op.GetDeviceContext());
}
@@ -181,9 +180,6 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
VarBase* var = vars[var_it->second];
if (!var->grads_->var_->IsInitialized()) {
InitVar(var->var_, var->grads_->var_, prepared_op.GetDeviceContext());
LOG(ERROR) << "Init grad output " << it.first << " " << grad_outvar
<< var->grads_->var_->GetMutable<framework::LoDTensor>()
->mutable_data(platform::CPUPlace());
}
grad_out_vars.push_back(var->grads_->var_);
}
......
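Apart from dropping the LOG(ERROR) calls, both tracer hunks keep the same lazy-initialization pattern: a variable's gradient buffer is only allocated, via InitVar, the first time tracing needs it. Here is a rough Python rendering of that check; TracedVar and grad_buffer are made-up stand-ins for VarBase and InitVar, not Paddle APIs.

```python
import numpy as np

class TracedVar(object):
    """Toy stand-in for VarBase: the gradient buffer starts out unallocated."""
    def __init__(self, shape):
        self.shape = shape
        self.grad = None

def grad_buffer(var):
    # Mirrors the `if (!var->grads_->var_->IsInitialized()) InitVar(...)` checks.
    if var.grad is None:
        var.grad = np.zeros(var.shape, dtype="float32")
    return var.grad

v = TracedVar((2, 3))
assert grad_buffer(v) is grad_buffer(v)  # allocated once, reused afterwards
```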
@@ -386,7 +386,6 @@ class Variable(object):
def _numpy(self):
tensor = self._ivar._cpu_tensor()
-        print('shapex', self.name, tensor.shape())
return np.array(tensor)
def _backward(self):
......
@@ -144,7 +144,7 @@ class Conv2D(layers.Layer):
attrs={'axis': 1})
# Currently, we don't support inplace in imperative mode
-        return self._helper.append_activation(pre_act)
+        return self._helper.append_activation(pre_act, force_no_inplace=True)
class Pool2D(layers.Layer):
@@ -286,7 +286,8 @@ class FC(layers.Layer):
else:
pre_activation = pre_bias
# Currently, we don't support inplace in imperative mode
-        return self._helper.append_activation(pre_activation)
+        return self._helper.append_activation(
+            pre_activation, force_no_inplace=True)
class BatchNorm(layers.Layer):
@@ -418,4 +419,5 @@ class BatchNorm(layers.Layer):
})
# Currently, we don't support inplace in imperative mode
-        return self._helper.append_activation(batch_norm_out)
+        return self._helper.append_activation(
+            batch_norm_out, force_no_inplace=True)
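Conv2D, FC, and BatchNorm all end the same way: the pre-activation result goes through the layer helper, and each call now passes force_no_inplace=True because inplace activations are not yet supported in imperative mode. A condensed sketch of that shared tail follows; SketchLayer is hypothetical, not one of the real classes.

```python
class SketchLayer(object):
    """Hypothetical imperative layer ending, mirroring Conv2D/FC/BatchNorm."""

    def __init__(self, helper):
        self._helper = helper

    def forward(self, pre_activation):
        # Currently, we don't support inplace in imperative mode, so ask the
        # helper for a separate output variable instead of reusing the input.
        return self._helper.append_activation(
            pre_activation, force_no_inplace=True)
```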
@@ -419,7 +419,7 @@ class LayerHelper(object):
attrs={'axis': dim_start})
return tmp
-    def append_activation(self, input_var):
+    def append_activation(self, input_var, force_no_inplace=False):
act = self.kwargs.get('act', None)
if act is None:
return input_var
@@ -436,11 +436,9 @@ class LayerHelper(object):
tmp = input_var
# NOTE(dzhwinter): some activation support inplace compution.
# NOTE(minqiyang): currently, we don't support inplace in imperative mode
-        # if core.IsInplace(act_type) and no_inplace:
-        #     print("inplace", act_type)
-        #     tmp = input_var
-        # else:
-        print("not inplace", act_type)
+        if not force_no_inplace and core.IsInplace(act_type):
+            tmp = input_var
+        else:
tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
self.append_op(
type=act_type,
......
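The rewritten branch makes the policy explicit: reuse the input variable only when the activation supports inplace computation and the caller did not set force_no_inplace. The standalone sketch below replays that decision with core.IsInplace replaced by a plain boolean so it runs without Paddle; all names in it are illustrative.

```python
def pick_activation_output(input_var, act_is_inplace, force_no_inplace=False,
                           create_output=lambda: "fresh_output_var"):
    # Mirrors the new if/else in LayerHelper.append_activation: inplace
    # activations may overwrite their input, so imperative callers pass
    # force_no_inplace=True to always get a separate output variable.
    if not force_no_inplace and act_is_inplace:
        return input_var
    return create_output()

assert pick_activation_output("x", act_is_inplace=True) == "x"
assert pick_activation_output("x", act_is_inplace=True,
                              force_no_inplace=True) == "fresh_output_var"
```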
@@ -228,7 +228,6 @@ class TestImperativeResnet(unittest.TestCase):
dy_x_data = np.array(
[x[0].reshape(3, 224, 224) for x in data]).astype('float32')
-        print('dy input shape', dy_x_data.shape)
y_data = np.array([x[1] for x in data]).astype('int64').reshape(
batch_size, 1)
@@ -240,8 +239,6 @@ class TestImperativeResnet(unittest.TestCase):
loss = fluid.layers.cross_entropy(input=out, label=label)
avg_loss = fluid.layers.mean(x=loss)
-            print('shapex ', avg_loss.shape)
dy_out = avg_loss._numpy()
if batch_id == 0:
@@ -291,9 +288,6 @@ class TestImperativeResnet(unittest.TestCase):
avg_loss = fluid.layers.mean(x=loss)
optimizer.minimize(avg_loss)
-        print('avg_loss shape', avg_loss.shape)
-        print(fluid.default_main_program())
# initialize params and fetch them
static_param_init_value = {}
static_param_name_list = []
......