From a21f4e38c3f8ef847ce1b72f4c042d03e6281f77 Mon Sep 17 00:00:00 2001
From: minqiyang
Date: Mon, 21 Jan 2019 12:45:45 +0800
Subject: [PATCH] Polish code

test=develop
---
 paddle/fluid/imperative/layer.cc                      | 10 ----------
 paddle/fluid/imperative/tracer.cc                     |  4 ----
 python/paddle/fluid/framework.py                      |  1 -
 python/paddle/fluid/imperative/nn.py                  |  8 +++++---
 python/paddle/fluid/layer_helper.py                   | 12 +++++-------
 .../fluid/tests/unittests/test_imperative_resnet.py   |  6 ------
 6 files changed, 10 insertions(+), 31 deletions(-)

diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc
index 27734f978..c5676e2f5 100644
--- a/paddle/fluid/imperative/layer.cc
+++ b/paddle/fluid/imperative/layer.cc
@@ -199,13 +199,6 @@ framework::LoDTensor& VarBase::GradValue() {
 }
 
 std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
-  VLOG(3) << "ApplyGrad to Op: " << op_desc_->Type();
-  for (auto it : input_vars_) {
-    for (VarBase* var : it.second) {
-      VLOG(3) << "Op Input: " << it.first << " : " << var->var_desc_->Name();
-    }
-  }
-
   if (!grad_op_desc_ && backward_id_ <= 0) {
     LOG(WARNING) << "op with no grad: " << op_desc_->Type();
     return {};
@@ -256,9 +249,6 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
     for (size_t i = 0; i < outputs.size(); ++i) {
       framework::Variable* grad = outputs[i];
      framework::Variable* orig_grad = origin_outputs[i];
-      LOG(ERROR) << "Add grad of " << it.first << " " << i << " "
-                 << orig_grad->GetMutable<framework::LoDTensor>()->mutable_data<float>(
-                        expected_place_);
       AddGradTo(grad, orig_grad, expected_place_);
       delete grad;
     }
diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc
index bfa47ea76..3c102912c 100644
--- a/paddle/fluid/imperative/tracer.cc
+++ b/paddle/fluid/imperative/tracer.cc
@@ -159,7 +159,6 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
       } else {
         VarBase* var = vars[var_it->second];
         if (!var->grads_->var_->IsInitialized()) {
-          LOG(ERROR) << "Init grad input " << it.first << " " << grad_invar;
           InitVar(var->var_, var->grads_->var_,
                   prepared_op.GetDeviceContext());
         }
@@ -181,9 +180,6 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
       VarBase* var = vars[var_it->second];
       if (!var->grads_->var_->IsInitialized()) {
         InitVar(var->var_, var->grads_->var_, prepared_op.GetDeviceContext());
-        LOG(ERROR) << "Init grad output " << it.first << " " << grad_outvar
-                   << var->grads_->var_->GetMutable<framework::LoDTensor>()
-                          ->mutable_data<float>(platform::CPUPlace());
       }
       grad_out_vars.push_back(var->grads_->var_);
     }
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 2d6fbab6b..46fbf8857 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -386,7 +386,6 @@ class Variable(object):
 
     def _numpy(self):
         tensor = self._ivar._cpu_tensor()
-        print('shapex', self.name, tensor.shape())
         return np.array(tensor)
 
     def _backward(self):
diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py
index 140c0ff03..23ef35bad 100644
--- a/python/paddle/fluid/imperative/nn.py
+++ b/python/paddle/fluid/imperative/nn.py
@@ -144,7 +144,7 @@ class Conv2D(layers.Layer):
                 attrs={'axis': 1})
 
         # Currently, we don't support inplace in imperative mode
-        return self._helper.append_activation(pre_act)
+        return self._helper.append_activation(pre_act, force_no_inplace=True)
 
 
 class Pool2D(layers.Layer):
@@ -286,7 +286,8 @@ class FC(layers.Layer):
         else:
             pre_activation = pre_bias
         # Currently, we don't support inplace in imperative mode
-        return self._helper.append_activation(pre_activation)
+        return self._helper.append_activation(
+            pre_activation, force_no_inplace=True)
 
 
 class BatchNorm(layers.Layer):
@@ -418,4 +419,5 @@ class BatchNorm(layers.Layer):
             })
 
         # Currently, we don't support inplace in imperative mode
-        return self._helper.append_activation(batch_norm_out)
+        return self._helper.append_activation(
+            batch_norm_out, force_no_inplace=True)
diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py
index f9c9b896b..df5591fb2 100644
--- a/python/paddle/fluid/layer_helper.py
+++ b/python/paddle/fluid/layer_helper.py
@@ -419,7 +419,7 @@ class LayerHelper(object):
                 attrs={'axis': dim_start})
             return tmp
 
-    def append_activation(self, input_var):
+    def append_activation(self, input_var, force_no_inplace=False):
         act = self.kwargs.get('act', None)
         if act is None:
             return input_var
@@ -436,12 +436,10 @@ class LayerHelper(object):
         tmp = input_var
         # NOTE(dzhwinter): some activation support inplace compution.
         # NOTE(minqiyang): currently, we don't support inplace in imperative mode
-        # if core.IsInplace(act_type) and no_inplace:
-        #     print("inplace", act_type)
-        #     tmp = input_var
-        # else:
-        print("not inplace", act_type)
-        tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
+        if not force_no_inplace and core.IsInplace(act_type):
+            tmp = input_var
+        else:
+            tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
         self.append_op(
             type=act_type,
             inputs={"X": [input_var]},
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
index 8e2ea735c..af821dfc0 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
@@ -228,7 +228,6 @@ class TestImperativeResnet(unittest.TestCase):
 
                 dy_x_data = np.array(
                     [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
-                print('dy input shape', dy_x_data.shape)
                 y_data = np.array([x[1] for x in data]).astype('int64').reshape(
                     batch_size, 1)
 
@@ -240,8 +239,6 @@ class TestImperativeResnet(unittest.TestCase):
 
                 loss = fluid.layers.cross_entropy(input=out, label=label)
                 avg_loss = fluid.layers.mean(x=loss)
-                print('shapex ', avg_loss.shape)
-
                 dy_out = avg_loss._numpy()
 
                 if batch_id == 0:
@@ -291,9 +288,6 @@ class TestImperativeResnet(unittest.TestCase):
 
             avg_loss = fluid.layers.mean(x=loss)
             optimizer.minimize(avg_loss)
-            print('avg_loss shape', avg_loss.shape)
-            print(fluid.default_main_program())
-
             # initialize params and fetch them
             static_param_init_value = {}
             static_param_name_list = []
--
GitLab
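
Note on the force_no_inplace change in this patch: the imperative (dygraph) layers in nn.py now call LayerHelper.append_activation(..., force_no_inplace=True), so the activation always writes into a freshly created variable, while static-graph callers keep the default force_no_inplace=False and may still reuse the input when core.IsInplace(act_type) is true. The snippet below is a minimal, self-contained sketch of that decision only; choose_activation_output, is_inplace_act, and create_output_var are hypothetical stand-ins for the real LayerHelper code, core.IsInplace, and create_variable_for_type_inference, not PaddlePaddle APIs.

# Simplified sketch of the inplace decision inside LayerHelper.append_activation.
# is_inplace_act and create_output_var are hypothetical stand-ins for
# core.IsInplace(act_type) and create_variable_for_type_inference(...).

def choose_activation_output(input_var, act_type, force_no_inplace,
                             is_inplace_act, create_output_var):
    """Pick the variable the activation op should write its result into."""
    if not force_no_inplace and is_inplace_act(act_type):
        # Static-graph path: reuse the input variable (in-place activation).
        return input_var
    # Imperative path (force_no_inplace=True) or a non-inplace kernel:
    # allocate a new output variable so the input's value is preserved.
    return create_output_var(input_var)


if __name__ == "__main__":
    def pick(force_no_inplace):
        return choose_activation_output(
            input_var="pre_act",
            act_type="relu",
            force_no_inplace=force_no_inplace,
            is_inplace_act=lambda act: act in {"relu", "sigmoid"},
            create_output_var=lambda var: var + "_out")

    print(pick(False))  # pre_act      -> reused in place (static graph)
    print(pick(True))   # pre_act_out  -> new variable (imperative layers)

Passing the flag explicitly from the imperative layers keeps the static-graph fast path untouched while guaranteeing that dygraph never aliases an activation's input and output.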