diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc
index 27734f978eb2f3e5aba8eec14e00ccd083a78ea8..c5676e2f5e7040c463fd3c82f2b709de7f84aab6 100644
--- a/paddle/fluid/imperative/layer.cc
+++ b/paddle/fluid/imperative/layer.cc
@@ -199,13 +199,6 @@ framework::LoDTensor& VarBase::GradValue() {
 }
 
 std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
-  VLOG(3) << "ApplyGrad to Op: " << op_desc_->Type();
-  for (auto it : input_vars_) {
-    for (VarBase* var : it.second) {
-      VLOG(3) << "Op Input: " << it.first << " : " << var->var_desc_->Name();
-    }
-  }
-
   if (!grad_op_desc_ && backward_id_ <= 0) {
     LOG(WARNING) << "op with no grad: " << op_desc_->Type();
     return {};
@@ -256,9 +249,6 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
     for (size_t i = 0; i < outputs.size(); ++i) {
       framework::Variable* grad = outputs[i];
       framework::Variable* orig_grad = origin_outputs[i];
-      LOG(ERROR) << "Add grad of " << it.first << " " << i << " "
-                 << orig_grad->GetMutable<framework::LoDTensor>()->mutable_data(
-                        expected_place_);
       AddGradTo(grad, orig_grad, expected_place_);
       delete grad;
     }
diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc
index bfa47ea76956b2dd0474a89dfac378cb515acb56..3c102912c5c3a9ea6a7dcde50881ef3223527d52 100644
--- a/paddle/fluid/imperative/tracer.cc
+++ b/paddle/fluid/imperative/tracer.cc
@@ -159,7 +159,6 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
       } else {
         VarBase* var = vars[var_it->second];
         if (!var->grads_->var_->IsInitialized()) {
-          LOG(ERROR) << "Init grad input " << it.first << " " << grad_invar;
           InitVar(var->var_, var->grads_->var_,
                   prepared_op.GetDeviceContext());
         }
@@ -181,9 +180,6 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
       VarBase* var = vars[var_it->second];
       if (!var->grads_->var_->IsInitialized()) {
         InitVar(var->var_, var->grads_->var_, prepared_op.GetDeviceContext());
-        LOG(ERROR) << "Init grad output " << it.first << " " << grad_outvar
-                   << var->grads_->var_->GetMutable<framework::LoDTensor>()
-                          ->mutable_data(platform::CPUPlace());
       }
       grad_out_vars.push_back(var->grads_->var_);
     }
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 2d6fbab6beb7ea5a1e4b1c21fb49261f37e7f3c2..46fbf8857f310cceddbc219beaf11bcc97885bfc 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -386,7 +386,6 @@ class Variable(object):
 
     def _numpy(self):
         tensor = self._ivar._cpu_tensor()
-        print('shapex', self.name, tensor.shape())
         return np.array(tensor)
 
     def _backward(self):
diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py
index 140c0ff037d453641cc119301269121025e17cbd..23ef35bad8f5dc6285ddf148f5c55a2be82b2ae7 100644
--- a/python/paddle/fluid/imperative/nn.py
+++ b/python/paddle/fluid/imperative/nn.py
@@ -144,7 +144,7 @@ class Conv2D(layers.Layer):
                 attrs={'axis': 1})
 
         # Currently, we don't support inplace in imperative mode
-        return self._helper.append_activation(pre_act)
+        return self._helper.append_activation(pre_act, force_no_inplace=True)
 
 
 class Pool2D(layers.Layer):
@@ -286,7 +286,8 @@ class FC(layers.Layer):
         else:
             pre_activation = pre_bias
         # Currently, we don't support inplace in imperative mode
-        return self._helper.append_activation(pre_activation)
+        return self._helper.append_activation(
+            pre_activation, force_no_inplace=True)
 
 
 class BatchNorm(layers.Layer):
@@ -418,4 +419,5 @@
             })
 
         # Currently, we don't support inplace in imperative mode
-        return self._helper.append_activation(batch_norm_out)
+        return self._helper.append_activation(
+            batch_norm_out, force_no_inplace=True)
diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py
index f9c9b896ba0545d517dadf29739b8105cf952df4..df5591fb2a711edca9b69762ca044c493382b66e 100644
--- a/python/paddle/fluid/layer_helper.py
+++ b/python/paddle/fluid/layer_helper.py
@@ -419,7 +419,7 @@ class LayerHelper(object):
             attrs={'axis': dim_start})
         return tmp
 
-    def append_activation(self, input_var):
+    def append_activation(self, input_var, force_no_inplace=False):
        act = self.kwargs.get('act', None)
        if act is None:
            return input_var
@@ -436,12 +436,10 @@ class LayerHelper(object):
         tmp = input_var
         # NOTE(dzhwinter): some activation support inplace compution.
         # NOTE(minqiyang): currently, we don't support inplace in imperative mode
-        # if core.IsInplace(act_type) and no_inplace:
-        #     print("inplace", act_type)
-        #     tmp = input_var
-        # else:
-        print("not inplace", act_type)
-        tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
+        if not force_no_inplace and core.IsInplace(act_type):
+            tmp = input_var
+        else:
+            tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
         self.append_op(
             type=act_type,
             inputs={"X": [input_var]},
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
index 8e2ea735c02dd9858aea462a9e039da841eb0ed7..af821dfc0682f737ef1d306f42ce592b98f0903f 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
@@ -228,7 +228,6 @@ class TestImperativeResnet(unittest.TestCase):
 
                 dy_x_data = np.array(
                     [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
-                print('dy input shape', dy_x_data.shape)
                 y_data = np.array([x[1] for x in data]).astype('int64').reshape(
                     batch_size, 1)
 
@@ -240,8 +239,6 @@ class TestImperativeResnet(unittest.TestCase):
                 loss = fluid.layers.cross_entropy(input=out, label=label)
                 avg_loss = fluid.layers.mean(x=loss)
 
-                print('shapex ', avg_loss.shape)
-
                 dy_out = avg_loss._numpy()
 
                 if batch_id == 0:
@@ -291,9 +288,6 @@ class TestImperativeResnet(unittest.TestCase):
             avg_loss = fluid.layers.mean(x=loss)
             optimizer.minimize(avg_loss)
 
-            print('avg_loss shape', avg_loss.shape)
-            print(fluid.default_main_program())
-
             # initialize params and fetch them
             static_param_init_value = {}
             static_param_name_list = []
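
Note for reviewers: a minimal, standalone sketch (not part of the patch) of the output-variable selection that LayerHelper.append_activation performs after this change. The helper choose_activation_output and its parameters are hypothetical stand-ins; is_inplace_supported plays the role of core.IsInplace(act_type), and new_var_factory stands in for create_variable_for_type_inference. Only the branch logic mirrors the diff.

def choose_activation_output(input_var, new_var_factory, is_inplace_supported,
                             force_no_inplace=False):
    # Reuse the input variable only when the activation op supports inplace
    # AND the caller did not force out-of-place computation. The imperative
    # Conv2D / FC / BatchNorm layers pass force_no_inplace=True, so they
    # always receive a freshly created output variable.
    if not force_no_inplace and is_inplace_supported:
        return input_var
    return new_var_factory()

# Usage: an imperative-mode caller always ends up with a new output variable,
# even for an activation that supports inplace computation.
out = choose_activation_output(
    input_var="pre_act",
    new_var_factory=lambda: "new_output_var",
    is_inplace_supported=True,
    force_no_inplace=True)
assert out == "new_output_var"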