diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc
index 15e237a0e8ec3e8560d51ca4d63f296cfbca805d..ef6d8f4016a057fd9256499067f72dd9d11c452f 100644
--- a/paddle/fluid/imperative/layer.cc
+++ b/paddle/fluid/imperative/layer.cc
@@ -115,7 +115,6 @@ framework::Variable* CreateVariable(const std::string& name,
     varname = string::Sprintf("%s@%d", varname, id);
   }
 
-  LOG(ERROR) << "creating var " << varname;
   VLOG(3) << "creating var " << varname;
   framework::Variable* var = scope->Var(varname);
   framework::LoDTensor* tensor = var->GetMutable<framework::LoDTensor>();
@@ -183,8 +182,6 @@ std::vector<Variable*> OpBase::ApplyGrad(framework::Scope* scope) {
             << framework::vectorize(var->Get<framework::LoDTensor>().dims()).size();
   }
 
-  LOG(ERROR) << "grad_op_desc_" << grad_op_desc_->Proto()->DebugString();
-
   for (const std::string& outvar : grad_op_desc_->OutputArgumentNames()) {
     VLOG(3) << "op grad output var " << outvar;
     block_->FindRecursiveOrCreateVar(outvar);
@@ -194,8 +191,6 @@ std::vector<Variable*> OpBase::ApplyGrad(framework::Scope* scope) {
       framework::VarDesc* var_desc = block_->FindVar(outvar);
       if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
         var->GetMutable<framework::LoDTensor>();
-        // framework::Tensor* tensor = var->GetMutable<framework::LoDTensor>();
-        // tensor->mutable_data(platform::CPUPlace());
       } else {
         LOG(ERROR) << "tracer doesn't support yet";
       }
diff --git a/paddle/fluid/operators/cross_entropy_op.h b/paddle/fluid/operators/cross_entropy_op.h
index 2500c0443f5f276a85ee3e93a0ec08b4eea7a728..f123e11542d85c904a81fe2a87f59ab52511cc15 100644
--- a/paddle/fluid/operators/cross_entropy_op.h
+++ b/paddle/fluid/operators/cross_entropy_op.h
@@ -110,8 +110,6 @@ class CrossEntropyGradientOpKernel : public framework::OpKernel<T> {
     auto* dy = ctx.Input<Tensor>(framework::GradVarName("Y"));
     auto* label = ctx.Input<Tensor>("Label");
     auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
-    LOG(ERROR) << "CROSS ENTROPY GRAD DX: "
-               << ctx.op().Output(framework::GradVarName("X"));
     T* dx_data = dx->mutable_data<T>(ctx.GetPlace());
 
     // Following computation only depends on the last dimension size. So it's
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index dde08a79d229df7711abef674e992c984fde3022..3dc23bd06062b838283bea309c66774b9c4f9852 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1281,8 +1281,6 @@ class Block(object):
         """
         op_desc = self.desc.append_op()
         op = Operator(block=self, desc=op_desc, *args, **kwargs)
-        print("append_op", kwargs.get("type"), kwargs.get("stop_gradient",
-                                                          False))
         if _in_imperative_mode():
             _imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
                                        [v._ivar for v in op.outputs], self.desc,
@@ -1336,8 +1334,6 @@ class Block(object):
     def _prepend_op(self, *args, **kwargs):
         op_desc = self.desc._prepend_op()
         op = Operator(self, op_desc, *args, **kwargs)
-        print("prepend_op", kwargs.get("type"), kwargs.get("stop_gradient",
-                                                           False))
         if _in_imperative_mode():
             _imperative_tracer().trace(op.iop, [v._ivar for v in op.inputs],
                                        [v._ivar for v in op.outputs], self.desc,
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py
index 85b613bddca3e8b047b1acfdb4eb24f8658368f1..9d1e07999894bc4b2c08d4a096e30d5592fe0dbc 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py
@@ -115,9 +115,7 @@ class TestImperativeMnist(unittest.TestCase):
             label._stop_gradient = True
 
             predict = mnist(img)
-            print(predict.shape, predict.dtype, label.shape, label.dtype)
             out = fluid.layers.cross_entropy(predict, label)
-            print(out.shape, out.dtype)
             out._backward()
             filter_grad = mnist._simple_img_conv_pool_1._conv2d._filter_param._gradient(
             )