From bfa78cacdfdf7988159419256432d5550a59c730 Mon Sep 17 00:00:00 2001
From: Yang Yang
Date: Sun, 11 Feb 2018 00:11:56 +0000
Subject: [PATCH] clean up log(info)

---
 paddle/operators/nccl_op.cc | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/paddle/operators/nccl_op.cc b/paddle/operators/nccl_op.cc
index 8e4edb78bba..ae912d7f362 100644
--- a/paddle/operators/nccl_op.cc
+++ b/paddle/operators/nccl_op.cc
@@ -40,28 +40,21 @@ class NCCLInitOp : public framework::OperatorBase {
     // A parallel do may not use all the gpus. For example, the batch size is 7
     // in the last batch while we have 8 gpu. In this case, parallel_do will
     // create 7 parallel scopes, so should ncclInitOp create 7 gpu peers
-    LOG(INFO) << "---------------";
     auto &parallel_scopes = scope.FindVar(Input(kParallelScopes))
                                 ->Get<std::vector<framework::Scope *>>();
-    LOG(INFO) << "---------------";
     std::vector<int> gpus(parallel_scopes.size());
     for (int i = 0; i < static_cast<int>(parallel_scopes.size()); ++i) {
       gpus[i] = i;
     }
-    LOG(INFO) << "---------------";
     PADDLE_ENFORCE(!gpus.empty(), "NCCL init with 0 gpus.");
-    LOG(INFO) << "---------------";
 
     if (scope.FindVar(name) == nullptr) {
       PADDLE_THROW("Output(Communicator) is needed for ncclInit operator.");
     }
-    LOG(INFO) << "---------------";
 
     platform::Communicator *comm =
         scope.FindVar(name)->GetMutable<platform::Communicator>();
-    LOG(INFO) << "---------------";
     comm->InitAll(gpus);
-    LOG(INFO) << "---------------";
   }
 };
 
--
GitLab