diff --git a/paddle/operators/nccl_op.cc b/paddle/operators/nccl_op.cc
index 8e4edb78bba61ba9deaf5248040b352405cc0856..ae912d7f362efc82b50948791d7f6e81cb997d14 100644
--- a/paddle/operators/nccl_op.cc
+++ b/paddle/operators/nccl_op.cc
@@ -40,28 +40,21 @@ class NCCLInitOp : public framework::OperatorBase {
     // A parallel_do may not use all the gpus. For example, the batch size is 7
     // in the last batch while we have 8 gpus. In this case, parallel_do will
     // create 7 parallel scopes, so ncclInitOp should create 7 gpu peers
-    LOG(INFO) << "---------------";
     auto &parallel_scopes = scope.FindVar(Input(kParallelScopes))
                                 ->Get<std::vector<framework::Scope *>>();
-    LOG(INFO) << "---------------";
     std::vector<int> gpus(parallel_scopes.size());
     for (int i = 0; i < static_cast<int>(parallel_scopes.size()); ++i) {
       gpus[i] = i;
     }
-    LOG(INFO) << "---------------";
     PADDLE_ENFORCE(!gpus.empty(), "NCCL init with 0 gpus.");
-    LOG(INFO) << "---------------";

     if (scope.FindVar(name) == nullptr) {
       PADDLE_THROW("Output(Communicator) is needed for ncclInit operator.");
     }
-    LOG(INFO) << "---------------";

     platform::Communicator *comm =
         scope.FindVar(name)->GetMutable<platform::Communicator>();
-    LOG(INFO) << "---------------";
     comm->InitAll(gpus);
-    LOG(INFO) << "---------------";
   }
 };