From 026c61c02700df2481d3e1dd7a2349844197937e Mon Sep 17 00:00:00 2001
From: Dong Zhihong
Date: Tue, 24 Oct 2017 14:27:56 -0700
Subject: [PATCH] "fix allreduce python test"

---
 python/paddle/v2/framework/tests/test_nccl_allreduce_op.py | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/python/paddle/v2/framework/tests/test_nccl_allreduce_op.py b/python/paddle/v2/framework/tests/test_nccl_allreduce_op.py
index f79dcd664b2..0a9163dd55c 100644
--- a/python/paddle/v2/framework/tests/test_nccl_allreduce_op.py
+++ b/python/paddle/v2/framework/tests/test_nccl_allreduce_op.py
@@ -36,9 +36,6 @@ input_data = [
 ]
 output_data = allreduce(input_data, gpus)
 
-# output_vars = [g_scope.var("Out_"+str(i)).get_tensor()
-#                for i in range(len(gpus))]
-
 
 def thread_allreduce_op(thread_id, gpu_id):
     i = gpu_id
@@ -53,9 +50,6 @@ def thread_allreduce_op(thread_id, gpu_id):
     op = create_op(scope, "ncclAllReduce", inputs, outputs, attrs={})
     place = core.GPUPlace(gpus[i])
     set_input(scope, op, inputs, place)
-    # # print scope.find_var("Out").get_tensor()
-    # # print scope.find_var("X").get_tensor()
-
     print scope.find_var("Communicator").get_communicator()
 
     ctx = core.DeviceContext.create(place)
--
GitLab
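
Note (not part of the patch): the test above checks ncclAllReduce against a host-side helper, allreduce(input_data, gpus), whose body is not shown in this diff. Below is a minimal sketch of such a reference, assuming a sum reduction across the per-GPU inputs; the function name allreduce_reference and its exact semantics are assumptions for illustration, not the repository's actual helper.

import numpy as np

def allreduce_reference(input_data, gpus):
    # Assumed reference behavior: element-wise sum over all per-GPU tensors,
    # with every participant receiving an identical copy of the result.
    assert len(input_data) == len(gpus)
    reduced = np.sum(np.stack(input_data), axis=0)
    return [reduced.copy() for _ in gpus]

# Usage sketch: the expected per-GPU output after an allreduce over two inputs
# is simply their element-wise sum.
gpus = [0, 1]
inputs = [np.random.rand(3, 4).astype("float32") for _ in gpus]
outputs = allreduce_reference(inputs, gpus)
np.testing.assert_allclose(outputs[0], inputs[0] + inputs[1], rtol=1e-5)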