Commit f8141d90 authored by Yu Yang

Debug

Parent 389ea18a
@@ -345,6 +345,7 @@ struct NCCLAllReduceOpHandle : public OpHandle {
  }
  void Wait(platform::DeviceContext *waited_dev) override {
    VLOG(3) << "Wait NCCL AllReduce";
    this->dev_ctx_.at(waited_dev->GetPlace())->Wait();
  }
};
@@ -72,12 +72,12 @@ class ParallelExecutor(unittest.TestCase):
        first_loss = numpy.array(fluid.global_scope().find_var('fetched_var')
                                 .get_lod_tensor_array()[0])
        print first_loss
        #
        # for i in xrange(10):
        #     exe.run([], 'fetched_var')
        # exe.run([loss.name], 'fetched_var')
        # last_loss = numpy.array(fluid.global_scope().find_var('fetched_var')
        #                         .get_lod_tensor_array()[0])
        #
        # print first_loss, last_loss
        # self.assertGreater(first_loss[0], last_loss[0])
        for i in xrange(10):
            exe.run([], 'fetched_var')

        exe.run([loss.name], 'fetched_var')
        last_loss = numpy.array(fluid.global_scope().find_var('fetched_var')
                                .get_lod_tensor_array()[0])

        print first_loss, last_loss
        self.assertGreater(first_loss[0], last_loss[0])
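
The test above follows a simple pattern: fetch the loss once, run a number of iterations, fetch it again, and assert that it decreased. Below is a minimal, self-contained sketch of that pattern. The FakeParallelExecutor class, its decaying loss, and the run() signature are assumptions made for illustration only; they stand in for the real fluid ParallelExecutor and are not its actual API.

import unittest


class FakeParallelExecutor(object):
    """Hypothetical stand-in for the real executor: every run() takes one
    'training step' that shrinks an internal loss value."""

    def __init__(self, start_loss=10.0):
        self._loss = start_loss

    def run(self, fetch_list, fetched_var_name):
        # A real executor would launch the graph and write results into the
        # variable named `fetched_var_name`; here we just decay the loss and
        # return it directly when it is requested.
        self._loss *= 0.9
        if fetch_list:
            return [self._loss]
        return []


class TestLossDecreases(unittest.TestCase):
    def test_loss_goes_down(self):
        exe = FakeParallelExecutor()
        first_loss = exe.run(['loss'], 'fetched_var')[0]
        for _ in range(10):
            exe.run([], 'fetched_var')
        last_loss = exe.run(['loss'], 'fetched_var')[0]
        self.assertGreater(first_loss, last_loss)


if __name__ == '__main__':
    unittest.main()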