diff --git a/paddle/fluid/operators/optimizers/adam_op.h b/paddle/fluid/operators/optimizers/adam_op.h
index aabb71c556a03c442494711a3899945919b832ea..7dd5a8783a5479d0fba2794f3121cfbd5041b016 100644
--- a/paddle/fluid/operators/optimizers/adam_op.h
+++ b/paddle/fluid/operators/optimizers/adam_op.h
@@ -357,6 +357,9 @@ class AdamOpKernel : public framework::OpKernel<T> {
       if (inner_op_parallelism > 1 &&
           FLAGS_min_param_size_to_use_multithread > 0 &&
           param.numel() > FLAGS_min_param_size_to_use_multithread) {
+        VLOG(3) << "use multi thread, inner_op_parallelism="
+                << inner_op_parallelism << " min_param_size_to_use_multithread"
+                << FLAGS_min_param_size_to_use_multithread;
         std::vector<std::future<void>> fs;
         int64_t block_size = param.numel() / inner_op_parallelism;
         for (int i = 0; i < inner_op_parallelism; ++i) {