diff --git a/paddle/fluid/operators/ref_by_trainer_id_op.h b/paddle/fluid/operators/ref_by_trainer_id_op.h
index d84c22ff6148083dad1ed6dc5a3044dfbc211e53..2ce577544ae2437b9297da2190fd09b435d5173c 100644
--- a/paddle/fluid/operators/ref_by_trainer_id_op.h
+++ b/paddle/fluid/operators/ref_by_trainer_id_op.h
@@ -26,7 +26,7 @@ class RefByTrainerIdKernel : public framework::OpKernel<T> {
     auto* out = context.Output<framework::Tensor>("Out");
     auto in_list = context.MultiInput<framework::Tensor>("X");
     auto* trainer_id_t = context.Input<framework::Tensor>("TrainerId");
-    int64_t trainer_id;
+    int64_t trainer_id = 0;
     auto* trainer_id_data = trainer_id_t->data<int64_t>();
     if (platform::is_gpu_place(context.GetPlace())) {
 #ifdef PADDLE_WITH_CUDA
@@ -38,7 +38,6 @@ class RefByTrainerIdKernel : public framework::OpKernel<T> {
     } else {
       trainer_id = *trainer_id_data;
     }
-    printf("after get trainer_id %lu\n", trainer_id);
     PADDLE_ENFORCE_LT(trainer_id, in_list.size());
     out->mutable_data<T>(context.GetPlace());
     out->ShareDataWith(*(in_list[trainer_id]));
diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py
index 6ef799a1f42e0815e19cb29ffb7e7b14bc85872a..7c7fba76718e911907bb9bef69b3e8688bbf52fc 100644
--- a/python/paddle/fluid/transpiler/distribute_transpiler.py
+++ b/python/paddle/fluid/transpiler/distribute_transpiler.py
@@ -1588,7 +1588,6 @@ to transpile() call.")
         ref_inputs = []
         for p, p_bak in self.param_bak_list:
             if p.name == param_var.name:
-                print("#### ref inputs: ", param_var.name, p_bak.name)
                 ref_inputs.append(p_bak)
         block.append_op(
             type="ref_by_trainer_id",
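For context, a minimal plain-Python sketch (not the Paddle API; the helper name and the backup-parameter strings below are hypothetical) of the selection that the ref_by_trainer_id kernel above performs: given a list of inputs X and a scalar trainer id, the output aliases X[trainer_id], with the bounds assertion standing in for the PADDLE_ENFORCE_LT check.

    # Sketch of the ref_by_trainer_id selection logic; plain Python
    # lists stand in for the tensor inputs.
    def ref_by_trainer_id(in_list, trainer_id):
        # Mirrors PADDLE_ENFORCE_LT(trainer_id, in_list.size()):
        # trainer_id must index into the per-trainer input list.
        assert trainer_id < len(in_list)
        # Mirrors out->ShareDataWith(*(in_list[trainer_id])): the
        # output is the selected input itself, not a copy.
        return in_list[trainer_id]

    # Each trainer picks the backup parameter that belongs to it.
    param_baks = ["p_bak.trainer_0", "p_bak.trainer_1", "p_bak.trainer_2"]
    print(ref_by_trainer_id(param_baks, 1))  # -> p_bak.trainer_1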