diff --git a/paddle/fluid/framework/executor_thread_worker.cc b/paddle/fluid/framework/executor_thread_worker.cc
index c360fe5d8aeb8882ec9abc0e8cb38262e24bb1b3..046bed29c3efaca08b8dbb527990c23cffa648bc 100644
--- a/paddle/fluid/framework/executor_thread_worker.cc
+++ b/paddle/fluid/framework/executor_thread_worker.cc
@@ -195,8 +195,11 @@ void ExecutorThreadWorker::TrainFiles() {
     thread_scope_->DropKids();
   }
 
-  for (int i = 0; i < fetch_var_num; ++i) {
-    fetch_values_[i] = fetch_values_[i] / batch_cnt;
+  if (batch_cnt) {
+    // when the number of files is less than the number of threads
+    for (int i = 0; i < fetch_var_num; ++i) {
+      fetch_values_[i] = fetch_values_[i] / batch_cnt;
+    }
   }
 }
 
diff --git a/python/paddle/fluid/async_executor.py b/python/paddle/fluid/async_executor.py
index 671fe463e570211365af07d9c3c95dad895f61a3..db55e4d070ea6c36251a7f77609db3391092c6b2 100644
--- a/python/paddle/fluid/async_executor.py
+++ b/python/paddle/fluid/async_executor.py
@@ -42,9 +42,6 @@ class DataFeedDesc(object):
     def set_batch_size(self, batch_size):
         self.proto_desc.batch = batch_size
 
-    def get_slot(self, name):
-        return self.proto_desc.multi_slot_desc.slots[self.__name_to_index[name]]
-
     def set_dense_slots(self, dense_slots_name):
         for name in dense_slots_name:
             self.proto_desc.multi_slot_desc.slots[self.__name_to_index[name]].dense = True
@@ -156,6 +153,6 @@ class AsyncExecutor(object):
             fetch = [fetch]
         fetch_var_names = [var.name for var in fetch]
 
-        evaluation = self.executor.run_from_files(program_desc, data_feed, filelist, thread_num, fetch_var_names)
+        evaluation = self.executor.run_from_files(program_desc, data_feed.desc(), filelist, thread_num, fetch_var_names)
         return evaluation
 
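
For readers skimming the first hunk: batch_cnt counts the mini-batches a worker thread actually consumed. When there are fewer input files than threads, some threads never receive a batch, batch_cnt stays 0, and the unguarded floating-point division turns the averaged fetch values into NaN instead of leaving them at their initial zeros. Below is a minimal standalone sketch of the same guard pattern; it is not PaddlePaddle code, and the names (AverageFetchValues, sums) are illustrative only.

// Standalone sketch of the guard added in ExecutorThreadWorker::TrainFiles().
// Names here (AverageFetchValues, sums) are illustrative, not from the patch.
#include <iostream>
#include <vector>

std::vector<float> AverageFetchValues(std::vector<float> sums, int batch_cnt) {
  // Normalize only when this thread actually processed at least one batch;
  // otherwise 0.0f / 0 would yield NaN and pollute the aggregated results.
  if (batch_cnt) {
    for (auto& v : sums) {
      v /= batch_cnt;
    }
  }
  return sums;  // left untouched (zeros in the worker) when batch_cnt == 0
}

int main() {
  auto with_batches = AverageFetchValues({3.0f, 9.0f}, 3);  // {1, 3}
  auto no_batches = AverageFetchValues({0.0f, 0.0f}, 0);    // stays {0, 0}, no NaN
  std::cout << with_batches[0] << " " << no_batches[0] << std::endl;
  return 0;
}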