Commit 6cc22b73 authored by HexToString

fix parallel bug

Parent 4117c18f
@@ -215,6 +215,7 @@ TaskHandler<TaskT> TaskExecutor<TaskT>::schedule(
     LOG(ERROR) << "Failed get TaskT from object pool";
     return TaskHandler<TaskT>::valid_handle();
   }
+  task->clear();
   /*
   if (!BatchTasks<TaskT>::check_valid(in, out, _overrun)) {
...
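Context for the task->clear() call above (an editor's note, not part of the original commit): the log message in this hunk shows that Task objects are drawn from an object pool, so a recycled instance still carries whatever state the previous request left in it, and resetting it before reuse is presumably what closes the parallel bug. Below is a minimal, self-contained C++ sketch of that reuse hazard and the explicit-reset fix; PooledTask and TinyPool are hypothetical stand-ins, not types from Paddle Serving.

#include <iostream>
#include <vector>

// Hypothetical stand-in for the pooled Task type: a recycled object keeps
// whatever state its previous user left behind unless reset explicitly.
struct PooledTask {
  int read_fd = -1;
  std::vector<int> pending;

  // Plays the role of Task::clear() in the patch: put every field back
  // into its freshly-constructed state before the object is used again.
  void clear() {
    read_fd = -1;
    pending.clear();
  }
};

// Toy object pool: hands back a previously released object when available.
struct TinyPool {
  std::vector<PooledTask*> free_list;
  PooledTask* get() {
    if (!free_list.empty()) {
      PooledTask* t = free_list.back();
      free_list.pop_back();
      return t;  // reused object, fields still hold old values
    }
    return new PooledTask();
  }
  void put(PooledTask* t) { free_list.push_back(t); }
};

int main() {
  TinyPool pool;
  PooledTask* a = pool.get();
  a->read_fd = 42;
  a->pending.push_back(7);
  pool.put(a);  // released with stale state

  PooledTask* b = pool.get();  // same object handed back
  b->clear();                  // without this, read_fd == 42 and pending is non-empty
  std::cout << b->read_fd << " " << b->pending.size() << std::endl;  // prints: -1 0
  delete b;
  return 0;
}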
@@ -99,7 +99,40 @@ struct Task {
     outLodTensorVector.clear();
   }
   ~Task() {
+    read_fd = -1;
+    write_fd = -1;
+    owner_tid = -1;
+    inVectorT_ptr = NULL;
+    outVectorT_ptr = NULL;
+    set_feed_lod_index.clear();
+    set_feed_nobatch_index.clear();
+    vector_fetch_lod_index.clear();
+    set_fetch_nobatch_index.clear();
+    rem = -1;
+    total_feed_batch = 0;
+    taskmeta_num = 0;
+    index.store(0, butil::memory_order_relaxed);
     THREAD_MUTEX_DESTROY(&task_mut);
+    fetch_init = false;
+    outLodTensorVector.clear();
+  }
+  void clear() {
+    read_fd = -1;
+    write_fd = -1;
+    owner_tid = -1;
+    inVectorT_ptr = NULL;
+    outVectorT_ptr = NULL;
+    set_feed_lod_index.clear();
+    set_feed_nobatch_index.clear();
+    vector_fetch_lod_index.clear();
+    set_fetch_nobatch_index.clear();
+    rem = -1;
+    total_feed_batch = 0;
+    taskmeta_num = 0;
+    index.store(0, butil::memory_order_relaxed);
+    THREAD_MUTEX_INIT(&task_mut, NULL);
+    fetch_init = false;
     outLodTensorVector.clear();
   }
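One detail in the hunk above worth a side note (editor's addition, not in the original commit): clear() re-initializes the mutex with THREAD_MUTEX_INIT, while the destructor pairs with THREAD_MUTEX_DESTROY. Assuming those macros wrap the pthread calls, this matches the pthread rule that a mutex which has been destroyed must be initialized again before it can be locked. A minimal sketch of that lifecycle rule, using plain pthread calls rather than the Paddle Serving macros:

#include <pthread.h>
#include <cstdio>

int main() {
  pthread_mutex_t mut;

  // First lifetime: init -> lock/unlock -> destroy.
  pthread_mutex_init(&mut, NULL);
  pthread_mutex_lock(&mut);
  pthread_mutex_unlock(&mut);
  pthread_mutex_destroy(&mut);

  // Reusing the mutex after destroy requires another init;
  // locking a destroyed mutex is undefined behavior.
  pthread_mutex_init(&mut, NULL);
  pthread_mutex_lock(&mut);
  pthread_mutex_unlock(&mut);
  pthread_mutex_destroy(&mut);

  std::printf("mutex reused after re-init\n");
  return 0;
}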
@@ -323,7 +356,7 @@ struct Task {
     size_t feedvar_index = vector_fetch_lod_index[index];
     // Because PaddleTensor's resize implementation clears the data each time, the total length must be counted first.
     for (size_t taskmeta_index = 0; taskmeta_index < taskmeta_num;
-         ++taskmeta_num) {
+         ++taskmeta_index) {
       data_length +=
           outLodTensorVector[taskmeta_index][index].data.length();
       lod_length += outLodTensorVector[taskmeta_index][index].lod[0].size();
@@ -347,7 +380,7 @@ struct Task {
     size_t once_lod_length = 0;
     size_t last_lod_value = fetchVarTensor.lod[0][lod_length_offset];
     for (size_t taskmeta_index = 0; taskmeta_index < taskmeta_num;
-         ++taskmeta_num) {
+         ++taskmeta_index) {
       void* dst_ptr = fetchVarTensor.data.data() + data_length_offset;
       void* source_ptr =
           outLodTensorVector[taskmeta_index][index].data.data();
...
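A note on the two loop fixes above (editor's addition, not in the original commit): the old code incremented taskmeta_num, the loop bound, instead of taskmeta_index, so the index never advanced while the bound kept growing. The corrected loops implement a two-pass merge of per-taskmeta LoD output slices: first sum the lengths (because resizing the destination clears it), then copy each slice at a running offset. A minimal sketch of that pattern, using a std::vector<std::string> as a hypothetical stand-in for outLodTensorVector[taskmeta_index][index].data:

#include <cstring>
#include <iostream>
#include <string>
#include <vector>

int main() {
  // Hypothetical per-taskmeta output slices to be merged into one tensor buffer.
  std::vector<std::string> slices = {"abc", "de", "fghi"};

  // Pass 1: total length first, since resizing the destination clears it.
  size_t data_length = 0;
  for (size_t taskmeta_index = 0; taskmeta_index < slices.size();
       ++taskmeta_index) {  // the buggy code advanced the bound, not the index
    data_length += slices[taskmeta_index].length();
  }

  std::string merged;
  merged.resize(data_length);

  // Pass 2: copy each slice into the combined buffer at a running offset.
  size_t data_length_offset = 0;
  for (size_t taskmeta_index = 0; taskmeta_index < slices.size();
       ++taskmeta_index) {
    std::memcpy(&merged[data_length_offset],
                slices[taskmeta_index].data(),
                slices[taskmeta_index].length());
    data_length_offset += slices[taskmeta_index].length();
  }

  std::cout << merged << std::endl;  // prints: abcdefghi
  return 0;
}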