提交 60d18a8f 编写于 作者: S sneaxiy

fix ut, test=develop

上级 011f0644
......@@ -400,6 +400,7 @@ ir::Graph *BuildStrategy::Apply(ir::Graph *graph,
continue;
}
} else if (pass->Type() == "set_reader_device_count_pass") {
pass->Erase(kPlaces);
pass->SetNotOwned<const std::vector<platform::Place>>(kPlaces, &places);
}
VLOG(1) << "Start Apply Pass " << pass->Type();
......
......@@ -121,12 +121,6 @@ class OrderedMultiDeviceLoDTensorBlockingQueue {
return CurQueue()->Push(lod_tensor_vec);
}
// Move-push a batch of LoDTensors into the sub-queue currently selected by
// CurQueue(); std::move forwards ownership so the tensors are not copied.
bool Push(std::vector<framework::LoDTensor>&& lod_tensor_vec) {
return CurQueue()->Push(std::move(lod_tensor_vec));
}
inline size_t Cap() const { return capacity_; }
inline size_t Size() const {
size_t size = 0;
if (queues_) {
......@@ -137,15 +131,6 @@ class OrderedMultiDeviceLoDTensorBlockingQueue {
return size;
}
// Reopen every per-device sub-queue (when queues_ has been initialized) and
// reset data_index_, the cursor CurQueue() presumably uses to pick the
// current sub-queue, so consumption restarts from the first queue.
// NOTE(review): this method takes no lock, unlike Close() below, which
// guards its work with init_mutex_ — confirm callers serialize ReOpen().
inline void ReOpen() {
if (queues_) {
for (auto& item : *queues_) {
item->ReOpen();
}
}
data_index_ = 0;
}
inline void Close() {
{
std::lock_guard<std::mutex> lock(init_mutex_);
......
......@@ -318,7 +318,6 @@ void BindReader(py::module *module) {
},
py::call_guard<py::gil_scoped_release>())
.def("size", &reader::OrderedMultiDeviceLoDTensorBlockingQueue::Size)
.def("capacity", &reader::OrderedMultiDeviceLoDTensorBlockingQueue::Cap)
.def("close", &reader::OrderedMultiDeviceLoDTensorBlockingQueue::Close)
.def("kill", &reader::OrderedMultiDeviceLoDTensorBlockingQueue::Kill)
.def("wait_for_inited",
......
......@@ -354,5 +354,5 @@ set_tests_properties(test_parallel_executor_test_while_train test_parallel_execu
test_parallel_executor_feed_persistable_var
test_parallel_executor_crf_auto_growth test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass
test_data_norm_op test_imperative_using_non_zero_gpu test_fuse_bn_act_pass
test_optimizer_in_control_flow
test_optimizer_in_control_flow test_dataloader_keep_order
test_buffer_shared_memory_reuse_pass PROPERTIES LABELS "RUN_TYPE=DIST")
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册