Unverified  Commit 85ea9cef  authored by niuliling123, committed by GitHub

fix autotune (#42249)

fix autotune in reader.py
Cause of the bug: the autotune dataloader was created by plain assignment from the original dataloader (new_dataloader = old_dataloader), which makes both names refer to the same object rather than creating a copy. When new_dataloader's dataset was later swapped out during num_workers selection, old_dataloader's dataset was modified as well, so the dataset the model actually ran with no longer matched the intended dataset.
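For illustration only, a minimal sketch with a hypothetical Loader class (not Paddle's DataLoader) showing why plain assignment aliases the original loader while copy.copy produces a separate object whose dataset attribute can be rebound safely:

import copy

class Loader:
    # Hypothetical stand-in for a dataloader that holds a dataset.
    def __init__(self, dataset):
        self.dataset = dataset

old_loader = Loader(dataset=[1, 2, 3, 4])

# Plain assignment: both names point at the same object, so rebinding
# the "copy"'s dataset also changes the original loader's dataset.
aliased = old_loader
aliased.dataset = [1, 2]
assert old_loader.dataset == [1, 2]

# Shallow copy: a new Loader object; rebinding its dataset attribute
# leaves the original loader untouched (the approach taken in this commit).
old_loader.dataset = [1, 2, 3, 4]
copied = copy.copy(old_loader)
copied.dataset = [1, 2]
assert old_loader.dataset == [1, 2, 3, 4]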
Parent c79b019b
@@ -19,6 +19,7 @@ import numpy as np
 import threading
 import paddle
 import time
+import copy
 from .framework import Program, Variable, program_guard, default_main_program, default_startup_program, _non_static_mode, cpu_places, _current_expected_place, _in_eager_without_dygraph_check
 from .executor import global_scope
@@ -214,7 +215,7 @@ class AuToTune(object):
         return sub_dataset

     def get_autotune_loader(self):
-        loader = self.loader
+        loader = copy.copy(self.loader)
         batch_size = self.loader.batch_sampler.batch_size
         if isinstance(self.loader.batch_sampler,
                       paddle.io.DistributedBatchSampler):
...