diff --git a/python/paddle/distributed/auto_parallel/engine.py b/python/paddle/distributed/auto_parallel/engine.py
index a2e1477f8873c9ec69e72cd3cfccede580695386..2eaefee6cd5fb24512546c0fadeced4ba85c75da 100644
--- a/python/paddle/distributed/auto_parallel/engine.py
+++ b/python/paddle/distributed/auto_parallel/engine.py
@@ -240,8 +240,8 @@ class Engine:
                 else:
                     specs.append(spec.batch(batch_size))
             elif isinstance(item, (Variable, core.VarBase, core.eager.Tensor)):
-                _adjust_item_spec(num_shards, spec)
                 spec = InputSpec.from_tensor(item, name)
+                _adjust_item_spec(num_shards, spec)
                 if batch_size is None:
                     specs.append(spec)
                 else:
@@ -1508,10 +1508,10 @@ class Engine:
             strict (bool, optional): Whether to skip the loading of mismatch
                 parameter or raise an error when mismatch happens (not found
                 the parameter in file storing model states of or receives a
-                mismatch shape). Default: False.
+                mismatch shape). Default: True.
             load_optimizer (bool, optional): If True, the stored optimizer
                 states is restored. Otherwise, the optimizer states is initialized
-                from scratch. Default: False.
+                from scratch. Default: True.

         Returns:
             None
diff --git a/python/paddle/distributed/passes/auto_parallel_fp16.py b/python/paddle/distributed/passes/auto_parallel_fp16.py
index 541901f0c7665add0306d78e786f29d5f0be3396..35a4831af3acef07cc43b2a93868df76bb8e8aba 100644
--- a/python/paddle/distributed/passes/auto_parallel_fp16.py
+++ b/python/paddle/distributed/passes/auto_parallel_fp16.py
@@ -181,7 +181,8 @@ class FP16State(object):
         try:
            var = block.var(var_name)
         except ValueError as e:
-            var = self.program.global_block().var(var_name)
+            var = block._var_recursive(var_name)
+            # var = self.program.global_block().var(var_name)
         # NOTE(JZ-LIANG) "array_" is a hack to adopt for ernie3.0 inference, since there is
         # a trick which make the LOD_TENSOR_ARRAY to the float32 in while block to reset the LOD_TENSOR_ARRAY
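
Context for the auto_parallel_fp16.py hunk above: `block.var(var_name)` only searches the block it is called on and raises `ValueError` when the variable lives in an ancestor block, which is what happens when the FP16 pass rewrites ops inside a control-flow (while) sub-block. Falling back to `self.program.global_block()` misses variables owned by intermediate parent blocks, whereas `block._var_recursive(var_name)` climbs the block hierarchy until the name resolves. The sketch below only illustrates that lookup order, assuming the usual `has_var`, `parent_idx`, and `program` attributes on a static-graph block; `_resolve_var_sketch` is a hypothetical helper, not Paddle code.

def _resolve_var_sketch(block, var_name):
    """Roughly the lookup Block._var_recursive performs: check the current
    block, then walk parent blocks toward the global block, raising
    ValueError only when no enclosing block holds the variable."""
    cur = block
    while True:
        if cur.has_var(var_name):
            return cur.var(var_name)
        if cur.parent_idx == -1:  # the global block has no parent
            raise ValueError(f"{var_name} not found in this block or its parents")
        cur = cur.program.block(cur.parent_idx)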