diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 7fd63e26e232f06abefff17f8c2108fa307d53a0..c299877baeae7589e532cedbf95d1d1b6d0c7555 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -2854,10 +2854,6 @@ class Program(object):
         self._use_hierarchical_allreduce = False
         self._hierarchical_allreduce_inter_nranks = 0
 
-        # @deprecated(the python memory optimize transpiler is deprecated)
-        # whether the program is optimized by memory_optimize_transpiler
-        self.__is_mem_optimized = False
-
         # if this program has been optimized by distributed optimizer
         # fleet_opt will be given a value
         self._fleet_opt = None
@@ -2869,16 +2865,6 @@ class Program(object):
         # appending gradients times
         self._appending_grad_times = 0
 
-    @property
-    def _is_mem_optimized(self):
-        # if the program is optimized, operator input/outputs
-        # maybe same, which conflict with save_inference_model.
-        return self.__is_mem_optimized
-
-    @_is_mem_optimized.setter
-    def _is_mem_optimized(self, target):
-        self.__is_mem_optimized = target
-
     @property
     def _op_role(self):
         """
diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py
index 5b3e1aca8d938a49a6ae0dea45ce122fddbe7b40..b536bc554cc99f617551b40c5b71d5932c1b6537 100644
--- a/python/paddle/fluid/io.py
+++ b/python/paddle/fluid/io.py
@@ -1028,13 +1028,6 @@ def save_inference_model(dirname,
     if main_program is None:
         main_program = default_main_program()
 
-    if main_program._is_mem_optimized:
-        warnings.warn(
-            "save_inference_model must put before you call memory_optimize. \
-            the memory_optimize will modify the original program, \
-            is not suitable for saving inference model \
-            we save the original program as inference model.",
-            RuntimeWarning)
     elif not isinstance(main_program, Program):
         raise TypeError("program should be as Program type or None")
 