diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 44895ff3393d5af8be9e8dd32e63fc8bc88a8a20..f751a9b4cab4ddccea90059532c6346ba752279b 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -50,7 +50,6 @@ paddle.fluid.DataFeedDesc.set_use_slots (ArgSpec(args=['self', 'use_slots_name']
 paddle.fluid.CompiledProgram ('paddle.fluid.compiler.CompiledProgram', ('document', '598d294107d44d7620bce76527a92c37'))
 paddle.fluid.CompiledProgram.__init__ (ArgSpec(args=['self', 'program_or_graph', 'build_strategy'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.CompiledProgram.with_data_parallel (ArgSpec(args=['self', 'loss_name', 'build_strategy', 'exec_strategy', 'share_vars_from', 'places'], varargs=None, keywords=None, defaults=(None, None, None, None, None)), ('document', '1c7c6171bbf6d77f2fce0166aa0ec43b'))
-paddle.fluid.CompiledProgram.with_inference_optimize (ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=None), ('document', '9e5b009d850191a010e859189c127fd8'))
 paddle.fluid.ExecutionStrategy ('paddle.fluid.core_avx.ExecutionStrategy', ('document', '535ce28c4671176386e3cd283a764084'))
 paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core_avx.ParallelExecutor.ExecutionStrategy) -> None
 paddle.fluid.BuildStrategy ('paddle.fluid.core_avx.BuildStrategy', ('document', 'eec64b9b7cba58b0a63687b4c34ffe56'))
diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py
index 0b9c7124f52a0f347bb9e40af273fdf4e155a81b..69658dbfb7327f6dc35cf714c1c694fe62c3638a 100644
--- a/python/paddle/fluid/compiler.py
+++ b/python/paddle/fluid/compiler.py
@@ -237,7 +237,7 @@ class CompiledProgram(object):
 
         return self
 
-    def with_inference_optimize(self, config):
+    def _with_inference_optimize(self, config):
         """
         Add inference optimize
         Args:
diff --git a/python/paddle/fluid/sampcd_processor.py b/python/paddle/fluid/sampcd_processor.py
index f632f8069a026d314e9e5eb05325834e468bb364..6597b9378236f1b56ba7df32b0d7ba360fdb6a1b 100644
--- a/python/paddle/fluid/sampcd_processor.py
+++ b/python/paddle/fluid/sampcd_processor.py
@@ -740,7 +740,7 @@ wlist_inneed = [
     "DistributeTranspilerConfig.min_block_size",
     "ExecutionStrategy.allow_op_delay", "load", "Accuracy.update", "ChunkEvaluator.update",
     "ExecutionStrategy.num_iteration_per_drop_scope",
-    "ExecutionStrategy.num_threads", "CompiledProgram.with_inference_optimize",
+    "ExecutionStrategy.num_threads", "CompiledProgram._with_inference_optimize",
     "CompositeMetric.add_metric", "CompositeMetric.update", "CompositeMetric.eval",
     "DetectionMAP.get_map_var", "MetricBase", "MetricBase.reset",
     "MetricBase.get_config", "MetricBase.update",
diff --git a/python/paddle/fluid/tests/book/test_word2vec.py b/python/paddle/fluid/tests/book/test_word2vec.py
index 487a29c8391231471737a25d521770ebbca18673..cfa6b1a74006c8b0f9792eaa302f1d11a0dab4ee 100644
--- a/python/paddle/fluid/tests/book/test_word2vec.py
+++ b/python/paddle/fluid/tests/book/test_word2vec.py
@@ -214,7 +214,7 @@ def infer(use_cuda, save_dirname=None):
         infer_config.device = 0
         infer_config.fraction_of_gpu_memory = 0.15
         compiled_program = fluid.compiler.CompiledProgram(inference_program)
-        compiled_program.with_inference_optimize(infer_config)
+        compiled_program._with_inference_optimize(infer_config)
         assert compiled_program._is_inference is True
         infer_outputs = exe.run(compiled_program, feed=infer_inputs)
         np_data = np.array(results[0])