diff --git a/doc/fluid/api/transpiler.rst b/doc/fluid/api/transpiler.rst
index 943d39331d26c05764c90cb24f6774997c976bfe..d2ac04f1449c32cb414cea1b76d7469bbe9ccb85 100644
--- a/doc/fluid/api/transpiler.rst
+++ b/doc/fluid/api/transpiler.rst
@@ -14,6 +14,15 @@ DistributeTranspiler
     :members:
     :noindex:
 
+.. _api_fluid_transpiler_InferenceTranspiler:
+
+InferenceTranspiler
+-------------------
+
+.. autoclass:: paddle.fluid.transpiler.InferenceTranspiler
+    :members:
+    :noindex:
+
 .. _api_fluid_transpiler_memory_optimize:
 
 memory_optimize
diff --git a/python/paddle/fluid/transpiler/inference_transpiler.py b/python/paddle/fluid/transpiler/inference_transpiler.py
index d32c69d148dfa1633ce344611ca3fe7879a234e9..b8afeae5ebd6ef7948a7c0c2775f419af461da04 100644
--- a/python/paddle/fluid/transpiler/inference_transpiler.py
+++ b/python/paddle/fluid/transpiler/inference_transpiler.py
@@ -19,7 +19,7 @@
 from ..framework import Program
 from ..executor import global_scope
 
 
-class InferenceTranspiler:
+class InferenceTranspiler(object):
     '''
     Convert the fluid program to optimized inference program.
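
For context, a minimal usage sketch of the class this patch documents. It assumes the standard load_inference_model workflow and a hypothetical model directory params_dirname, neither of which is part of this patch:

import paddle.fluid as fluid

place = fluid.CPUPlace()
exe = fluid.Executor(place)
# Load a previously saved inference model (params_dirname is a hypothetical path).
[inference_program, feed_names, fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)
# Rewrite the loaded program in place for faster inference
# (e.g. fusing batch_norm into the preceding conv where possible).
t = fluid.transpiler.InferenceTranspiler()
t.transpile(inference_program, place)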