未验证 提交 6e2d4321 编写于 作者: C Candy2Tang 提交者: GitHub

[xdoctest][task 143] Reformat example code with google style in...

[xdoctest][task 143] Reformat example code with google style in quantization/quantization_pass.py (#56240)

* [xdoctest][task 143] test=docs_preview

* test=document_fix

* fix indent

* fix indent in python/paddle/static/quantization/quantization_pass.py

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
上级 7fbe3424
...@@ -175,18 +175,17 @@ class QuantizationTransformPass: ...@@ -175,18 +175,17 @@ class QuantizationTransformPass:
Examples: Examples:
.. code-block:: python .. code-block:: python
# The original graph will be rewrite.
import paddle.static as static >>> # The original graph will be rewritten.
from paddle.static.quantization \ >>> import paddle.static as static
import QuantizationTransformPass >>> from paddle.static.quantization import QuantizationTransformPass
from paddle.fluid.framework import IrGraph >>> from paddle.fluid.framework import IrGraph
from paddle.framework import core >>> from paddle.framework import core
graph = IrGraph(core.Graph(static.Program().desc), for_test=False) >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
place = paddle.CPUPlace() >>> place = paddle.CPUPlace()
transform_pass = QuantizationTransformPass(static.global_scope(), >>> transform_pass = QuantizationTransformPass(static.global_scope(), place)
place) >>> transform_pass.apply(graph)
transform_pass.apply(graph)
""" """
self._scope = scope self._scope = scope
self._place = _get_paddle_place(place) self._place = _get_paddle_place(place)
...@@ -2436,7 +2435,7 @@ class QuantizationTransformPassV2(QuantizationTransformPass): ...@@ -2436,7 +2435,7 @@ class QuantizationTransformPassV2(QuantizationTransformPass):
preprocess method works or not. The function's input is non-quantized preprocess method works or not. The function's input is non-quantized
activation and function returns processed activation to be quantized. activation and function returns processed activation to be quantized.
If None, the activation will be quantized directly. Default is None. If None, the activation will be quantized directly. Default is None.
optimizer_func(function): Fuction return a optimizer. When 'is_test' is optimizer_func(function): Function returns an optimizer. When 'is_test' is
False and user want to use self-defined quantization function and False and user want to use self-defined quantization function and
preprocess function, this function must be set. Default is None. preprocess function, this function must be set. Default is None.
executor(paddle.Executor): If user want to use self-defined quantization executor(paddle.Executor): If user want to use self-defined quantization
...@@ -2445,18 +2444,19 @@ class QuantizationTransformPassV2(QuantizationTransformPass): ...@@ -2445,18 +2444,19 @@ class QuantizationTransformPassV2(QuantizationTransformPass):
Examples: Examples:
.. code-block:: python .. code-block:: python
# The original graph will be rewrite.
import paddle >>> # The original graph will be rewritten.
from paddle.static.quantization \ >>> import paddle
import QuantizationTransformPassV2 >>> import paddle.static as static
from paddle.fluid.framework import IrGraph >>> from paddle.static.quantization import QuantizationTransformPassV2
from paddle.framework import core >>> from paddle.fluid.framework import IrGraph
>>> from paddle.framework import core
graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
place = paddle.CPUPlace() >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
scope = paddle.static.global_scope() >>> place = paddle.CPUPlace()
transform_pass = QuantizationTransformPassV2(scope, place) >>> scope = paddle.static.global_scope()
transform_pass.apply(graph) >>> transform_pass = QuantizationTransformPassV2(scope, place)
>>> transform_pass.apply(graph)
""" """
self._scope = scope self._scope = scope
self._place = _get_paddle_place(place) self._place = _get_paddle_place(place)
...@@ -2837,18 +2837,19 @@ class AddQuantDequantPassV2: ...@@ -2837,18 +2837,19 @@ class AddQuantDequantPassV2:
Examples: Examples:
.. code-block:: python .. code-block:: python
# The original graph will be rewrite.
import paddle >>> # The original graph will be rewritten.
from paddle.static.quantization \ >>> import paddle
import AddQuantDequantPassV2 >>> import paddle.static as static
from paddle.fluid.framework import IrGraph >>> from paddle.static.quantization import AddQuantDequantPassV2
from paddle.framework import core >>> from paddle.fluid.framework import IrGraph
>>> from paddle.framework import core
graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
place = paddle.CPUPlace() >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
scope = paddle.static.global_scope() >>> place = paddle.CPUPlace()
add_quant_dequant_pass = AddQuantDequantPassV2(scope, place) >>> scope = paddle.static.global_scope()
add_quant_dequant_pass.apply(graph) >>> add_quant_dequant_pass = AddQuantDequantPassV2(scope, place)
>>> add_quant_dequant_pass.apply(graph)
""" """
self._scope = scope self._scope = scope
self._place = _get_paddle_place(place) self._place = _get_paddle_place(place)
...@@ -3019,18 +3020,19 @@ class ReplaceFakeQuantDequantPass: ...@@ -3019,18 +3020,19 @@ class ReplaceFakeQuantDequantPass:
Examples: Examples:
.. code-block:: python .. code-block:: python
# The original graph will be rewrite.
import paddle >>> # The original graph will be rewritten.
from paddle.static.quantization \ >>> import paddle
import ReplaceFakeQuantDequantPass >>> import paddle.static as static
from paddle.fluid.framework import IrGraph >>> from paddle.static.quantization import ReplaceFakeQuantDequantPass
from paddle.framework import core >>> from paddle.fluid.framework import IrGraph
>>> from paddle.framework import core
graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
place = paddle.CPUPlace() >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
scope = paddle.static.global_scope() >>> place = paddle.CPUPlace()
replace_pass = ReplaceFakeQuantDequantPass(scope, place) >>> scope = paddle.static.global_scope()
replace_pass.apply(graph) >>> replace_pass = ReplaceFakeQuantDequantPass(scope, place)
>>> replace_pass.apply(graph)
""" """
self._place = _get_paddle_place(place) self._place = _get_paddle_place(place)
self._scope = scope self._scope = scope
...@@ -3175,18 +3177,19 @@ class QuantWeightPass: ...@@ -3175,18 +3177,19 @@ class QuantWeightPass:
Examples: Examples:
.. code-block:: python .. code-block:: python
# The original graph will be rewrite.
import paddle >>> # The original graph will be rewritten.
from paddle.static.quantization \ >>> import paddle
import QuantWeightPass >>> import paddle.static as static
from paddle.fluid.framework import IrGraph >>> from paddle.static.quantization import QuantWeightPass
from paddle.framework import core >>> from paddle.fluid.framework import IrGraph
>>> from paddle.framework import core
graph = IrGraph(core.Graph(paddle.static.Program().desc), for_test=False)
place = paddle.CPUPlace() >>> graph = IrGraph(core.Graph(paddle.static.Program().desc), for_test=False)
scope = paddle.static.global_scope() >>> place = paddle.CPUPlace()
quant_weight_pass = QuantWeightPass(scope, place) >>> scope = paddle.static.global_scope()
quant_weight_pass.apply(graph) >>> quant_weight_pass = QuantWeightPass(scope, place)
>>> quant_weight_pass.apply(graph)
""" """
def __init__( def __init__(
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册