Unverified commit 6e2d4321 authored by Candy2Tang, committed by GitHub


[xdoctest][task 143] Reformat example code with google style in quantization/quantization_pass.py (#56240)

* [xdoctest][task 143] test=docs_preview

* test=document_fix

* fix indent

* fix indent in python/paddle/static/quantization/quantization_pass.py

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent 7fbe3424
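For reference, the target layout for these docstrings is the Google-style / xdoctest format used elsewhere in Paddle, as seen in the diff below: the example sits under an indented `.. code-block:: python` directive, every statement carries a `>>>` prompt (continuations use `...`), and expected output follows the statement that produces it. A minimal sketch of that layout, using a made-up function rather than anything from this diff:

    def scale(x, factor=2):
        """Multiply ``x`` by ``factor``.

        Examples:
            .. code-block:: python

                >>> # 'scale' is a hypothetical helper, shown only to illustrate the layout
                >>> scale(3)
                6
                >>> scale(3, factor=10)
                30
        """
        return x * factor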
@@ -68,17 +68,17 @@ class QuantConfig:
    weight(QuanterFactory): The global quantizer used to quantize the weights.

    Examples:
        .. code-block:: python

            >>> from paddle.quantization import QuantConfig
            >>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
            >>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
            >>> q_config = QuantConfig(activation=quanter, weight=quanter)
            >>> print(q_config)
            Global config:
            activation: FakeQuanterWithAbsMaxObserver(name=None,moving_rate=0.9,bit_length=8,dtype=float32)
            weight: FakeQuanterWithAbsMaxObserver(name=None,moving_rate=0.9,bit_length=8,dtype=float32)
    """
@@ -112,27 +112,27 @@ class QuantConfig:
    weight(QuanterFactory): Quanter used for weights.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> from paddle.nn import Linear
            >>> from paddle.quantization import QuantConfig
            >>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver

            >>> class Model(paddle.nn.Layer):
            ...     def __init__(self):
            ...         super().__init__()
            ...         self.fc = Linear(576, 120)

            >>> model = Model()
            >>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
            >>> q_config = QuantConfig(activation=None, weight=None)
            >>> q_config.add_layer_config([model.fc], activation=quanter, weight=quanter)
            >>> # doctest: +SKIP
            >>> print(q_config)
            Global config:
            None
            Layer prefix config:
            {'linear_0': <paddle.quantization.config.SingleLayerConfig object at 0x7fe41a680ee0>}
    """
    if isinstance(layer, list):
@@ -161,27 +161,27 @@ class QuantConfig:
    weight(QuanterFactory): Quanter used for weights.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> from paddle.nn import Linear
            >>> from paddle.quantization import QuantConfig
            >>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver

            >>> class Model(paddle.nn.Layer):
            ...     def __init__(self):
            ...         super().__init__()
            ...         self.fc = Linear(576, 120)

            >>> model = Model()
            >>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
            >>> q_config = QuantConfig(activation=None, weight=None)
            >>> q_config.add_name_config([model.fc.full_name()], activation=quanter, weight=quanter)
            >>> # doctest: +SKIP
            >>> print(q_config)
            Global config:
            None
            Layer prefix config:
            {'linear_0': <paddle.quantization.config.SingleLayerConfig object at 0x7fe41a680fd0>}
    """
    if isinstance(layer_name, str):
...
@@ -81,7 +81,7 @@ def quanter(class_name):
    class_name (str) - The name of factory class to be declared.

    Examples:
        .. code-block:: python

            >>> # doctest: +SKIP
            >>> # Given codes in ./customized_quanter.py
...
@@ -43,19 +43,19 @@ class QuantInt8MkldnnPass:
    Examples:
        .. code-block:: python

            >>> # The original graph will be rewrite.
            >>> import paddle
            >>> from paddle import static
            >>> from paddle.static.quantization import QuantInt8MkldnnPass
            >>> from paddle.framework import IrGraph
            >>> from paddle.framework import core

            >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
            >>> place = paddle.CPUPlace()
            >>> mkldnn_pass = QuantInt8MkldnnPass(static.global_scope(), place)
            >>> mkldnn_pass.apply(graph)
    """
    self._scope = _scope
...
@@ -174,19 +174,18 @@ class QuantizationTransformPass:
    Examples:
        .. code-block:: python

-   # The original graph will be rewrite.
-   import paddle.static as static
-   from paddle.static.quantization \
-       import QuantizationTransformPass
-   from paddle.fluid.framework import IrGraph
-   from paddle.framework import core
-   graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
-   place = paddle.CPUPlace()
-   transform_pass = QuantizationTransformPass(static.global_scope(),
-       place)
-   transform_pass.apply(graph)
+   >>> # The original graph will be rewrite.
+   >>> import paddle.static as static
+   >>> from paddle.static.quantization import QuantizationTransformPass
+   >>> from paddle.fluid.framework import IrGraph
+   >>> from paddle.framework import core
+   >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
+   >>> place = paddle.CPUPlace()
+   >>> transform_pass = QuantizationTransformPass(static.global_scope(), place)
+   >>> transform_pass.apply(graph)
    """
    self._scope = scope
    self._place = _get_paddle_place(place)
@@ -2436,7 +2435,7 @@ class QuantizationTransformPassV2(QuantizationTransformPass):
        preprocess method works or not. The function's input is non-quantized
        activation and function returns processed activation to be quantized.
        If None, the activation will be quantized directly. Default is None.
-   optimizer_func(function): Fuction return a optimizer. When 'is_test' is
+   optimizer_func(function): Function return a optimizer. When 'is_test' is
        False and user want to use self-defined quantization function and
        preprocess function, this function must be set. Default is None.
    executor(paddle.Executor): If user want to use self-defined quantization
@@ -2444,19 +2443,20 @@ class QuantizationTransformPassV2(QuantizationTransformPass):
        Default is None.

    Examples:
        .. code-block:: python

-   # The original graph will be rewrite.
-   import paddle
-   from paddle.static.quantization \
-       import QuantizationTransformPassV2
-   from paddle.fluid.framework import IrGraph
-   from paddle.framework import core
-   graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
-   place = paddle.CPUPlace()
-   scope = paddle.static.global_scope()
-   transform_pass = QuantizationTransformPassV2(scope, place)
-   transform_pass.apply(graph)
+   >>> # The original graph will be rewrite.
+   >>> import paddle
+   >>> import paddle.static as static
+   >>> from paddle.static.quantization import QuantizationTransformPassV2
+   >>> from paddle.fluid.framework import IrGraph
+   >>> from paddle.framework import core
+   >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
+   >>> place = paddle.CPUPlace()
+   >>> scope = paddle.static.global_scope()
+   >>> transform_pass = QuantizationTransformPassV2(scope, place)
+   >>> transform_pass.apply(graph)
    """
    self._scope = scope
    self._place = _get_paddle_place(place)
@@ -2836,19 +2836,20 @@ class AddQuantDequantPassV2:
    scale_dict(dict, optional): calibration ranges of tensors output.

    Examples:
        .. code-block:: python

-   # The original graph will be rewrite.
-   import paddle
-   from paddle.static.quantization \
-       import AddQuantDequantPassV2
-   from paddle.fluid.framework import IrGraph
-   from paddle.framework import core
-   graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
-   place = paddle.CPUPlace()
-   scope = paddle.static.global_scope()
-   add_quant_dequant_pass = AddQuantDequantPassV2(scope, place)
-   add_quant_dequant_pass.apply(graph)
+   >>> # The original graph will be rewrite.
+   >>> import paddle
+   >>> import paddle.static as static
+   >>> from paddle.static.quantization import AddQuantDequantPassV2
+   >>> from paddle.fluid.framework import IrGraph
+   >>> from paddle.framework import core
+   >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
+   >>> place = paddle.CPUPlace()
+   >>> scope = paddle.static.global_scope()
+   >>> add_quant_dequant_pass = AddQuantDequantPassV2(scope, place)
+   >>> add_quant_dequant_pass.apply(graph)
    """
    self._scope = scope
    self._place = _get_paddle_place(place)
@@ -3018,19 +3019,20 @@ class ReplaceFakeQuantDequantPass:
    quant_bits(int, optional): quantization bit number for activation. Default is 8.

    Examples:
        .. code-block:: python

-   # The original graph will be rewrite.
-   import paddle
-   from paddle.static.quantization \
-       import ReplaceFakeQuantDequantPass
-   from paddle.fluid.framework import IrGraph
-   from paddle.framework import core
-   graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
-   place = paddle.CPUPlace()
-   scope = paddle.static.global_scope()
-   replace_pass = ReplaceFakeQuantDequantPass(scope, place)
-   replace_pass.apply(graph)
+   >>> # The original graph will be rewrite.
+   >>> import paddle
+   >>> import paddle.static as static
+   >>> from paddle.static.quantization import ReplaceFakeQuantDequantPass
+   >>> from paddle.fluid.framework import IrGraph
+   >>> from paddle.framework import core
+   >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
+   >>> place = paddle.CPUPlace()
+   >>> scope = paddle.static.global_scope()
+   >>> replace_pass = ReplaceFakeQuantDequantPass(scope, place)
+   >>> replace_pass.apply(graph)
    """
    self._place = _get_paddle_place(place)
    self._scope = scope
@@ -3175,18 +3177,19 @@ class QuantWeightPass:
    Examples:
        .. code-block:: python

-   # The original graph will be rewrite.
-   import paddle
-   from paddle.static.quantization \
-       import QuantWeightPass
-   from paddle.fluid.framework import IrGraph
-   from paddle.framework import core
-   graph = IrGraph(core.Graph(paddle.static.Program().desc), for_test=False)
-   place = paddle.CPUPlace()
-   scope = paddle.static.global_scope()
-   quant_weight_pass = QuantWeightPass(scope, place)
-   quant_weight_pass.apply(graph)
+   >>> # The original graph will be rewrite.
+   >>> import paddle
+   >>> import paddle.static as static
+   >>> from paddle.static.quantization import QuantWeightPass
+   >>> from paddle.fluid.framework import IrGraph
+   >>> from paddle.framework import core
+   >>> graph = IrGraph(core.Graph(paddle.static.Program().desc), for_test=False)
+   >>> place = paddle.CPUPlace()
+   >>> scope = paddle.static.global_scope()
+   >>> quant_weight_pass = QuantWeightPass(scope, place)
+   >>> quant_weight_pass.apply(graph)
    """

    def __init__(
...
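To spot-check the reformatted examples locally, one possible approach, assuming the xdoctest package is installed (the test=docs_preview / test=document_fix tags in the commit message are the hints Paddle's CI uses for documentation-only checks), is a short script along these lines:

    # Possible local spot-check; assumes xdoctest is installed and you run it
    # from a Paddle source checkout. The path is the file touched by this commit.
    import xdoctest

    xdoctest.doctest_module(
        "python/paddle/static/quantization/quantization_pass.py",
        command="all",  # run every doctest-style example found in the module
    )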