Unverified · Commit 6e2d4321, authored by Candy2Tang, committed by GitHub

[xdoctest][task 143] Reformat example code with google style in quantization/quantization_pass.py (#56240)

* [xdoctest][task 143] test=docs_preview

* test=document_fix

* fix indent

* fix indent in python/paddle/static/quantization/quantization_pass.py

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent 7fbe3424
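For reference, the google-style / xdoctest convention applied throughout this patch looks roughly like the sketch below. The function and values are hypothetical placeholders, not taken from the repository; only the ">>> " prompt, the "... " continuation prefix, literal expected-output lines, and the "# doctest: +SKIP" directive are the conventions this change standardizes on.

    .. code-block:: python

        >>> # Each statement carries a ">>> " prompt; continuations use "... ".
        >>> def scale(x, factor=2):   # hypothetical helper, for illustration only
        ...     return x * factor
        >>> print(scale(3))
        6
        >>> # Output that is not reproducible (addresses, device-dependent values)
        >>> # is excluded from checking with a skip directive.
        >>> # doctest: +SKIP
        >>> print(object())
        <object object at 0x7f0000000000>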
@@ -68,17 +68,17 @@ class QuantConfig:
weight(QuanterFactory): The global quantizer used to quantize the weights.
Examples:
.. code-block:: python
>>> from paddle.quantization import QuantConfig
>>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> q_config = QuantConfig(activation=quanter, weight=quanter)
>>> print(q_config)
Global config:
activation: FakeQuanterWithAbsMaxObserver(name=None,moving_rate=0.9,bit_length=8,dtype=float32)
weight: FakeQuanterWithAbsMaxObserver(name=None,moving_rate=0.9,bit_length=8,dtype=float32)
"""
@@ -112,27 +112,27 @@ class QuantConfig:
weight(QuanterFactory): Quanter used for weights.
Examples:
.. code-block:: python
>>> import paddle
>>> from paddle.nn import Linear
>>> from paddle.quantization import QuantConfig
>>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
>>> class Model(paddle.nn.Layer):
... def __init__(self):
... super().__init__()
... self.fc = Linear(576, 120)
>>> model = Model()
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> q_config = QuantConfig(activation=None, weight=None)
>>> q_config.add_layer_config([model.fc], activation=quanter, weight=quanter)
>>> # doctest: +SKIP
>>> print(q_config)
Global config:
None
Layer prefix config:
{'linear_0': <paddle.quantization.config.SingleLayerConfig object at 0x7fe41a680ee0>}
"""
if isinstance(layer, list):
@@ -161,27 +161,27 @@ class QuantConfig:
weight(QuanterFactory): Quanter used for weights.
Examples:
.. code-block:: python
>>> import paddle
>>> from paddle.nn import Linear
>>> from paddle.quantization import QuantConfig
>>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
>>> class Model(paddle.nn.Layer):
... def __init__(self):
... super().__init__()
... self.fc = Linear(576, 120)
>>> model = Model()
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> q_config = QuantConfig(activation=None, weight=None)
>>> q_config.add_name_config([model.fc.full_name()], activation=quanter, weight=quanter)
>>> # doctest: +SKIP
>>> print(q_config)
Global config:
None
Layer prefix config:
{'linear_0': <paddle.quantization.config.SingleLayerConfig object at 0x7fe41a680fd0>}
"""
if isinstance(layer_name, str):
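Taken together, the two hunks above cover the two ways a per-layer override is registered. The following consolidated sketch is assembled only from calls that appear in this patch (the Linear(576, 120) model is the same placeholder used there), so treat it as illustrative rather than authoritative.

    .. code-block:: python

        >>> import paddle
        >>> from paddle.nn import Linear
        >>> from paddle.quantization import QuantConfig
        >>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver

        >>> class Model(paddle.nn.Layer):
        ...     def __init__(self):
        ...         super().__init__()
        ...         self.fc = Linear(576, 120)

        >>> model = Model()
        >>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
        >>> q_config = QuantConfig(activation=None, weight=None)

        >>> # Register the override by layer instance ...
        >>> q_config.add_layer_config([model.fc], activation=quanter, weight=quanter)
        >>> # ... or by the layer's full name; both show up under "Layer prefix config".
        >>> q_config.add_name_config([model.fc.full_name()], activation=quanter, weight=quanter)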
@@ -81,7 +81,7 @@ def quanter(class_name):
class_name (str) - The name of factory class to be declared.
Examples:
.. code-block:: python
>>> # doctest: +SKIP
>>> # Given codes in ./customized_quanter.py
@@ -43,19 +43,19 @@ class QuantInt8MkldnnPass:
Examples:
.. code-block:: python
>>> # The original graph will be rewrite.
>>> import paddle
>>> from paddle import static
>>> from paddle.static.quantization import QuantInt8MkldnnPass
>>> from paddle.framework import IrGraph
>>> from paddle.framework import core
>>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
>>> place = paddle.CPUPlace()
>>> mkldnn_pass = QuantInt8MkldnnPass(static.global_scope(), place)
>>> mkldnn_pass.apply(graph)
"""
self._scope = _scope
@@ -174,19 +174,18 @@ class QuantizationTransformPass:
Examples:
- .. code-block:: python
-     # The original graph will be rewrite.
-     import paddle.static as static
-     from paddle.static.quantization \
-         import QuantizationTransformPass
-     from paddle.fluid.framework import IrGraph
-     from paddle.framework import core
-     graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
-     place = paddle.CPUPlace()
-     transform_pass = QuantizationTransformPass(static.global_scope(),
-         place)
-     transform_pass.apply(graph)
+ .. code-block:: python
+     >>> # The original graph will be rewrite.
+     >>> import paddle.static as static
+     >>> from paddle.static.quantization import QuantizationTransformPass
+     >>> from paddle.fluid.framework import IrGraph
+     >>> from paddle.framework import core
+     >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
+     >>> place = paddle.CPUPlace()
+     >>> transform_pass = QuantizationTransformPass(static.global_scope(), place)
+     >>> transform_pass.apply(graph)
"""
self._scope = scope
self._place = _get_paddle_place(place)
@@ -2436,7 +2435,7 @@ class QuantizationTransformPassV2(QuantizationTransformPass):
preprocess method works or not. The function's input is non-quantized
activation and function returns processed activation to be quantized.
If None, the activation will be quantized directly. Default is None.
-    optimizer_func(function): Fuction return a optimizer. When 'is_test' is
+    optimizer_func(function): Function return a optimizer. When 'is_test' is
False and user want to use self-defined quantization function and
preprocess function, this function must be set. Default is None.
executor(paddle.Executor): If user want to use self-defined quantization
@@ -2444,19 +2443,20 @@
Default is None.
Examples:
- .. code-block:: python
-     # The original graph will be rewrite.
-     import paddle
-     from paddle.static.quantization \
-         import QuantizationTransformPassV2
-     from paddle.fluid.framework import IrGraph
-     from paddle.framework import core
-     graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
-     place = paddle.CPUPlace()
-     scope = paddle.static.global_scope()
-     transform_pass = QuantizationTransformPassV2(scope, place)
-     transform_pass.apply(graph)
+ .. code-block:: python
+     >>> # The original graph will be rewrite.
+     >>> import paddle
+     >>> import paddle.static as static
+     >>> from paddle.static.quantization import QuantizationTransformPassV2
+     >>> from paddle.fluid.framework import IrGraph
+     >>> from paddle.framework import core
+     >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
+     >>> place = paddle.CPUPlace()
+     >>> scope = paddle.static.global_scope()
+     >>> transform_pass = QuantizationTransformPassV2(scope, place)
+     >>> transform_pass.apply(graph)
"""
self._scope = scope
self._place = _get_paddle_place(place)
@@ -2836,19 +2836,20 @@ class AddQuantDequantPassV2:
scale_dict(dict, optional): calibration ranges of tensors output.
Examples:
- .. code-block:: python
-     # The original graph will be rewrite.
-     import paddle
-     from paddle.static.quantization \
-         import AddQuantDequantPassV2
-     from paddle.fluid.framework import IrGraph
-     from paddle.framework import core
-     graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
-     place = paddle.CPUPlace()
-     scope = paddle.static.global_scope()
-     add_quant_dequant_pass = AddQuantDequantPassV2(scope, place)
-     add_quant_dequant_pass.apply(graph)
+ .. code-block:: python
+     >>> # The original graph will be rewrite.
+     >>> import paddle
+     >>> import paddle.static as static
+     >>> from paddle.static.quantization import AddQuantDequantPassV2
+     >>> from paddle.fluid.framework import IrGraph
+     >>> from paddle.framework import core
+     >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
+     >>> place = paddle.CPUPlace()
+     >>> scope = paddle.static.global_scope()
+     >>> add_quant_dequant_pass = AddQuantDequantPassV2(scope, place)
+     >>> add_quant_dequant_pass.apply(graph)
"""
self._scope = scope
self._place = _get_paddle_place(place)
@@ -3018,19 +3019,20 @@ class ReplaceFakeQuantDequantPass:
quant_bits(int, optional): quantization bit number for activation. Default is 8.
Examples:
- .. code-block:: python
-     # The original graph will be rewrite.
-     import paddle
-     from paddle.static.quantization \
-         import ReplaceFakeQuantDequantPass
-     from paddle.fluid.framework import IrGraph
-     from paddle.framework import core
-     graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
-     place = paddle.CPUPlace()
-     scope = paddle.static.global_scope()
-     replace_pass = ReplaceFakeQuantDequantPass(scope, place)
-     replace_pass.apply(graph)
+ .. code-block:: python
+     >>> # The original graph will be rewrite.
+     >>> import paddle
+     >>> import paddle.static as static
+     >>> from paddle.static.quantization import ReplaceFakeQuantDequantPass
+     >>> from paddle.fluid.framework import IrGraph
+     >>> from paddle.framework import core
+     >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
+     >>> place = paddle.CPUPlace()
+     >>> scope = paddle.static.global_scope()
+     >>> replace_pass = ReplaceFakeQuantDequantPass(scope, place)
+     >>> replace_pass.apply(graph)
"""
self._place = _get_paddle_place(place)
self._scope = scope
@@ -3175,18 +3177,19 @@ class QuantWeightPass:
Examples:
.. code-block:: python
-     # The original graph will be rewrite.
-     import paddle
-     from paddle.static.quantization \
-         import QuantWeightPass
-     from paddle.fluid.framework import IrGraph
-     from paddle.framework import core
-     graph = IrGraph(core.Graph(paddle.static.Program().desc), for_test=False)
-     place = paddle.CPUPlace()
-     scope = paddle.static.global_scope()
-     quant_weight_pass = QuantWeightPass(scope, place)
-     quant_weight_pass.apply(graph)
+     >>> # The original graph will be rewrite.
+     >>> import paddle
+     >>> import paddle.static as static
+     >>> from paddle.static.quantization import QuantWeightPass
+     >>> from paddle.fluid.framework import IrGraph
+     >>> from paddle.framework import core
+     >>> graph = IrGraph(core.Graph(paddle.static.Program().desc), for_test=False)
+     >>> place = paddle.CPUPlace()
+     >>> scope = paddle.static.global_scope()
+     >>> quant_weight_pass = QuantWeightPass(scope, place)
+     >>> quant_weight_pass.apply(graph)
"""
def __init__(
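All of the pass examples reformatted in this change share the same IR-graph setup; the sketch below consolidates it once, using QuantWeightPass as in the last hunk. Every call is copied from the examples above, so it is a summary of what the patch already shows rather than an independent recipe.

    .. code-block:: python

        >>> import paddle
        >>> import paddle.static as static
        >>> from paddle.framework import core
        >>> from paddle.fluid.framework import IrGraph
        >>> from paddle.static.quantization import QuantWeightPass

        >>> # Build an IR graph from an (empty) static Program ...
        >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False)
        >>> place = paddle.CPUPlace()
        >>> scope = static.global_scope()
        >>> # ... then construct the pass with a scope and a place and apply it to the graph.
        >>> quant_weight_pass = QuantWeightPass(scope, place)
        >>> quant_weight_pass.apply(graph)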