Unverified Commit 80d3a20b authored by Candy2Tang, committed by GitHub

[xdoctest][task 122] Reformat example code with google style in python/paddle/quantization/qat.py (#56233)

* [xdoctest][task 122] test=docs_preview

* test=document_fix

* fix indent

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent e79133e8
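For context, the old examples were bare code under `.. code-block:: python`, which doctest runners skip; the new `>>>` prompt format is what xdoctest parses and executes in CI. A minimal sketch of checking the reformatted file locally, assuming xdoctest is installed (`pip install xdoctest`) and the repo-relative path below:

    # Hypothetical local check; the path is an assumption based on this PR's title.
    import xdoctest

    # Collects every ">>>" example in the module's docstrings and runs it;
    # expected output lines (like the LeNet repr in the diff below) are
    # compared against what the code actually prints.
    xdoctest.doctest_module("python/paddle/quantization/qat.py", command="all")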
@@ -28,11 +28,12 @@ class QAT(Quantization):
     Examples:
         .. code-block:: python

-            from paddle.quantization import QAT, QuantConfig
-            from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
-            quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
-            q_config = QuantConfig(activation=quanter, weight=quanter)
-            qat = QAT(q_config)
+            >>> from paddle.quantization import QAT, QuantConfig
+            >>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
+            >>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
+            >>> q_config = QuantConfig(activation=quanter, weight=quanter)
+            >>> qat = QAT(q_config)
     """

     def __init__(self, config: QuantConfig):
@@ -53,16 +54,59 @@ class QAT(Quantization):
         Examples:
             .. code-block:: python

-                from paddle.quantization import QAT, QuantConfig
-                from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
-                from paddle.vision.models import LeNet
-                quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
-                q_config = QuantConfig(activation=quanter, weight=quanter)
-                qat = QAT(q_config)
-                model = LeNet()
-                quant_model = qat.quantize(model)
-                print(quant_model)
+                >>> from paddle.quantization import QAT, QuantConfig
+                >>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
+                >>> from paddle.vision.models import LeNet
+                >>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
+                >>> q_config = QuantConfig(activation=quanter, weight=quanter)
+                >>> qat = QAT(q_config)
+                >>> model = LeNet()
+                >>> quant_model = qat.quantize(model)
+                >>> print(quant_model)
+                LeNet(
+                  (features): Sequential(
+                    (0): QuantedConv2D(
+                      (weight_quanter): FakeQuanterWithAbsMaxObserverLayer()
+                      (activation_quanter): FakeQuanterWithAbsMaxObserverLayer()
+                    )
+                    (1): ObserveWrapper(
+                      (_observer): FakeQuanterWithAbsMaxObserverLayer()
+                      (_observed): ReLU()
+                    )
+                    (2): ObserveWrapper(
+                      (_observer): FakeQuanterWithAbsMaxObserverLayer()
+                      (_observed): MaxPool2D(kernel_size=2, stride=2, padding=0)
+                    )
+                    (3): QuantedConv2D(
+                      (weight_quanter): FakeQuanterWithAbsMaxObserverLayer()
+                      (activation_quanter): FakeQuanterWithAbsMaxObserverLayer()
+                    )
+                    (4): ObserveWrapper(
+                      (_observer): FakeQuanterWithAbsMaxObserverLayer()
+                      (_observed): ReLU()
+                    )
+                    (5): ObserveWrapper(
+                      (_observer): FakeQuanterWithAbsMaxObserverLayer()
+                      (_observed): MaxPool2D(kernel_size=2, stride=2, padding=0)
+                    )
+                  )
+                  (fc): Sequential(
+                    (0): QuantedLinear(
+                      (weight_quanter): FakeQuanterWithAbsMaxObserverLayer()
+                      (activation_quanter): FakeQuanterWithAbsMaxObserverLayer()
+                    )
+                    (1): QuantedLinear(
+                      (weight_quanter): FakeQuanterWithAbsMaxObserverLayer()
+                      (activation_quanter): FakeQuanterWithAbsMaxObserverLayer()
+                    )
+                    (2): QuantedLinear(
+                      (weight_quanter): FakeQuanterWithAbsMaxObserverLayer()
+                      (activation_quanter): FakeQuanterWithAbsMaxObserverLayer()
+                    )
+                  )
+                )
         """
         assert (
             model.training
...
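Pulled out of the docstring, the new example runs as a standalone script; a minimal sketch assuming a working PaddlePaddle install. The explicit `model.train()` call is added here for clarity: the `assert model.training` guard visible in the hunk above rejects models in eval mode.

    # Sketch of the docstring example as a plain script (assumes paddlepaddle is installed).
    from paddle.quantization import QAT, QuantConfig
    from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
    from paddle.vision.models import LeNet

    quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
    q_config = QuantConfig(activation=quanter, weight=quanter)  # quantize both weights and activations
    qat = QAT(q_config)

    model = LeNet()
    model.train()  # quantize() asserts model.training, so train mode must be set
    quant_model = qat.quantize(model)
    print(quant_model)  # Conv2D/Linear now wrapped as QuantedConv2D/QuantedLinear, as in the diff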