未验证 提交 80d3a20b 编写于 作者: C Candy2Tang 提交者: GitHub

[xdoctest][task 122] Reformat example code with google style in...

[xdoctest][task 122] Reformat example code with google style in python/paddle/quantization/qat.py (#56233)

* [xdoctest][task 122] test=docs_preview

* test=document_fix

* fix indent

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
上级 e79133e8
......@@ -28,11 +28,12 @@ class QAT(Quantization):
Examples:
.. code-block:: python
from paddle.quantization import QAT, QuantConfig
from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
q_config = QuantConfig(activation=quanter, weight=quanter)
qat = QAT(q_config)
>>> from paddle.quantization import QAT, QuantConfig
>>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> q_config = QuantConfig(activation=quanter, weight=quanter)
>>> qat = QAT(q_config)
"""
def __init__(self, config: QuantConfig):
......@@ -52,17 +53,60 @@ class QAT(Quantization):
Return: The prepared model for quantization-aware training.
Examples:
.. code-block:: python
from paddle.quantization import QAT, QuantConfig
from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
from paddle.vision.models import LeNet
.. code-block:: python
>>> from paddle.quantization import QAT, QuantConfig
>>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
>>> from paddle.vision.models import LeNet
quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
q_config = QuantConfig(activation=quanter, weight=quanter)
qat = QAT(q_config)
model = LeNet()
quant_model = qat.quantize(model)
print(quant_model)
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> q_config = QuantConfig(activation=quanter, weight=quanter)
>>> qat = QAT(q_config)
>>> model = LeNet()
>>> quant_model = qat.quantize(model)
>>> print(quant_model)
LeNet(
(features): Sequential(
(0): QuantedConv2D(
(weight_quanter): FakeQuanterWithAbsMaxObserverLayer()
(activation_quanter): FakeQuanterWithAbsMaxObserverLayer()
)
(1): ObserveWrapper(
(_observer): FakeQuanterWithAbsMaxObserverLayer()
(_observed): ReLU()
)
(2): ObserveWrapper(
(_observer): FakeQuanterWithAbsMaxObserverLayer()
(_observed): MaxPool2D(kernel_size=2, stride=2, padding=0)
)
(3): QuantedConv2D(
(weight_quanter): FakeQuanterWithAbsMaxObserverLayer()
(activation_quanter): FakeQuanterWithAbsMaxObserverLayer()
)
(4): ObserveWrapper(
(_observer): FakeQuanterWithAbsMaxObserverLayer()
(_observed): ReLU()
)
(5): ObserveWrapper(
(_observer): FakeQuanterWithAbsMaxObserverLayer()
(_observed): MaxPool2D(kernel_size=2, stride=2, padding=0)
)
)
(fc): Sequential(
(0): QuantedLinear(
(weight_quanter): FakeQuanterWithAbsMaxObserverLayer()
(activation_quanter): FakeQuanterWithAbsMaxObserverLayer()
)
(1): QuantedLinear(
(weight_quanter): FakeQuanterWithAbsMaxObserverLayer()
(activation_quanter): FakeQuanterWithAbsMaxObserverLayer()
)
(2): QuantedLinear(
(weight_quanter): FakeQuanterWithAbsMaxObserverLayer()
(activation_quanter): FakeQuanterWithAbsMaxObserverLayer()
)
)
)
"""
assert (
model.training
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册