Unverified commit 0c5781e5, authored by S Sonder, committed by GitHub

[xdoctest] reformat example code with google style No.116-119 (#56118)

Parent 786c6e99
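
For readers skimming the diff below: the change converts plain docstring code blocks into the Google-style doctest format that xdoctest executes. Statements gain a ">>> " prefix, continuation lines use "... ", expected output follows unprefixed, and output that is not reproducible (for example, object addresses) is guarded with a "# doctest: +SKIP" directive. A minimal illustrative sketch of the pattern, not taken from this PR:

    # Before: plain code block, never executed by the doc checker
    import paddle
    x = paddle.to_tensor([1.0, 2.0, 3.0])
    print(x.shape)

    # After: Google-style doctest, runnable by xdoctest
    >>> import paddle
    >>> x = paddle.to_tensor([1.0, 2.0, 3.0])
    >>> print(x.shape)
    [3]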
......@@ -70,12 +70,15 @@ class QuantConfig:
Examples:
.. code-block:: python
from paddle.quantization import QuantConfig
from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
>>> from paddle.quantization import QuantConfig
>>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
q_config = QuantConfig(activation=quanter, weight=quanter)
print(q_config)
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> q_config = QuantConfig(activation=quanter, weight=quanter)
>>> print(q_config)
Global config:
activation: FakeQuanterWithAbsMaxObserver(name=None,moving_rate=0.9,bit_length=8,dtype=float32)
weight: FakeQuanterWithAbsMaxObserver(name=None,moving_rate=0.9,bit_length=8,dtype=float32)
"""
......@@ -111,20 +114,25 @@ class QuantConfig:
Examples:
.. code-block:: python
import paddle
from paddle.nn import Linear
from paddle.quantization import QuantConfig
from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
class Model(paddle.nn.Layer):
def __init__(self):
super().__init__()
self.fc = Linear(576, 120)
model = Model()
quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
q_config = QuantConfig(activation=None, weight=None)
q_config.add_layer_config([model.fc], activation=quanter, weight=quanter)
print(q_config)
>>> import paddle
>>> from paddle.nn import Linear
>>> from paddle.quantization import QuantConfig
>>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
>>> class Model(paddle.nn.Layer):
... def __init__(self):
... super().__init__()
... self.fc = Linear(576, 120)
>>> model = Model()
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> q_config = QuantConfig(activation=None, weight=None)
>>> q_config.add_layer_config([model.fc], activation=quanter, weight=quanter)
>>> # doctest: +SKIP
>>> print(q_config)
Global config:
None
Layer prefix config:
{'linear_0': <paddle.quantization.config.SingleLayerConfig object at 0x7fe41a680ee0>}
"""
if isinstance(layer, list):
......@@ -155,20 +163,25 @@ class QuantConfig:
Examples:
.. code-block:: python
import paddle
from paddle.nn import Linear
from paddle.quantization import QuantConfig
from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
class Model(paddle.nn.Layer):
def __init__(self):
super().__init__()
self.fc = Linear(576, 120)
model = Model()
quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
q_config = QuantConfig(activation=None, weight=None)
q_config.add_name_config([model.fc.full_name()], activation=quanter, weight=quanter)
print(q_config)
>>> import paddle
>>> from paddle.nn import Linear
>>> from paddle.quantization import QuantConfig
>>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
>>> class Model(paddle.nn.Layer):
... def __init__(self):
... super().__init__()
... self.fc = Linear(576, 120)
>>> model = Model()
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> q_config = QuantConfig(activation=None, weight=None)
>>> q_config.add_name_config([model.fc.full_name()], activation=quanter, weight=quanter)
>>> # doctest: +SKIP
>>> print(q_config)
Global config:
None
Layer prefix config:
{'linear_0': <paddle.quantization.config.SingleLayerConfig object at 0x7fe41a680fd0>}
"""
if isinstance(layer_name, str):
......@@ -200,20 +213,25 @@ class QuantConfig:
Examples:
.. code-block:: python
import paddle
from paddle.nn import Linear
from paddle.quantization import QuantConfig
from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
class Model(paddle.nn.Layer):
def __init__(self):
super().__init__()
self.fc = Linear(576, 120)
model = Model()
quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
q_config = QuantConfig(activation=None, weight=None)
q_config.add_type_config([Linear], activation=quanter, weight=quanter)
print(q_config)
>>> import paddle
>>> from paddle.nn import Linear
>>> from paddle.quantization import QuantConfig
>>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
>>> class Model(paddle.nn.Layer):
... def __init__(self):
... super().__init__()
... self.fc = Linear(576, 120)
>>> model = Model()
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> q_config = QuantConfig(activation=None, weight=None)
>>> q_config.add_type_config([Linear], activation=quanter, weight=quanter)
>>> # doctest: +SKIP
>>> print(q_config)
Global config:
None
Layer type config:
{<class 'paddle.nn.layer.common.Linear'>: <paddle.quantization.config.SingleLayerConfig object at 0x7fe41a680a60>}
"""
if isinstance(layer_type, type) and issubclass(
......@@ -242,16 +260,16 @@ class QuantConfig:
Examples:
.. code-block:: python
from paddle.nn import Conv2D
from paddle.quantization import QuantConfig
from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
q_config = QuantConfig(activation=None, weight=None)
class CustomizedQuantedConv2D:
def forward(self, x):
pass
# add some code for quantization simulation
q_config.add_qat_layer_mapping(Conv2D, CustomizedQuantedConv2D)
>>> from paddle.nn import Conv2D
>>> from paddle.quantization import QuantConfig
>>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> q_config = QuantConfig(activation=None, weight=None)
>>> class CustomizedQuantedConv2D:
... def forward(self, x):
... pass
... # add some code for quantization simulation
>>> q_config.add_qat_layer_mapping(Conv2D, CustomizedQuantedConv2D)
"""
assert isinstance(source, type) and issubclass(
source, paddle.nn.Layer
......@@ -274,11 +292,11 @@ class QuantConfig:
Examples:
.. code-block:: python
from paddle.nn import Sequential
from paddle.quantization import QuantConfig
from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
q_config = QuantConfig(activation=None, weight=None)
q_config.add_customized_leaf(Sequential)
>>> from paddle.nn import Sequential
>>> from paddle.quantization import QuantConfig
>>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
>>> q_config = QuantConfig(activation=None, weight=None)
>>> q_config.add_customized_leaf(Sequential)
"""
self._customized_leaves.append(layer_type)
......@@ -381,20 +399,20 @@ class QuantConfig:
Examples:
.. code-block:: python
import paddle
from paddle.nn import Linear, Sequential
from paddle.quantization import QuantConfig
from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
class Model(paddle.nn.Layer):
def __init__(self):
super().__init__()
self.fc = Sequential(Linear(576, 120),Linear(576, 120))
model = Model()
quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
q_config = QuantConfig(activation=None, weight=None)
q_config.add_layer_config([model.fc], activation=quanter, weight=quanter)
q_config._specify(model)
>>> import paddle
>>> from paddle.nn import Linear, Sequential
>>> from paddle.quantization import QuantConfig
>>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
>>> class Model(paddle.nn.Layer):
... def __init__(self):
... super().__init__()
... self.fc = Sequential(Linear(576, 120), Linear(576, 120))
>>> model = Model()
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> q_config = QuantConfig(activation=None, weight=None)
>>> q_config.add_layer_config([model.fc], activation=quanter, weight=quanter)
>>> q_config._specify(model)
"""
self._model = model
self._specify_helper(self._model)
......
......@@ -83,21 +83,22 @@ def quanter(class_name):
Examples:
.. code-block:: python
# Given codes in ./customized_quanter.py
from paddle.quantization import quanter
from paddle.quantization import BaseQuanter
@quanter("CustomizedQuanter")
class CustomizedQuanterLayer(BaseQuanter):
def __init__(self, arg1, kwarg1=None):
pass
# Used in ./test.py
# from .customized_quanter import CustomizedQuanter
from paddle.quantization import QuantConfig
arg1_value = "test"
kwarg1_value = 20
quanter = CustomizedQuanter(arg1_value, kwarg1=kwarg1_value)
q_config = QuantConfig(activation=quanter, weight=quanter)
>>> # doctest: +SKIP
>>> # Given codes in ./customized_quanter.py
>>> from paddle.quantization import quanter
>>> from paddle.quantization import BaseQuanter
>>> @quanter("CustomizedQuanter")
... class CustomizedQuanterLayer(BaseQuanter):
... def __init__(self, arg1, kwarg1=None):
... pass
>>> # Used in ./test.py
>>> # from .customized_quanter import CustomizedQuanter
>>> from paddle.quantization import QuantConfig
>>> arg1_value = "test"
>>> kwarg1_value = 20
>>> quanter = CustomizedQuanter(arg1_value, kwarg1=kwarg1_value)
>>> q_config = QuantConfig(activation=quanter, weight=quanter)
"""
......
......@@ -135,79 +135,81 @@ class ImperativeQuantAware:
during training. If this attribute is not set or the attribute is
false, the Layer will be quantized during training.
Examples 1:
Examples:
.. code-block:: python
import paddle
from paddle.static.quantization \
import ImperativeQuantAware
from paddle.vision.models \
import resnet
>>> import paddle
>>> from paddle.static.quantization import (
... ImperativeQuantAware,
... )
>>> from paddle.vision.models import (
... resnet,
... )
model = resnet.resnet50(pretrained=True)
>>> model = resnet.resnet50(pretrained=True)
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
>>> imperative_qat = ImperativeQuantAware(
... weight_quantize_type='abs_max',
... activation_quantize_type='moving_average_abs_max')
# Add the fake quant logical.
# The original model will be rewrite.
# The outscale of outputs in supportted layers would be calculated.
imperative_qat.quantize(model)
>>> # Add the fake quant logic.
>>> # The original model will be rewritten.
>>> # The outscale of outputs in supported layers will be calculated.
>>> imperative_qat.quantize(model)
# Fine-tune the quantized model
# ...
>>> # Fine-tune the quantized model
>>> # ...
# Save quant model for the inference.
imperative_qat.save_quantized_model(
layer=model,
model_path="./resnet50_qat",
input_spec=[
paddle.static.InputSpec(
shape=[None, 3, 224, 224], dtype='float32')])
>>> # Save quant model for the inference.
>>> imperative_qat.save_quantized_model(
... layer=model,
... model_path="./resnet50_qat",
... input_spec=[
... paddle.static.InputSpec(
... shape=[None, 3, 224, 224], dtype='float32')])
Examples 2:
.. code-block:: python
import paddle
from paddle.static.quantization \
import ImperativeQuantAware
class ImperativeModel(paddle.nn.Layer):
def __init__(self):
super().__init__()
# self.linear_0 would skip the quantization.
self.linear_0 = paddle.nn.Linear(784, 400)
self.linear_0.skip_quant = True
# self.linear_1 would not skip the quantization.
self.linear_1 = paddle.nn.Linear(400, 10)
self.linear_1.skip_quant = False
def forward(self, inputs):
x = self.linear_0(inputs)
x = self.linear_1(inputs)
return x
model = ImperativeModel()
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
# Add the fake quant logical.
# The original model will be rewrite.
#
# There is only one Layer(self.linear1) would be added the
# fake quant logical.
imperative_qat.quantize(model)
# Fine-tune the quantized model
# ...
# Save quant model for the inference.
imperative_qat.save_quantized_model(
layer=model,
model_path="./imperative_model_qat")
>>> import paddle
>>> from paddle.static.quantization import (
... ImperativeQuantAware,
... )
>>> class ImperativeModel(paddle.nn.Layer):
... def __init__(self):
... super().__init__()
... # self.linear_0 would skip the quantization.
... self.linear_0 = paddle.nn.Linear(784, 400)
... self.linear_0.skip_quant = True
... # self.linear_1 would not skip the quantization.
... self.linear_1 = paddle.nn.Linear(400, 10)
... self.linear_1.skip_quant = False
... def forward(self, inputs):
... x = self.linear_0(inputs)
... x = self.linear_1(x)
... return x
>>> model = ImperativeModel()
>>> imperative_qat = ImperativeQuantAware(
... weight_quantize_type='abs_max',
... activation_quantize_type='moving_average_abs_max')
>>> # Add the fake quant logic.
>>> # The original model will be rewritten.
>>> #
>>> # Only one layer (self.linear_1) will have the fake quant
>>> # logic added.
>>> imperative_qat.quantize(model)
>>> # Fine-tune the quantized model
>>> # ...
>>> # Save quant model for the inference.
>>> imperative_qat.save_quantized_model(
... layer=model,
... model_path="./imperative_model_qat")
"""
super().__init__()
self.fuse_conv_bn = fuse_conv_bn
......@@ -247,37 +249,38 @@ class ImperativeQuantAware:
Examples:
.. code-block:: python
import paddle
from paddle.static.quantization \
import ImperativeQuantAware
class ImperativeModel(paddle.nn.Layer):
def __init__(self):
super().__init__()
# self.linear_0 would skip the quantization.
self.linear_0 = paddle.nn.Linear(784, 400)
self.linear_0.skip_quant = True
# self.linear_1 would not skip the quantization.
self.linear_1 = paddle.nn.Linear(400, 10)
self.linear_1.skip_quant = False
def forward(self, inputs):
x = self.linear_0(inputs)
x = self.linear_1(inputs)
return x
model = ImperativeModel()
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
# Add the fake quant logical.
# The original model will be rewrite.
#
# There is only one Layer(self.linear1) would be added the
# fake quant logical.
imperative_qat.quantize(model)
>>> import paddle
>>> from paddle.static.quantization import (
... ImperativeQuantAware,
... )
>>> class ImperativeModel(paddle.nn.Layer):
... def __init__(self):
... super().__init__()
... # self.linear_0 would skip the quantization.
... self.linear_0 = paddle.nn.Linear(784, 400)
... self.linear_0.skip_quant = True
... # self.linear_1 would not skip the quantization.
... self.linear_1 = paddle.nn.Linear(400, 10)
... self.linear_1.skip_quant = False
... def forward(self, inputs):
... x = self.linear_0(inputs)
... x = self.linear_1(x)
... return x
>>> model = ImperativeModel()
>>> imperative_qat = ImperativeQuantAware(
... weight_quantize_type='abs_max',
... activation_quantize_type='moving_average_abs_max')
>>> # Add the fake quant logic.
>>> # The original model will be rewritten.
>>> #
>>> # Only one layer (self.linear_1) will have the fake quant
>>> # logic added.
>>> imperative_qat.quantize(model)
"""
assert isinstance(
model, paddle.nn.Layer
......