From ff226ba1a2a2232fd45b774cc18947719e06e81d Mon Sep 17 00:00:00 2001
From: cyberslack_lee
Date: Thu, 3 Aug 2023 11:03:39 +0800
Subject: [PATCH] [xdoctest] reformat example code with google style in
 No.95-99 (#55834)

* test=docs_preview

* test=docs_preview
---
 python/paddle/nn/quant/quant_layers.py       | 28 +++++-----
 python/paddle/nn/quant/stub.py               | 55 +++++++++++++-------
 python/paddle/nn/utils/clip_grad_norm_.py    | 22 ++++----
 python/paddle/nn/utils/clip_grad_value_.py   | 21 ++++----
 python/paddle/nn/utils/spectral_norm_hook.py | 43 ++++++++-------
 5 files changed, 94 insertions(+), 75 deletions(-)

diff --git a/python/paddle/nn/quant/quant_layers.py b/python/paddle/nn/quant/quant_layers.py
index e8b96e81205..0df2c1c2c59 100644
--- a/python/paddle/nn/quant/quant_layers.py
+++ b/python/paddle/nn/quant/quant_layers.py
@@ -611,19 +611,21 @@ class QuantizedConv2DTranspose(Layer):
     The only difference is that its inputs are all fake quantized.
 
     Examples:
-       .. code-block:: python
-
-          import paddle
-          import paddle.nn as nn
-          from paddle.nn.quant.quant_layers import QuantizedConv2DTranspose
-
-          x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
-          conv = nn.Conv2DTranspose(4, 6, (3, 3))
-          conv_quantized = QuantizedConv2DTranspose(conv)
-          y_quantized = conv_quantized(x_var)
-          y_var = conv(x_var)
-          print(y_var.shape, y_quantized.shape)
-          # [2, 6, 10, 10], [2, 6, 10, 10]
+        .. code-block:: python
+
+            >>> import paddle
+            >>> import paddle.nn as nn
+            >>> from paddle.nn.quant.quant_layers import QuantizedConv2DTranspose
+
+            >>> x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
+            >>> conv = nn.Conv2DTranspose(4, 6, (3, 3))
+            >>> conv_quantized = QuantizedConv2DTranspose(conv)
+            >>> y_quantized = conv_quantized(x_var)
+            >>> y_var = conv(x_var)
+            >>> print(y_var.shape)
+            [2, 6, 10, 10]
+            >>> print(y_quantized.shape)
+            [2, 6, 10, 10]
 
     """
 
diff --git a/python/paddle/nn/quant/stub.py b/python/paddle/nn/quant/stub.py
index ab977524153..487db44a09b 100644
--- a/python/paddle/nn/quant/stub.py
+++ b/python/paddle/nn/quant/stub.py
@@ -23,31 +23,46 @@ class Stub(Layer):
     the forward of a layer. Instead, we can create a stub and add it to the sublayers of the layer.
     Then call the stub before the functional API in the forward. The observer held by the stub will
     observe or quantize the inputs of the functional API.
+
     Args:
         observer(QuanterFactory) - The configured information of the observer to be inserted. A global
             configuration will be used to create the observers if 'observer' is None.
+
     Examples:
         .. code-block:: python
-            import paddle
-            from paddle.nn.quant import Stub
-            from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
-            from paddle.nn import Conv2D
-            from paddle.quantization import QAT, QuantConfig
-            quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
-            class Model(paddle.nn.Layer):
-                def __init__(self, num_classes=10):
-                    super().__init__()
-                    self.conv = Conv2D(3, 6, 3, stride=1, padding=1)
-                    self.quant = Stub(quanter)
-                def forward(self, inputs):
-                    out = self.conv(inputs)
-                    out = self.quant(out)
-                    return paddle.nn.functional.relu(out)
-            model = Model()
-            q_config = QuantConfig(activation=quanter, weight=quanter)
-            qat = QAT(q_config)
-            quant_model = qat.quantize(model)
-            print(quant_model)
+
+            >>> import paddle
+            >>> from paddle.nn.quant import Stub
+            >>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
+            >>> from paddle.nn import Conv2D
+            >>> from paddle.quantization import QAT, QuantConfig
+
+            >>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
+            >>> class Model(paddle.nn.Layer):
+            ...     def __init__(self, num_classes=10):
+            ...         super().__init__()
+            ...         self.conv = Conv2D(3, 6, 3, stride=1, padding=1)
+            ...         self.quant = Stub(quanter)
+            ...
+            ...     def forward(self, inputs):
+            ...         out = self.conv(inputs)
+            ...         out = self.quant(out)
+            ...         return paddle.nn.functional.relu(out)
+
+            >>> model = Model()
+            >>> q_config = QuantConfig(activation=quanter, weight=quanter)
+            >>> qat = QAT(q_config)
+            >>> quant_model = qat.quantize(model)
+            >>> print(quant_model)
+            Model(
+              (conv): QuantedConv2D(
+                (weight_quanter): FakeQuanterWithAbsMaxObserverLayer()
+                (activation_quanter): FakeQuanterWithAbsMaxObserverLayer()
+              )
+              (quant): QuanterStub(
+                (_observer): FakeQuanterWithAbsMaxObserverLayer()
+              )
+            )
     """
 
     def __init__(self, observer=None):
diff --git a/python/paddle/nn/utils/clip_grad_norm_.py b/python/paddle/nn/utils/clip_grad_norm_.py
index 22fa7341e3f..e0f8cb51d0d 100644
--- a/python/paddle/nn/utils/clip_grad_norm_.py
+++ b/python/paddle/nn/utils/clip_grad_norm_.py
@@ -43,21 +43,23 @@ def clip_grad_norm_(
     Returns:
         Total norm of the parameter gradients (treated as a single vector).
+
     Example:
         .. code-block:: python
 
-            import paddle
-            x = paddle.uniform([10, 10], min=-1.0, max=1.0, dtype='float32')
-            max_norm = float(5.0)
-            linear = paddle.nn.Linear(in_features=10, out_features=10)
-            out = linear(x)
-            loss = paddle.mean(out)
-            loss.backward()
+            >>> import paddle
+
+            >>> x = paddle.uniform([10, 10], min=-1.0, max=1.0, dtype='float32')
+            >>> max_norm = float(5.0)
+            >>> linear = paddle.nn.Linear(in_features=10, out_features=10)
+            >>> out = linear(x)
+            >>> loss = paddle.mean(out)
+            >>> loss.backward()
 
-            paddle.nn.utils.clip_grad_norm_(linear.parameters(), max_norm)
+            >>> paddle.nn.utils.clip_grad_norm_(linear.parameters(), max_norm)
 
-            sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters())
-            sdg.step()
+            >>> sgd = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters())
+            >>> sgd.step()
 
     """
     if not paddle.in_dynamic_mode():
         raise RuntimeError('this API can only run in dynamic mode.')
diff --git a/python/paddle/nn/utils/clip_grad_value_.py b/python/paddle/nn/utils/clip_grad_value_.py
index 9c0fa10ed08..4b7275516f5 100644
--- a/python/paddle/nn/utils/clip_grad_value_.py
+++ b/python/paddle/nn/utils/clip_grad_value_.py
@@ -31,19 +31,20 @@ def clip_grad_value_(
         clip_value (float or int): maximum allowed value of the gradients.
             The gradients are clipped in the range
             :math:`\left[\text{-clip\_value}, \text{clip\_value}\right]`
+
     Example:
         .. code-block:: python
-            import paddle
-            x = paddle.uniform([10, 10], min=-10.0, max=10.0, dtype='float32')
-            clip_value = float(5.0)
-            linear = paddle.nn.Linear(in_features=10, out_features=10)
-            out = linear(x)
-            loss = paddle.mean(out)
-            loss.backward()
-            paddle.nn.utils.clip_grad_value_(linear.parameters(), clip_value)
-            sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters())
-            sdg.step()
+            >>> import paddle
+            >>> x = paddle.uniform([10, 10], min=-10.0, max=10.0, dtype='float32')
+            >>> clip_value = float(5.0)
+            >>> linear = paddle.nn.Linear(in_features=10, out_features=10)
+            >>> out = linear(x)
+            >>> loss = paddle.mean(out)
+            >>> loss.backward()
+            >>> paddle.nn.utils.clip_grad_value_(linear.parameters(), clip_value)
+            >>> sgd = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters())
+            >>> sgd.step()
     """
     if not paddle.in_dynamic_mode():
         raise RuntimeError('this API can only run in dynamic mode.')
diff --git a/python/paddle/nn/utils/spectral_norm_hook.py b/python/paddle/nn/utils/spectral_norm_hook.py
index 6dbc07e338a..050f2a533f9 100644
--- a/python/paddle/nn/utils/spectral_norm_hook.py
+++ b/python/paddle/nn/utils/spectral_norm_hook.py
@@ -182,28 +182,27 @@ def spectral_norm(
         Layer, the original layer with the spectral norm hook.
 
     Examples:
-       .. code-block:: python
-
-          from paddle.nn import Conv2D
-          from paddle.nn.utils import spectral_norm
-
-          conv = Conv2D(3, 1, 3)
-          sn_conv = spectral_norm(conv)
-          print(sn_conv)
-          # Conv2D(3, 1, kernel_size=[3, 3], data_format=NCHW)
-          print(sn_conv.weight)
-          # Tensor(shape=[1, 3, 3, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=False,
-          #        [[[[-0.21090528,  0.18563725, -0.14127982],
-          #           [-0.02310637,  0.03197737,  0.34353802],
-          #           [-0.17117859,  0.33152047, -0.28408015]],
-          #
-          #          [[-0.13336606, -0.01862637,  0.06959272],
-          #           [-0.02236020, -0.27091628, -0.24532901],
-          #           [ 0.27254242,  0.15516677,  0.09036587]],
-          #
-          #          [[ 0.30169338, -0.28146112, -0.11768346],
-          #           [-0.45765871, -0.12504843, -0.17482486],
-          #           [-0.36866254, -0.19969313,  0.08783543]]]])
+        .. code-block:: python
+
+            >>> import paddle
+            >>> from paddle.nn import Conv2D
+            >>> from paddle.nn.utils import spectral_norm
+            >>> paddle.seed(2023)
+
+            >>> conv = Conv2D(3, 1, 3)
+            >>> sn_conv = spectral_norm(conv)
+            >>> print(sn_conv)
+            Conv2D(3, 1, kernel_size=[3, 3], data_format=NCHW)
+            >>> print(sn_conv.weight)
+            Tensor(shape=[1, 3, 3, 3], dtype=float32, place=Place(cpu), stop_gradient=False,
+                   [[[[ 0.01668976,  0.30305523,  0.11405435],
+                      [-0.06765547, -0.50396705, -0.40925547],
+                      [ 0.47344422,  0.03628403,  0.45277366]],
+                     [[-0.15177251, -0.16305730, -0.15723954],
+                      [-0.28081197, -0.09183260, -0.08081978],
+                      [-0.40895155,  0.18298769, -0.29325116]],
+                     [[ 0.21819633, -0.01822380, -0.50351536],
+                      [-0.06262003,  0.17713565,  0.20517939],
+                      [ 0.16659889, -0.14333329,  0.05228264]]]])
 
     """
-- 
GitLab
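
The reworked examples all follow the xdoctest google style: statements on `>>>`/`...` prompts with the expected output placed directly beneath them, so the docstrings can be executed as tests. As a quick local check, the examples in any one of the touched modules can be run through xdoctest. The snippet below is a minimal sketch, not part of the patch; it assumes the `xdoctest` package is installed, that a paddle build is importable, and that it is run from the repository root so the relative module path resolves:

.. code-block:: python

    # Minimal sketch: execute the reformatted docstring examples with xdoctest.
    # Assumes `pip install xdoctest` and an importable paddle build.
    import xdoctest

    # command='all' collects and runs every doctest found in the module;
    # style='google' matches the prompt/output layout used in this patch.
    xdoctest.doctest_module(
        'python/paddle/nn/utils/clip_grad_norm_.py',
        command='all',
        style='google',
    )

Because the expected output is compared verbatim, examples that print random values need a fixed seed first, which is why the spectral_norm example now calls `paddle.seed(2023)` before printing the weights.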