#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import numpy as np

import paddle
import paddle.nn.quant.quant_layers as quant_layers

from ..quantization_pass import _get_op_input_var_names
from ..quantization_pass import _get_op_output_var_names
from ..quantization_pass import _get_output_name_index
from ..quantization_pass import _get_input_name_index

layer_name_map = {
    'Conv2DTranspose': paddle.nn.Conv2DTranspose,
    'Conv2D': paddle.nn.Conv2D,
    'Linear': paddle.nn.Linear,
    'AdaptiveAvgPool2D': paddle.nn.AdaptiveAvgPool2D,
    'AdaptiveMaxPool2D': paddle.nn.AdaptiveMaxPool2D,
    'AvgPool2D': paddle.nn.AvgPool2D,
    'MaxPool2D': paddle.nn.MaxPool2D,
    'Hardswish': paddle.nn.Hardswish,
    'LeakyReLU': paddle.nn.LeakyReLU,
    'PReLU': paddle.nn.PReLU,
    'ReLU': paddle.nn.ReLU,
    'ReLU6': paddle.nn.ReLU6,
    'Sigmoid': paddle.nn.Sigmoid,
    'Softmax': paddle.nn.Softmax,
    'Swish': paddle.nn.Swish,
    'Tanh': paddle.nn.Tanh,
    'BatchNorm': paddle.nn.BatchNorm,
    'GroupNorm': paddle.nn.GroupNorm,
    'LayerNorm': paddle.nn.LayerNorm,
}

# Apply fake quant for the inputs of these layers
fake_quant_input_layers = [
    paddle.nn.Conv2D, paddle.nn.Linear, paddle.nn.Conv2DTranspose
]

# Apply fake quant for the output of these layers
# TODO(jc): fix the problem of adding duplicate fake_quant ops
# paddle.nn.AdaptiveAvgPool2D, paddle.nn.AvgPool2D, paddle.nn.ReLU,
# paddle.nn.LeakyReLU
fake_quant_output_layers = [
    paddle.nn.quant.add, paddle.nn.quant.subtract, paddle.nn.quant.multiply,
    paddle.nn.quant.divide
]

fake_quant_leaf_layers = [
    quant_layers.FakeQuantAbsMax,
    quant_layers.FakeQuantChannelWiseAbsMax,
    quant_layers.FakeQuantMovingAverageAbsMax,
    quant_layers.MovingAverageAbsMaxScale,
]

fake_quant_wrap_layers = [
    quant_layers.QuantizedConv2D, quant_layers.QuantizedLinear,
    quant_layers.QuantizedConv2DTranspose
]

# For these layers, the output channel sits on axis 1 of the weight,
# i.e. Cin * Cout * H * W for Conv2DTranspose and Cin * Cout for Linear.
spec_channel_axis_layers = [paddle.nn.Conv2DTranspose, paddle.nn.Linear]

weight_op_types = [
    "conv2d", "depthwise_conv2d", "matmul", "conv2d_transpose",
    "depthwise_conv2d_transpose"
]

fake_quantize_dequantize_op_types = [
    "fake_quantize_dequantize_abs_max",
    "fake_channel_wise_quantize_dequantize_abs_max",
    "fake_quantize_dequantize_moving_average_abs_max"
]


def load_variable_data(scope, var_name):
    """
    Load the value of the variable from the scope as a numpy array.
    """
    var_node = scope.find_var(var_name)
    assert var_node is not None, \
        "Can not find " + var_name + " in the scope."
    return np.array(var_node.get_tensor())
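
# A minimal usage sketch for load_variable_data. paddle.static.global_scope()
# is the real API, but the variable name 'conv2d_0.w_0' is only an
# illustrative assumption:
#
#   scope = paddle.static.global_scope()
#   weight_np = load_variable_data(scope, 'conv2d_0.w_0')  # numpy.ndarray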


def find_previous_op(block, var_name):
    """
    Find the previous op, i.e. the op that produces the given variable.
    """
    for op in block.ops:
        if var_name in op.output_arg_names:
            return op
    return None


def find_next_ops(block, var_name):
    """
    Find all following ops, i.e. the ops that consume the given variable.
    """
    res_ops = []
    for op in block.ops:
        if var_name in op.input_arg_names:
            res_ops.append(op)
    return res_ops
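
# A minimal sketch of walking a static-graph program with the two helpers
# above. `program` and the variable name 'conv2d_0.tmp_0' are illustrative
# assumptions:
#
#   block = program.global_block()
#   producer = find_previous_op(block, 'conv2d_0.tmp_0')  # op writing the var
#   consumers = find_next_ops(block, 'conv2d_0.tmp_0')    # ops reading the var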


def find_parent_layer_and_sub_name(model, name):
    """
    Given the model and the name of a layer, find the parent layer and
    the sub_name of the layer.
    For example, if the name is 'block_1.convbn_1.conv_1', the parent layer
    is 'block_1.convbn_1' and the sub_name is `conv_1`.

    Args:
        model(paddle.nn.Layer): the model to be quantized.
        name(string): the name of a layer

    Returns:
        parent_layer, sub_name
    """
    assert isinstance(model, paddle.nn.Layer), \
        "The model must be an instance of paddle.nn.Layer."
    assert len(name) > 0, "The input (name) should not be empty."

    last_idx = 0
    idx = 0
    parent_layer = model
    while idx < len(name):
        if name[idx] == '.':
            sub_name = name[last_idx:idx]
            if hasattr(parent_layer, sub_name):
                parent_layer = getattr(parent_layer, sub_name)
                last_idx = idx + 1
        idx += 1
    sub_name = name[last_idx:idx]
    return parent_layer, sub_name
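
# A minimal usage sketch. The path 'features.conv1' and `new_layer` are
# illustrative assumptions; a typical use is replacing a sub-layer in place:
#
#   parent, sub_name = find_parent_layer_and_sub_name(model, 'features.conv1')
#   # parent is model.features and sub_name is 'conv1'
#   setattr(parent, sub_name, new_layer)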


def program_all_ops(program):
    """
    Return all ops for the input program.
    """
    all_ops = []
    for block in program.blocks:
        for op in block.ops:
            all_ops.append(op)
    return all_ops
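
# Example sketch: collect the op types appearing in the default main program
# (the choice of program is illustrative):
#
#   ops = program_all_ops(paddle.static.default_main_program())
#   op_types = set(op.type for op in ops)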


def is_leaf_layer(layer):
    """
    Whether the layer is a leaf layer, i.e. has no sublayers.
    """
    return isinstance(layer, paddle.nn.Layer) \
        and len(layer.sublayers()) == 0
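
# Example sketch: a plain Conv2D has no sublayers, while a container does.
#
#   is_leaf_layer(paddle.nn.Conv2D(3, 8, 3))               # True
#   is_leaf_layer(paddle.nn.Sequential(paddle.nn.ReLU()))  # False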


def fp_numpy_to_naive(x_np):
    """
    Convert a numpy array to a python float (single element) or a list.
    """
    if x_np.size == 1:
        return float(x_np)
    else:
        return x_np.tolist()
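
# Example sketch: single-element arrays collapse to a python float, anything
# larger becomes a (possibly nested) list.
#
#   fp_numpy_to_naive(np.array(0.5))         # 0.5
#   fp_numpy_to_naive(np.array([0.1, 0.2]))  # [0.1, 0.2]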