#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from paddle.fluid.tests.unittests.eager_op_test import OpTest
from paddle.fluid.tests.unittests.test_fusion_gru_op import fusion_gru
from paddle.fluid.tests.unittests.test_fusion_lstm_op import ACTIVATION


class TestFusionGRUINT8MKLDNNOp(OpTest):
    def set_confs(self):
        pass

    def setUp(self):
        self.op_type = "fusion_gru"
        self.lod = [[2, 4, 3]]
        self.IC = 3
        self.OC = 5
        self.is_reverse = False
        self.with_h0 = False
        self.with_bias = True
        self.act_state = 'tanh'
        self.act_gate = 'sigmoid'
        self.origin_mode = True
        self.use_mkldnn = True
        self.mkldnn_data_type = "int8"
        self.force_fp32_output = True
        self.error_margin = 1e-5
        self.set_confs()

        # RNN dimensions
        T = sum(self.lod[0])
        N = len(self.lod[0])

        # Input data
        x_f32 = np.random.rand(T, self.IC).astype('float32') * 2 - 1
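        # Quantize the input to uint8: with scale 63 and shift 64, values in
        # [-1, 1) map to roughly [1, 127] (asymmetric u8 quantization).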
        scale_data = 63.0
        shift_data = 64.0
        x_u8 = np.rint(x_f32 * scale_data + shift_data).astype(np.uint8)
        #  x_u8 = (x_f32 * scale_data + shift_data).astype(np.uint8)

        # WeightX/WeightH data
        wx = np.random.rand(self.IC, 3 * self.OC).astype('float32') * 2 - 1
        wh = np.random.rand(self.OC, 3 * self.OC).astype('float32') * 2 - 1
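        # Note: weights are fed to the op in FP32; presumably the int8 kernel
        # quantizes them internally using the per-channel 'Scale_weights'
        # attribute computed below.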

        # Calculating weight scales
        # scales = s8_max / max(abs(channel_wise(weightsX + weightsH)))
        # WeightX data shape in PP: [IC, 3 * OC]
        # WeightH data shape in PP: [OC, 2 * OC] + [OC, OC]
        # Scales shape in oneDNN:   [3, OC]
        s8_max = 127.0
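        # Per-output-channel scales: scale_ur is computed from the update/
        # reset-gate columns of WeightX stacked with the [OC, 2 * OC]
        # update/reset block of WeightH; scale_o from the remaining
        # candidate-gate block.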
        scale_ur = s8_max / np.max(
            np.abs(
                np.concatenate(
                    [
                        wx[:, : 2 * self.OC],
                        wh.flatten()[: 2 * self.OC * self.OC].reshape(
                            self.OC, 2 * self.OC
                        ),
                    ],
                    axis=0,
                )
            ),
            axis=0,
        )
        scale_o = s8_max / np.max(
            np.abs(
                np.concatenate(
                    [
                        wx[:, 2 * self.OC :],
                        wh.flatten()[2 * self.OC * self.OC :].reshape(
                            self.OC, self.OC
                        ),
                    ],
                    axis=0,
                )
            ),
            axis=0,
        )

        scale_weights = np.concatenate([scale_ur, scale_o]).astype('float')
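        # Concatenated per-channel scales, shape (3 * OC,); passed to the op
        # through the 'Scale_weights' attribute below.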

        bias = (
            np.random.rand(1, 3 * self.OC).astype('float32')
            if self.with_bias
            else np.zeros((1, 3 * self.OC), dtype='float32')
        )
        h0 = (
            np.random.rand(N, self.OC).astype('float32')
            if self.with_h0
            else np.zeros((N, self.OC), dtype='float32')
        )

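        # FP32 reference output from the Python fusion_gru implementation;
        # the quantized op output is checked against it (requantized to uint8
        # when force_fp32_output is False).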
        _, _, _, hidden_f32 = fusion_gru(
            x_f32,
            self.lod,
            h0,
            wx,
            wh,
            bias,
            self.is_reverse,
            self.origin_mode,
            ACTIVATION[self.act_state],
            ACTIVATION[self.act_gate],
        )

        self.inputs = {'X': (x_u8, self.lod), 'WeightX': wx, 'WeightH': wh}

        if self.with_bias:
            self.inputs['Bias'] = bias

        if self.with_h0:
            self.inputs['H0'] = h0

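        # Looser tolerances for the quantized paths: 1e-1 for dequantized
        # FP32 output, one full uint8 step when the output stays quantized.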
        if self.force_fp32_output:
            self.error_margin = 1e-1
            self.outputs = {'Hidden': (hidden_f32, self.lod)}
        else:
            self.error_margin = 1
            hidden_u8 = np.rint(hidden_f32 * scale_data + shift_data).astype(
                np.uint8
            )
            #  hidden_u8 = (hidden_f32 * scale_data + shift_data).astype(np.uint8)
            self.outputs = {'Hidden': (hidden_u8, self.lod)}

        self.attrs = {
            'activation': self.act_state,
            'gate_activation': self.act_gate,
            'is_reverse': self.is_reverse,
            'origin_mode': self.origin_mode,
            'use_mkldnn': self.use_mkldnn,
            'mkldnn_data_type': self.mkldnn_data_type,
            'force_fp32_output': self.force_fp32_output,
            'Scale_data': scale_data,
            'Shift_data': shift_data,
            'Scale_weights': scale_weights,
        }

    def test_check_output(self):
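        # fusion_gru is a fused, static-graph-only op, so the dygraph check
        # is skipped; atol is the error margin selected in setUp.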
        self.check_output(check_dygraph=False, atol=self.error_margin)


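# The variants below override set_confs() to exercise different
# configurations of the base test.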
class TestFusionGRUINT8MKLDNNOp2(TestFusionGRUINT8MKLDNNOp):
    def set_confs(self):
        self.force_fp32_output = False


class TestFusionGRUINT8MKLDNNOp3(TestFusionGRUINT8MKLDNNOp):
    def set_confs(self):
        self.origin_mode = False


class TestFusionGRUINT8MKLDNNOp4(TestFusionGRUINT8MKLDNNOp):
    def set_confs(self):
        self.with_bias = False


class TestFusionGRUINT8MKLDNNOp5(TestFusionGRUINT8MKLDNNOp):
    def set_confs(self):
        self.with_h0 = False


if __name__ == "__main__":
    from paddle import enable_static

    enable_static()
    unittest.main()