#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from paddle.fluid.tests.unittests.eager_op_test import OpTest
from paddle.fluid.tests.unittests.test_fusion_lstm_op import (
    ACTIVATION,
    fusion_lstm,
)


class TestFusionLSTMINT8MKLDNNOp(OpTest):
    def set_confs(self):
        pass

    def setUp(self):
        self.op_type = "fusion_lstm"
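        # LoD: a batch of four sequences with lengths 2, 3, 5 and 4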
        self.lod = [[2, 3, 5, 4]]
        self.IC = 3  # input feature size
        self.OC = 5  # hidden (output) size
        self.is_reverse = False
        self.has_initial_state = False
        self.act_cell = 'tanh'
        self.act_gate = 'sigmoid'
        self.act_cand = 'tanh'
        self.use_peepholes = False  # LSTM u8 doesn't support peepholes
        self.use_mkldnn = True
        self.mkldnn_data_type = "int8"
        self.force_fp32_output = False
        self.error_margin = 1e-5
        self.set_confs()

        # RNN dimensions: T = total time steps in the batch, N = batch size
        T = sum(self.lod[0])
        N = len(self.lod[0])

        # Input data
        x_f32 = np.random.rand(T, self.IC).astype('float32') * 2 - 1
        scale_data = 63.0
        shift_data = 64.0
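        # Asymmetric quantization: x_u8 = round(63 * x + 64), mapping [-1, 1] to [1, 127]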
        x_u8 = np.rint(x_f32 * scale_data + shift_data).astype(np.uint8)

        # WeightX/WeightH data
        wx = np.random.rand(self.IC, 4 * self.OC).astype('float32') * 2 - 1
        wh = np.random.rand(self.OC, 4 * self.OC).astype('float32') * 2 - 1

        # Calculating weight scales
        # scales = 127 / max(abs(channel_wise(concat(weightsX, weightsH))))
        s8_max = 127.0

        scale_weights = s8_max / np.max(
            np.abs(np.concatenate([wx[:, :], wh[:, :]], axis=0)), axis=0
        )
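        # One scale per output channel, i.e. shape (4 * OC,)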

        scale_weights = scale_weights.astype('float')

        if self.use_peepholes:
            b = np.random.rand(1, 7 * self.OC).astype('float32')
        else:
            b = np.random.rand(1, 4 * self.OC).astype('float32')
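        # b holds the gate bias in its first 4 * OC entries; with peepholes
        # enabled, the remaining 3 * OC entries are the peephole weights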
        w_b = np.copy(b[:, 0 : 4 * self.OC])
        w_c = b[:, 4 * self.OC :] if self.use_peepholes else None

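        # The reference implementation takes the input-projection bias bx
        # separately, while the fused op expects it folded into Bias below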
        bx = np.random.normal(size=(1, 4 * self.OC)).astype('float32')
        b[0, 0 : 4 * self.OC] += bx[0, :]

        if self.has_initial_state:
            h0 = np.random.rand(N, self.OC).astype('float32')
            c0 = np.random.rand(N, self.OC).astype('float32')
        else:
            h0 = np.zeros((N, self.OC)).astype('float32')
            c0 = np.zeros((N, self.OC)).astype('float32')

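        # Compute the fp32 reference output with the Python fusion_lstm implementation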
        hidden_f32, c = fusion_lstm(
            x_f32,
            self.lod,
            wx,
            bx,
            h0,
            c0,
            wh,
            w_b,
            w_c,
            self.is_reverse,
            ACTIVATION[self.act_gate],
            ACTIVATION[self.act_cell],
            ACTIVATION[self.act_cand],
        )

        self.inputs = {
            'X': (x_u8, self.lod),
            'WeightX': wx,
            'WeightH': wh,
            'Bias': b,
        }

        if self.has_initial_state:
            self.inputs['H0'] = h0
            self.inputs['C0'] = c0

        if self.force_fp32_output:
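            # fp32 output of the int8 kernel: loosen the tolerance to absorb
            # quantization error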
            self.error_margin = 1e-1
            self.outputs = {
                'Hidden': (hidden_f32, self.lod),
                'Cell': (c, self.lod),
            }
        else:
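            # u8 output: allow a difference of up to 2 quantization steps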
            self.error_margin = 2
            hidden_u8 = np.rint(hidden_f32 * scale_data + shift_data).astype(
                np.uint8
            )
            self.outputs = {
                'Hidden': (hidden_u8, self.lod),
                'Cell': (c, self.lod),
            }

        self.attrs = {
            'gate_activation': self.act_gate,
            'cell_activation': self.act_cell,
            'candidate_activation': self.act_cand,
            'is_reverse': self.is_reverse,
            'use_peepholes': self.use_peepholes,
            'use_mkldnn': self.use_mkldnn,
            'mkldnn_data_type': self.mkldnn_data_type,
            'force_fp32_output': self.force_fp32_output,
            'Scale_data': scale_data,
            'Shift_data': shift_data,
            'Scale_weights': scale_weights,
        }

    def test_check_output(self):
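        # Exercise both settings of the use_seq attribute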
        for use_seq in {True, False}:
            self.attrs['use_seq'] = use_seq
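            # Cell is excluded from the comparison; only Hidden is checked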
            self.check_output(
                check_dygraph=False,
                no_check_set=["Cell"],
                atol=self.error_margin,
            )


class TestFusionLSTMINT8MKLDNNOp2(TestFusionLSTMINT8MKLDNNOp):
    def set_confs(self):
        self.force_fp32_output = True


class TestFusionLSTMINT8MKLDNNOp4(TestFusionLSTMINT8MKLDNNOp):
    def set_confs(self):
        self.is_reverse = True


class TestFusionLSTMINT8MKLDNNOp5(TestFusionLSTMINT8MKLDNNOp):
    def set_confs(self):
        self.has_initial_state = True


if __name__ == "__main__":
    from paddle import enable_static

    enable_static()
    unittest.main()