#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.test_fusion_gru_op import fusion_gru
from paddle.fluid.tests.unittests.test_fusion_lstm_op import ACTIVATION


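# This test builds an fp32 reference result with the Python fusion_gru helper
# and feeds bf16 (uint16-packed) copies of the same data to the fused GRU
# oneDNN kernel, comparing the resulting 'Hidden' tensors.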
@unittest.skipIf(not core.supports_bfloat16(),
                 "place does not support BF16 evaluation")
class TestFusionGRUBF16MKLDNNOp(OpTest):
    def set_confs(self):
        pass

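    # Check the fused kernel output with the 'use_seq' attribute both enabled
    # and disabled.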
    def test_check_output(self):
        for use_seq in {True, False}:
            self.attrs['use_seq'] = use_seq
            self.check_output(check_dygraph=False)

    def setUp(self):
        self.op_type = "fusion_gru"
        self.lod = [[2, 4, 3]]
        self.M = 3
        self.D = 5
        self.is_reverse = False
        self.with_h0 = False
        self.use_mkldnn = True
        self._cpu_only = True
        self.with_bias = True
        self.act_state = 'tanh'
        self.act_gate = 'sigmoid'
        self.origin_mode = False
        self.mkldnn_data_type = "bfloat16"
        self.force_fp32_output = False
        self.weights_dtype = 'fp32'
        self.set_confs()

        T = sum(self.lod[0])
        N = len(self.lod[0])

        # fp32 X input for the reference implementation and the
        # corresponding bf16 data used as input to the oneDNN bf16 GRU kernel
        x_fp32 = np.random.rand(T, self.M).astype('float32')
        x_bf16 = convert_float_to_uint16(x_fp32)

        wx_fp32 = np.random.rand(self.M, 3 * self.D).astype('float32')
        wh_fp32 = np.random.rand(self.D, 3 * self.D).astype('float32')

        wx_bf16 = convert_float_to_uint16(wx_fp32)
        wh_bf16 = convert_float_to_uint16(wh_fp32)

        # bias is fp32 despite other inputs being in bf16
        bias = np.random.rand(
            1, 3 * self.D).astype('float32') if self.with_bias else np.zeros(
                (1, 3 * self.D), dtype='float32')

        h0_fp32 = np.random.rand(
            N, self.D).astype('float32') if self.with_h0 else np.zeros(
                (N, self.D), dtype='float32')

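        # Reference hidden state computed with the Python fusion_gru
        # implementation on the fp32 data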
        _, _, _, hidden = fusion_gru(x_fp32, self.lod, h0_fp32, wx_fp32,
                                     wh_fp32, bias, self.is_reverse,
                                     self.origin_mode,
                                     ACTIVATION[self.act_state],
                                     ACTIVATION[self.act_gate])

        hidden_bf16 = convert_float_to_uint16(hidden)

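        # X is always passed in bf16; the weights are passed either in bf16 or
        # fp32, depending on self.weights_dtype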
        if self.weights_dtype == 'bf16':
            self.inputs = {
                'X': (x_bf16, self.lod),
                'WeightX': wx_bf16,
                'WeightH': wh_bf16
            }
        elif self.weights_dtype == 'fp32':
            self.inputs = {
                'X': (x_bf16, self.lod),
                'WeightX': wx_fp32,
                'WeightH': wh_fp32
            }

        if self.with_bias:
            self.inputs['Bias'] = bias

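        # bf16 copy of the initial hidden state, used when weights are in bf16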
        h0_bf16 = convert_float_to_uint16(h0_fp32)

        if self.with_h0:
            if self.weights_dtype == 'bf16':
                self.inputs['H0'] = h0_bf16
            elif self.weights_dtype == 'fp32':
                self.inputs['H0'] = h0_fp32

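        # The expected 'Hidden' output is the fp32 reference result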
        self.outputs = {'Hidden': (hidden, self.lod)}

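        # Attributes forwarded to the fused operator; 'mkldnn_data_type'
        # selects the bf16 oneDNN path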
        self.attrs = {
            'activation': self.act_state,
            'gate_activation': self.act_gate,
            'is_reverse': self.is_reverse,
            'origin_mode': self.origin_mode,
            'force_fp32_output': self.force_fp32_output,
            'use_mkldnn': self.use_mkldnn,
            'mkldnn_data_type': self.mkldnn_data_type,
        }


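# Variant that explicitly keeps origin_mode disabled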
class TestFusionGRUBF16MKLDNNOp2(TestFusionGRUBF16MKLDNNOp):
    def set_confs(self):
        self.origin_mode = False


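# Variant without the bias input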
class TestFusionGRUBF16MKLDNNOp3(TestFusionGRUBF16MKLDNNOp):
    def set_confs(self):
        self.with_bias = False


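# Variant that also passes the weights (and H0) in bf16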
class TestFusionGRUBF16MKLDNNBF16WeightsOp(TestFusionGRUBF16MKLDNNOp):
    def set_confs(self):
        self.weights_dtype = 'bf16'


if __name__ == "__main__":
    from paddle import enable_static
    enable_static()
    unittest.main()