# test_fake_quantize_op.py
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest


class TestFakeQuantizeOp(OpTest):
    """Test fake_quantize_abs_max: quantize X by its single global abs-max scale."""

    def setUp(self):
        self.op_type = "fake_quantize_abs_max"
        self.attrs = {'bit_length': 8}
        self.inputs = {'X': np.random.random((124, 240)).astype("float32"), }
        # The scale is the largest absolute value in X; quantized levels
        # span [-(2^(bit_length-1) - 1), 2^(bit_length-1) - 1].
        scale = np.max(np.abs(self.inputs['X'])).astype("float32")
        self.outputs = {
            'Out': np.round(self.inputs['X'] / scale * (
                (1 << (self.attrs['bit_length'] - 1)) - 1)),
            'OutScale': np.array(scale).astype("float32"),
        }

    def test_check_output(self):
        # Compare the operator's output against the numpy reference above.
        self.check_output()


class TestFakeChannelWiseQuantizeOp(OpTest):
    """Test fake_channel_wise_quantize_abs_max: per-slice abs-max quantization
    along axis 0, producing one scale per slice."""

    def setUp(self):
        self.op_type = "fake_channel_wise_quantize_abs_max"
        self.attrs = {'bit_length': 8}
        self.inputs = {
            'X': np.random.random((4, 3, 64, 64)).astype("float32"),
        }
        data = self.inputs['X']
        max_level = (1 << (self.attrs['bit_length'] - 1)) - 1
        # One abs-max scale per slice along the leading axis.
        scales = [np.max(np.abs(data[idx])).astype("float32")
                  for idx in range(data.shape[0])]
        quantized = data.copy()
        for idx, slice_scale in enumerate(scales):
            quantized[idx] = np.round(quantized[idx] / slice_scale * max_level)

        self.outputs = {
            'Out': quantized,
            'OutScales': np.array(scales).astype("float32"),
        }

    def test_check_output(self):
        self.check_output()


class TestFakeQuantizeRangeAbsMaxOp(OpTest):
    """Test fake_quantize_range_abs_max in training mode (is_test=False).

    With window_size=1 and Iter=0 the expected output scale is simply the
    abs-max of X, recorded in both OutScale and slot 0 of OutScales.
    """

    def setUp(self):
        self.op_type = "fake_quantize_range_abs_max"
        self.attrs = {
            'bit_length': int(5),
            'window_size': int(1),
            'is_test': False
        }
        # Inputs centred on zero so abs-max is exercised on both signs.
        x = (np.random.random((8, 16, 7, 7)) - 0.5) * 10
        x = x.astype("float32")
        self.inputs = {
            'X': x,
            'Iter': np.zeros(1).astype("int64"),
            'InScale': np.zeros(1).astype("float32")
        }
        scale = np.max(np.abs(self.inputs['X'])).astype("float32")
        out_scales = np.zeros(self.attrs['window_size']).astype("float32")
        out_scales[0] = scale
        self.outputs = {
            'Out': np.round(self.inputs['X'] / scale * (
                (1 << (self.attrs['bit_length'] - 1)) - 1)),
            'OutScale': scale,
            'OutScales': out_scales,
        }

    def test_check_output(self):
        # Compare the operator's output against the numpy reference above.
        self.check_output()


class TestFakeQuantizeRangeAbsMaxOp2(OpTest):
    """Test fake_quantize_range_abs_max in inference mode (is_test=True):
    X is clipped to [-InScale, InScale] before quantization."""

    def setUp(self):
        self.op_type = "fake_quantize_range_abs_max"
        self.attrs = {
            'bit_length': int(8),
            'window_size': int(1),
            'is_test': True
        }
        x = (np.random.random((8, 16, 7, 7)) - 0.5) * 10
        x = x.astype("float32")
        # Shrink the scale below abs-max so the clipping path is exercised.
        scale = np.max(np.abs(x)).astype("float32") - 1.0
        out_scales = np.zeros(self.attrs['window_size']).astype("float32")
        out_scales[0] = scale

        self.inputs = {
            'X': x,
            'Iter': np.zeros(1).astype("int64"),
            'InScale': scale.astype("float32")
        }
        clipped = np.clip(x, -scale, scale)
        max_level = (1 << (self.attrs['bit_length'] - 1)) - 1
        expected = np.round(clipped / scale * max_level)
        self.outputs = {
            'Out': expected,
            'OutScale': scale.astype("float32"),
            'OutScales': out_scales,
        }

    def test_check_output(self):
        # Scale outputs are pass-throughs in test mode; skip checking them.
        self.check_output(no_check_set=set(['OutScale', 'OutScales']))


# Run all test cases when this file is executed as a script.
if __name__ == "__main__":
    unittest.main()