From 146ed4099d05d7510ee36b7c449dd566b56b49ba Mon Sep 17 00:00:00 2001
From: "joanna.wozna.intel"
Date: Thu, 13 Feb 2020 04:28:05 +0100
Subject: [PATCH] Add test with reused requantize op (#22482)

test=develop
---
 .../mkldnn/test_requantize_mkldnn_op.py | 67 ++++++++++++++++---
 1 file changed, 57 insertions(+), 10 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_requantize_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_requantize_mkldnn_op.py
index 4a48453da52..750e7f37df4 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_requantize_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_requantize_mkldnn_op.py
@@ -16,6 +16,8 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
+import paddle.fluid as fluid
+import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest
 from mkldnn_op_test import format_reorder
 
@@ -25,29 +27,31 @@ class TestReQuantizeOp(OpTest):
         self.op_type = 'requantize'
         self.scale_in = 2.0
         self.scale_out = 1.5
-        self.input_size = [1, 1, 5, 5]
+        self.input_size = [1, 1, 10, 10]
         self.data_type = 'int8'
         self.set_scale()
         self.set_data_type()
+        self.prepare_inputs()
 
+    def prepare_inputs(self):
         scale_shift = self.scale_out / self.scale_in
 
         if self.data_type == 'int8':
-            input = (np.random.randint(0, 100, self.input_size) - 50
-                     ).astype(self.data_type)
-            output_tmp = np.round(input.astype('float32') *
+            self.input = (np.random.randint(0, 100, self.input_size) - 50
+                          ).astype(self.data_type)
+            output_tmp = np.round(self.input.astype('float32') *
                                   scale_shift).astype('int8')
         else:
-            input = (np.random.randint(0, 100,
-                                       self.input_size)).astype(self.data_type)
-            output_tmp = np.round(input.astype('float32') *
+            self.input = (np.random.randint(
+                0, 100, self.input_size)).astype(self.data_type)
+            output_tmp = np.round(self.input.astype('float32') *
                                   scale_shift).astype('uint8')
 
-        output = format_reorder(output_tmp, self.input_size)
+        self.output = format_reorder(output_tmp, self.input_size)
 
-        self.inputs = {'Input': OpTest.np_dtype_to_fluid_dtype(input)}
+        self.inputs = {'Input': OpTest.np_dtype_to_fluid_dtype(self.input)}
 
-        self.outputs = {'Output': output}
+        self.outputs = {'Output': self.output}
 
         self.attrs = {'Scale_in': self.scale_in, 'Scale_out': self.scale_out}
 
@@ -90,5 +94,48 @@ class TestReQuantizeOp4(TestReQuantizeOp2):
         self.data_type = 'uint8'
 
 
+#-------------------test reused requantize op---------------------------
+
+
+class TestReQuantizeOpReused(TestReQuantizeOp):
+    def setUp(self):
+        self.input_size = [1, 1, 10, 10]
+        self.data_type = 'int8'
+        self.set_scale()
+        self.prepare_inputs()
+
+    def set_scale(self):
+        self.scale_in = 0.1
+        self.scale_out = 0.2
+
+    def test_check_output(self):
+        variables = {
+            "input": self.input,
+            "output": self.output,
+        }
+        program = fluid.Program()
+        with fluid.program_guard(program):
+            block = program.global_block()
+            for name in variables:
+                block.create_var(
+                    name=name, dtype="int8", shape=variables[name].shape)
+            requant_op = block.append_op(
+                type="requantize",
+                inputs={'Input': block.var('input'), },
+                outputs={"Output": block.var('output')},
+                attrs={'Scale_in': self.scale_in,
+                       'Scale_out': self.scale_out})
+            place = core.CPUPlace()
+            exe = fluid.Executor(place)
+            for i in range(2):
+                out = exe.run(program,
+                              feed={'input': variables['input']},
+                              fetch_list=['output'])
+
+            self.assertTrue(
+                np.allclose(
+                    variables['output'], out[0], atol=1e-4), 'output')
+
+
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab
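
For context, a minimal standalone sketch of the scale arithmetic the new test relies on: requantize rescales an already-quantized tensor from Scale_in to Scale_out, so the expected output computed in prepare_inputs() is round(input * Scale_out / Scale_in) cast back to the integer type. This sketch is not part of the applied diff, uses plain numpy, the helper name is illustrative, and it deliberately omits the format_reorder layout step the test additionally applies to the expected output.

    import numpy as np

    def requantize_reference(x, scale_in, scale_out, out_dtype='int8'):
        # Rescale by Scale_out / Scale_in and round, mirroring the
        # scale_shift computation in TestReQuantizeOp.prepare_inputs().
        return np.round(x.astype('float32') * (scale_out / scale_in)).astype(out_dtype)

    # Same data range and scales as TestReQuantizeOpReused.
    x = (np.random.randint(0, 100, [1, 1, 10, 10]) - 50).astype('int8')
    y = requantize_reference(x, scale_in=0.1, scale_out=0.2)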