From 0c43ce2275f0748a6016e93390eeee8d40f56401 Mon Sep 17 00:00:00 2001
From: arlesniak
Date: Mon, 7 Feb 2022 08:17:41 +0100
Subject: [PATCH] Update BF16 amp list (#39304)

* amp list updated

* tests updated

* gray list updated

* amp list updated

* test updated
---
 .../contrib/mixed_precision/bf16/amp_lists.py | 15 ++++++-----
 .../fluid/contrib/tests/test_bf16_utils.py    | 16 ++++++------
 .../contrib/tests/test_model_cast_to_bf16.py  | 26 ++++++++++++++++---
 3 files changed, 40 insertions(+), 17 deletions(-)

diff --git a/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py b/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py
index bbabbaa007..3f799809af 100644
--- a/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py
+++ b/python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py
@@ -83,15 +83,18 @@ class AutoMixedPrecisionListsBF16(object):
 bf16_initializer_list = {'fill_constant', 'uniform_random'}
 
 # always bf16
-bf16_list = {'elementwise_add', 'mul'}
+bf16_list = {
+    'conv2d',
+    'matmul',
+    'matmul_v2',
+    'mul',
+}
 
 # depends on the prev_op type
 gray_list = {
-    'cast',
-    'fill_constant',
-    'reduce_mean',
-    'reshape2',
-    'scale',
+    'elementwise_add', 'elementwise_sub', 'elementwise_mul', 'elementwise_div',
+    'relu', 'layer_norm', 'slice', 'concat', 'uniform_random', 'reshape2',
+    'transpose2', 'pool2d', 'sigmoid', 'cast', 'scale', 'fill_constant', 'split'
 }
 
 _, _, _sys_unsupported_bf16_list = core.op_supported_infos(
diff --git a/python/paddle/fluid/contrib/tests/test_bf16_utils.py b/python/paddle/fluid/contrib/tests/test_bf16_utils.py
index 41aa5e5412..a1439c487b 100644
--- a/python/paddle/fluid/contrib/tests/test_bf16_utils.py
+++ b/python/paddle/fluid/contrib/tests/test_bf16_utils.py
@@ -57,20 +57,20 @@ class AMPTest(unittest.TestCase):
         self.amp_lists_ = amp.bf16.AutoMixedPrecisionListsBF16({'lstm'})
 
     def test_amp_lists_4(self):
-        # 4. w=None, b={'elementwise_add'}
-        self.bf16_list.remove('elementwise_add')
-        self.fp32_list.add('elementwise_add')
+        # 4. w=None, b={'matmul_v2'}
+        self.bf16_list.remove('matmul_v2')
+        self.fp32_list.add('matmul_v2')
 
         self.amp_lists_ = amp.bf16.AutoMixedPrecisionListsBF16(
-            custom_fp32_list={'elementwise_add'})
+            custom_fp32_list={'matmul_v2'})
 
     def test_amp_lists_5(self):
-        # 5. w=None, b={'elementwise_add'}
-        self.fp32_list.add('elementwise_add')
-        self.bf16_list.remove('elementwise_add')
+        # 5. w=None, b={'matmul_v2'}
+        self.fp32_list.add('matmul_v2')
+        self.bf16_list.remove('matmul_v2')
 
         self.amp_lists_ = amp.bf16.AutoMixedPrecisionListsBF16(
-            custom_fp32_list={'elementwise_add'})
+            custom_fp32_list={'matmul_v2'})
 
     def test_amp_lists_6(self):
         # 6. w=None, b={'lstm'}
diff --git a/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py b/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py
index 470073543c..5362a6ecd1 100644
--- a/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py
+++ b/python/paddle/fluid/contrib/tests/test_model_cast_to_bf16.py
@@ -19,6 +19,7 @@ import paddle.fluid as fluid
 import contextlib
 import unittest
 import numpy as np
+import struct
 import paddle.fluid.layers as layers
 import paddle.static.amp as amp
 from paddle.fluid import core
@@ -26,6 +27,20 @@ from paddle.fluid import core
 
 paddle.enable_static()
 
 
+def convert_uint16_to_float(in_list):
+    if in_list.dtype == np.uint16:
+        in_list = np.asarray(in_list)
+        out = np.vectorize(
+            lambda x: struct.unpack('<f', struct.pack('<I', x << 16))[0],
+            otypes=[np.float32])(in_list.flat)
+        return np.reshape(out, in_list.shape)
+    else:
+        return in_list
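
The updated tests move the custom-list checks from 'elementwise_add' (now a
gray-list op) to 'matmul_v2' (now in the default bf16 list). A minimal
standalone sketch of the behavior tests 4 and 5 assert, using only names that
appear in the patch; the instance attributes bf16_list and fp32_list are
assumed to match the fields the test class compares against:

    import paddle
    import paddle.static.amp as amp

    paddle.enable_static()

    # Forcing an op from the default bf16 list to fp32 via custom_fp32_list
    # must move it between the two sets on the constructed lists object.
    amp_lists = amp.bf16.AutoMixedPrecisionListsBF16(
        custom_fp32_list={'matmul_v2'})
    assert 'matmul_v2' not in amp_lists.bf16_list
    assert 'matmul_v2' in amp_lists.fp32_list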
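The new convert_uint16_to_float helper relies on the bfloat16 layout: a bf16
value stored as np.uint16 is exactly the upper 16 bits of the corresponding
IEEE-754 float32 bit pattern, so shifting left by 16 and reinterpreting the
bits recovers a precision-truncated float32. A self-contained sketch of that
round trip (the helper name bf16_bits_to_float is illustrative, not from the
patch):

    import struct

    import numpy as np

    def bf16_bits_to_float(x):
        # x holds a bf16 bit pattern as an unsigned 16-bit integer; widen it
        # to the upper half of a 32-bit word and reinterpret as float32.
        return struct.unpack('<f', struct.pack('<I', int(x) << 16))[0]

    print(bf16_bits_to_float(np.uint16(0x3f80)))  # 1.0
    print(bf16_bits_to_float(np.uint16(0x4049)))  # 3.140625 (pi truncated to bf16)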