Unverified commit 8a07d02c, authored by Allen Guo, committed by GitHub

fix UTs on physical ipu (#44647)

Parent be132719
@@ -179,7 +179,7 @@ class IPUOpTest(IPUTest):
     @classmethod
     def cast_model_to_fp16(cls, main_program):
         amp_list = paddle.static.amp.CustomOpLists()
-        amp_list.unsupported_list = {}
+        amp_list.unsupported_list = {'scale'}
         to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
             main_program, amp_list, use_fp16_guard=False)
         paddle.static.amp.cast_parameters_to_fp16(
...
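For reference, a minimal sketch (not part of this commit) of what the patched helper now does: op types listed in unsupported_list are kept in fp32 when the program is cast, so adding 'scale' stops scale ops from being converted. The tiny program below is hypothetical and assumes a Paddle build with the static AMP API used above.

    import paddle

    paddle.enable_static()
    main_program = paddle.static.Program()
    with paddle.static.program_guard(main_program):
        x = paddle.static.data(name='x', shape=[4, 4], dtype='float32')
        y = paddle.scale(x, scale=2.0)  # 'scale' is unsupported, stays fp32
        z = paddle.matmul(y, y)         # eligible for fp16 conversion

    amp_list = paddle.static.amp.CustomOpLists()
    amp_list.unsupported_list = {'scale'}
    to_fp16_var_names = paddle.static.amp.cast_model_to_fp16(
        main_program, amp_list, use_fp16_guard=False)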
@@ -35,7 +35,7 @@ class TestBase(IPUOpTest):
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 3, 3, 3])
-        self.feed_fp32 = {'x': data.astype(np.float32)}
+        self.feed_fp32 = {'x': data.astype(np.float16)}

     def set_feed_attr(self):
         self.feed_shape = [x.shape for x in self.feed_fp32.values()]
@@ -44,7 +44,7 @@ class TestBase(IPUOpTest):
     def set_op_attrs(self):
         self.attrs = {}
-        self.attrs['dtype'] = 'float16'
+        self.attrs['dtype'] = 'float32'

     @IPUOpTest.static_graph
     def build_model(self):
@@ -86,14 +86,19 @@ class TestEnableFp16(TestBase):
 class TestCase2(TestBase):

+    def set_atol(self):
+        super().set_atol()
+        self.atol = 1e-3
+        self.rtol = 1e-3
+
     def set_data_feed(self):
         self.feed_fp32 = {
-            "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float16'),
+            "x": np.random.uniform(size=[1, 3, 3, 3]).astype('float32'),
         }

     def set_op_attrs(self):
         self.attrs = {}
-        self.attrs['dtype'] = 'float32'
+        self.attrs['dtype'] = 'float16'


 class TestCase3(TestBase):
@@ -145,7 +150,7 @@ class TestCase6(TestBase):

 @unittest.skip('float64 is not supported')
-class TestCase2(TestBase):
+class TestCase7(TestBase):

     def set_op_attrs(self):
         self.attrs = {}
@@ -153,7 +158,7 @@ class TestCase2(TestBase):

 @unittest.skip('skip float16 to float32')
-class TestCase3(TestBase):
+class TestCase8(TestBase):

     def set_data_feed(self):
         self.feed_fp32 = {
@@ -166,7 +171,7 @@ class TestCase3(TestBase):

 @unittest.skip('int32 to int8 is not supported')
-class TestCase4(TestBase):
+class TestCase9(TestBase):

     def set_atol(self):
         super().set_atol()
...
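The renames above (TestCase7, TestCase8, TestCase9) fix duplicate class names: the old file defined TestCase2/3/4 twice, so the later definitions shadowed the earlier ones and those tests never ran. For orientation, a minimal sketch of the cast pattern this file exercises, run on CPU here purely for illustration (the real tests target an IPUPlace):

    import numpy as np
    import paddle

    paddle.enable_static()
    main_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog):
        x = paddle.static.data(name='x', shape=[1, 3, 3, 3], dtype='float16')
        out = paddle.cast(x, 'float32')  # mirrors self.attrs['dtype']

    exe = paddle.static.Executor(paddle.CPUPlace())
    feed = {'x': np.random.uniform(size=[1, 3, 3, 3]).astype(np.float16)}
    res, = exe.run(main_prog, feed=feed, fetch_list=[out])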
@@ -113,8 +113,8 @@ class TestTrainCase2(TestBase):
     def set_atol(self):
         self.atol = 7e-4
         self.rtol = 1e-6
-        self.atol_fp16 = 4e-3
-        self.rtol_fp16 = 1e-3
+        self.atol_fp16 = 1e-2
+        self.rtol_fp16 = 1e-2

     def set_op_attrs(self):
         self.attrs = {
...
@@ -30,7 +30,7 @@ class TestBase(IPUOpTest):
         self.set_op_attrs()

     def set_data_feed(self):
-        x = np.random.uniform(size=[2, 3, 6, 10])
+        x = np.random.uniform(size=[1, 2, 6, 10])
         self.feed_fp32 = {"x": x.astype(np.float32)}
         self.feed_fp16 = {"x": x.astype(np.float16)}
...
@@ -36,8 +36,8 @@ class TestBase(IPUOpTest):
         self.rtol_fp16 = 1e-3

     def set_feed(self):
-        data1 = np.random.uniform(size=[100])
-        data2 = np.random.uniform(size=[200])
+        data1 = np.random.uniform(size=[10])
+        data2 = np.random.uniform(size=[20])
         self.feed_fp32 = {
             'x': data1.astype(np.float32),
             'y': data2.astype(np.float32)
...
@@ -31,17 +31,18 @@ class TestBase(IPUOpTest):
         self.set_attrs()

     def set_atol(self):
-        self.atol = 2e-6
-        self.rtol = 1e-5
+        super().set_atol()
+        self.atol = 1e-6
+        self.rtol = 1e-3
         self.atol_fp16 = 1e-2
-        self.rtol_fp16 = 1e-3
+        self.rtol_fp16 = 1e-1

     def set_training(self):
         self.is_training = True
         self.epoch = 20

     def set_data_feed(self):
-        data = np.random.uniform(size=[1, 3, 28, 28])
+        data = np.random.uniform(size=[1, 3, 10, 10])
         self.feed_fp32 = {"in_0": data.astype(np.float32)}

     def set_feed_attr(self):
@@ -73,7 +74,7 @@ class TestBase(IPUOpTest):
         # using fp16
         with paddle.static.amp.fp16_guard():
-            x = paddle.static.nn.conv2d(input=x, num_filters=6, filter_size=3)
+            x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
             x = paddle.static.nn.batch_norm(x, act='relu')
             x = F.max_pool2d(x, kernel_size=2, stride=2)
@@ -82,9 +83,9 @@ class TestBase(IPUOpTest):
         loss = paddle.mean(x)

         # optimizer
-        optimizer = paddle.optimizer.Adam(learning_rate=1e-2)
+        optimizer = paddle.optimizer.Adam(learning_rate=1e-3)
         optimizer.minimize(loss, self.startup_prog)
-        self.fetch_list = [loss.name]
+        self.fetch_list = [x.name]

     def run_model(self, exec_mode):
         # cast model to fp16
...
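For context, a hedged sketch of the fp16_guard mechanism this test relies on: when a program is later cast with use_fp16_guard=True, only ops created inside the guard are converted, which is why the conv/batch_norm/pool block is scoped while the loss stays in fp32. Shapes and hyperparameters mirror the test, but the snippet itself is illustrative:

    import paddle
    import paddle.nn.functional as F

    paddle.enable_static()
    main_prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog, startup_prog):
        x = paddle.static.data(name='in_0', shape=[1, 3, 10, 10], dtype='float32')
        with paddle.static.amp.fp16_guard():
            x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
            x = paddle.static.nn.batch_norm(x, act='relu')
            x = F.max_pool2d(x, kernel_size=2, stride=2)
        loss = paddle.mean(x)
        paddle.optimizer.Adam(learning_rate=1e-3).minimize(loss, startup_prog)

    # only the guarded ops are cast; everything else stays fp32
    amp_list = paddle.static.amp.CustomOpLists()
    paddle.static.amp.cast_model_to_fp16(main_prog, amp_list, use_fp16_guard=True)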
@@ -45,78 +45,73 @@ class TestBase(IPUOpTest):
         self.attrs = {}
         self.attrs['steps'] = 100
         self.attrs['save_at_step'] = 20
-        self.attrs['enable_fp16'] = False
         self.attrs['model_path'] = tempfile.TemporaryDirectory()

     def set_optimizer(self):
         self.optimizer = partial(paddle.optimizer.SGD, learning_rate=1e-1)

-    def _test_base(self, save_otherwise_load):
-        scope = paddle.static.Scope()
-        main_prog = paddle.static.Program()
-        startup_prog = paddle.static.Program()
-        main_prog.random_seed = self.SEED
-        startup_prog.random_seed = self.SEED
+    @IPUOpTest.static_graph
+    def build_model(self):
         generator = paddle.fluid.unique_name.UniqueNameGenerator()
         with paddle.fluid.unique_name.guard(generator):
-            with paddle.static.scope_guard(scope):
-                with paddle.static.program_guard(main_prog, startup_prog):
-                    x = paddle.static.data(name=self.feed_list[0],
-                                           shape=self.feed_shape[0],
-                                           dtype='float32')
-                    conv1 = paddle.static.nn.conv2d(x,
-                                                    num_filters=3,
-                                                    filter_size=3,
-                                                    bias_attr=False,
-                                                    name='conv2d')
-                    loss = paddle.mean(conv1)
-
-                    # apply optimizer
-                    self.optimizer().minimize(loss)
-                    fetch_list = [loss.name]
-
-                place = paddle.IPUPlace()
-                exe = paddle.static.Executor(place)
-                exe.run(startup_prog)
-
-                if not save_otherwise_load:
-                    paddle.static.load(main_prog, self.attrs['model_path'].name)
-
-                ipu_strategy = paddle.static.IpuStrategy()
-                ipu_strategy.set_graph_config(is_training=True)
-                ipu_strategy.set_precision_config(
-                    enable_fp16=self.attrs['enable_fp16'])
-                ipu_program = paddle.static.IpuCompiledProgram(
-                    main_prog, ipu_strategy=ipu_strategy)
-                program = ipu_program.compile(self.feed_list, fetch_list)
-
-                result = []
-                run_steps = self.attrs['steps'] if save_otherwise_load \
-                    else self.attrs['steps'] - self.attrs['save_at_step']
-
-                feed = self.feed_fp16 if self.attrs[
-                    'enable_fp16'] else self.feed_fp32
-                for i in range(run_steps):
-                    tmp = exe.run(program, feed=feed, fetch_list=fetch_list)
-
-                    if save_otherwise_load and \
-                        i == self.attrs['save_at_step'] - 1:
-                        ipu_program._backend.weights_to_host()
-                        paddle.static.save(main_prog,
-                                           self.attrs['model_path'].name)
-
-                    if save_otherwise_load and i >= self.attrs['save_at_step']:
-                        result.append(tmp)
-                    elif not save_otherwise_load:
-                        result.append(tmp)
-
-                return np.asarray(result).flatten()
+            x = paddle.static.data(name=self.feed_list[0],
+                                   shape=self.feed_shape[0],
+                                   dtype='float32')
+            conv1 = paddle.static.nn.conv2d(x,
+                                            num_filters=3,
+                                            filter_size=3,
+                                            bias_attr=False,
+                                            name='conv2d')
+            loss = paddle.mean(conv1)
+            # apply optimizer
+            self.optimizer().minimize(loss)
+            self.fetch_list = [loss.name]
+
+    def run_model(self, exec_mode, save_otherwise_load):
+        self.build_model()
+
+        place = paddle.IPUPlace()
+        exe = paddle.static.Executor(place)
+        exe.run(self.startup_prog)
+
+        if not save_otherwise_load:
+            paddle.static.load(self.main_prog, self.attrs['model_path'].name)
+
+        ipu_strategy = paddle.static.IpuStrategy()
+        ipu_strategy.set_graph_config(is_training=True)
+        if self.is_fp16_mode(exec_mode):
+            ipu_strategy.set_precision_config(enable_fp16=True)
+            IPUOpTest.cast_model_to_fp16(self.main_prog)
+        ipu_compiler = paddle.static.IpuCompiledProgram(
+            self.main_prog, ipu_strategy=ipu_strategy)
+        program = ipu_compiler.compile(self.feed_list, self.fetch_list)
+
+        feed = self.feed_fp32
+        if self.is_fp16_mode(exec_mode):
+            feed = self.feed_fp16
+
+        result = []
+        run_steps = self.attrs['steps'] if save_otherwise_load \
+            else self.attrs['steps'] - self.attrs['save_at_step']
+        for i in range(run_steps):
+            tmp = exe.run(program, feed=feed, fetch_list=self.fetch_list)
+
+            if save_otherwise_load and \
+                i == self.attrs['save_at_step'] - 1:
+                ipu_compiler._backend.weights_to_host()
+                paddle.static.save(self.main_prog,
+                                   self.attrs['model_path'].name)
+
+            if save_otherwise_load and i >= self.attrs['save_at_step']:
+                result.append(tmp)
+            elif not save_otherwise_load:
+                result.append(tmp)
+
+        return np.asarray(result)

     def test_base(self):
-        res0 = self._test_base(True)
-        res1 = self._test_base(False)
+        res0 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP32, True)
+        res1 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP32, False)

         self.assertTrue(
             np.allclose(res0.flatten(), res1.flatten(), atol=self.atol))

         self.attrs['model_path'].cleanup()
@@ -185,12 +180,18 @@ class TestSGDFP16(TestBase):
         self.attrs = {}
         self.attrs['steps'] = 100
         self.attrs['save_at_step'] = 20
-        self.attrs['enable_fp16'] = True
         self.attrs['model_path'] = tempfile.TemporaryDirectory()

     def set_optimizer(self):
         self.optimizer = partial(paddle.optimizer.SGD, learning_rate=1e-1)

+    def test_base(self):
+        res0 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP16, True)
+        res1 = self.run_model(IPUOpTest.ExecutionMode.IPU_FP16, False)
+        self.assertTrue(
+            np.allclose(res0.flatten(), res1.flatten(), atol=self.atol))
+        self.attrs['model_path'].cleanup()
+

 class TestMomentumFp16(TestSGDFP16):
...
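The step accounting in run_model is the crux of the save/load check: a save-run keeps results from save_at_step onward, while a load-run restores the snapshot and replays only the remaining steps, so both sides produce the same number of comparable results. A standalone sketch of that arithmetic (values mirror the test's attrs):

    # save-run: train all 100 steps, keep results from step 20 on
    # load-run: restore the step-20 snapshot, train the remaining 80 steps
    steps, save_at_step = 100, 20
    kept_by_save_run = steps - save_at_step
    replayed_by_load_run = steps - save_at_step
    assert kept_by_save_run == replayed_by_load_run == 80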
@@ -28,6 +28,10 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_attrs()

+    @property
+    def fp16_enabled(self):
+        return False
+
     def set_training(self):
         self.is_training = True
         self.epoch = 100
...
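The new fp16_enabled property lets a test opt out of fp16 execution modes, which matters on physical IPUs where some training tests only pass in fp32. A hypothetical sketch of how a harness can honor such a flag (the actual IPUOpTest dispatch may differ):

    from enum import Enum

    class ExecutionMode(Enum):
        IPU_FP32 = 1
        IPU_FP16 = 2

    def modes_to_run(fp16_enabled):
        # skip fp16 modes when a test opts out via its fp16_enabled property
        return [m for m in ExecutionMode if fp16_enabled or 'FP16' not in m.name]

    assert modes_to_run(False) == [ExecutionMode.IPU_FP32]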
@@ -83,7 +83,7 @@ class TestBase(IPUOpTest):
         loss = paddle.mean(conv1)

         opt = paddle.optimizer.Lamb(
-            learning_rate=1e-1,
+            learning_rate=1e-3,
             lamb_weight_decay=self.attrs['weight_decay'],
             exclude_from_weight_decay_fn=exclude_fn)
         opt.minimize(loss)
...
@@ -29,6 +29,12 @@ class TestBase(IPUOpTest):
         self.set_feed_attr()
         self.set_op_attrs()

+    def set_atol(self):
+        self.atol = 1e-6
+        self.rtol = 1e-6
+        self.atol_fp16 = 1e-2
+        self.rtol_fp16 = 1e-2
+
     def set_data_feed(self):
         data = np.random.uniform(size=[1, 255, 13, 13])
         self.feed_fp32 = {"in_0": data.astype(np.float32)}
...