Unverified commit 60cf9b50, authored by 6clc, committed by GitHub

test(prim-cinn): split test_resnet and test_bert into three tests (#53723)

* test(prim-cinn): split test_resnet and test_bert into three tests

* test(prim-cinn): fix cmake file to run prim test in CINN-CI
Parent 772b4906
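The split leaves each model with three dynamic-to-static tests that differ only in which train() flags they exercise. A summary sketch, inferred from the test bodies and CMake targets below (the mapping for the surviving *_prim_cinn tests is inferred from their names, since their bodies are only partially shown here):

# Inferred from this commit, not part of it: train() flags per split test.
SPLIT_TESTS = {
    'test_bert_prim': dict(to_static=True, enable_prim=True, enable_cinn=False),
    'test_bert_cinn': dict(to_static=True, enable_prim=False, enable_cinn=True),
    'test_bert_prim_cinn': dict(to_static=True, enable_prim=True, enable_cinn=True),
    # the three test_resnet_* targets follow the same pattern
}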
@@ -8,12 +8,21 @@ foreach(TEST_OP ${TEST_OPS})
   py_test_modules(${TEST_OP} MODULES ${TEST_OP} ENVS ${GC_ENVS})
 endforeach()
 
-set_tests_properties(test_resnet_prim_cinn PROPERTIES TIMEOUT 850)
-set_tests_properties(test_bert_prim_cinn PROPERTIES TIMEOUT 500)
+set_tests_properties(test_resnet_prim PROPERTIES TIMEOUT 850)
+set_tests_properties(test_bert_prim PROPERTIES TIMEOUT 500)
 set_tests_properties(test_prim_simplenet_cinn PROPERTIES TIMEOUT 120)
 
 if(WITH_CINN)
+  set_tests_properties(test_resnet_cinn PROPERTIES TIMEOUT 850)
+  set_tests_properties(test_resnet_prim_cinn PROPERTIES TIMEOUT 850)
+  set_tests_properties(test_bert_cinn PROPERTIES TIMEOUT 500)
+  set_tests_properties(test_bert_prim_cinn PROPERTIES TIMEOUT 500)
+  set_tests_properties(test_resnet_prim PROPERTIES LABELS "RUN_TYPE=CINN")
+  set_tests_properties(test_resnet_cinn PROPERTIES LABELS "RUN_TYPE=CINN")
   set_tests_properties(test_resnet_prim_cinn PROPERTIES LABELS "RUN_TYPE=CINN")
+  set_tests_properties(test_bert_prim PROPERTIES LABELS "RUN_TYPE=CINN")
+  set_tests_properties(test_bert_cinn PROPERTIES LABELS "RUN_TYPE=CINN")
   set_tests_properties(test_bert_prim_cinn PROPERTIES LABELS "RUN_TYPE=CINN")
   set_tests_properties(test_prim_simplenet_cinn PROPERTIES LABELS
                        "RUN_TYPE=CINN")
......
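Tagging every new target with the RUN_TYPE=CINN label is what lets the CINN CI pipeline select these tests (the point of the second commit-message line); with plain CTest the equivalent selection would be a label filter such as ctest -L RUN_TYPE=CINN, though Paddle's CI drives this through its own scripts.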
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time
import unittest

import numpy as np
from bert import Bert, BertPretrainingCriterion, create_pretraining_dataset

import paddle
from paddle import fluid
from paddle.dataset.common import DATA_HOME, download
from paddle.fluid import core

SEED = 2023
BATCH_SIZE = 2

URL = 'https://paddle-ci.gz.bcebos.com/prim_cinn/bert_training_data.npz'
MODULE_NAME = 'test_bert_prim_cinn'
MD5SUM = '71e730ee8d7aa77a215b7e898aa089af'
SAVE_NAME = 'bert_training_data.npz'

DY2ST_CINN_GT = [
    10.649632453918457,
    10.333406448364258,
    10.33541202545166,
    10.260543823242188,
    10.219606399536133,
    10.176884651184082,
    10.124699592590332,
    10.072620391845703,
    10.112163543701172,
    9.969393730163574,
]

if core.is_compiled_with_cuda():
    paddle.set_flags({'FLAGS_cudnn_deterministic': True})


def train(to_static, enable_prim, enable_cinn):
    if core.is_compiled_with_cuda():
        paddle.set_device('gpu')
    else:
        paddle.set_device('cpu')
    fluid.core._set_prim_all_enabled(enable_prim)

    np.random.seed(SEED)
    paddle.seed(SEED)
    # paddle.framework.random._manual_program_seed(SEED)

    train_data_loader = create_pretraining_dataset(
        os.path.join(DATA_HOME, MODULE_NAME, SAVE_NAME),
        20,
        {},
        batch_size=BATCH_SIZE,
        worker_init=None,
    )

    # Now only apply dy2st for encoder
    bert = Bert(to_static, enable_cinn)
    criterion = BertPretrainingCriterion()
    optimizer = fluid.optimizer.Adam(parameter_list=bert.parameters())

    losses = []
    for step, batch in enumerate(train_data_loader):
        start_time = time.time()
        (
            input_ids,
            segment_ids,
            input_mask,
            masked_lm_positions,
            masked_lm_labels,
            next_sentence_labels,
            masked_lm_scale,
        ) = batch

        prediction_scores, seq_relationship_score = bert(
            input_ids=input_ids,
            token_type_ids=segment_ids,
            attention_mask=input_mask,
            masked_positions=masked_lm_positions,
        )

        loss = criterion(
            prediction_scores,
            seq_relationship_score,
            masked_lm_labels,
            next_sentence_labels,
            masked_lm_scale,
        )

        loss.backward()
        optimizer.minimize(loss)
        bert.clear_gradients()

        losses.append(loss.numpy().item())

        print(
            "step: {}, loss: {}, batch_cost: {:.5}".format(
                step,
                loss.numpy(),
                time.time() - start_time,
            )
        )
        if step >= 9:
            break
    print(losses)
    return losses


class TestBert(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        download(URL, MODULE_NAME, MD5SUM, SAVE_NAME)

    def tearDown(self):
        paddle.set_flags({'FLAGS_deny_cinn_ops': ''})

    @unittest.skipIf(
        not (paddle.is_compiled_with_cinn() and paddle.is_compiled_with_cuda()),
        "paddle is not compiled with CINN and CUDA",
    )
    def test_cinn(self):
        paddle.set_flags({'FLAGS_deny_cinn_ops': "dropout"})
        dy2st_cinn = train(to_static=True, enable_prim=False, enable_cinn=True)
        np.testing.assert_allclose(dy2st_cinn, DY2ST_CINN_GT, rtol=1e-5)


if __name__ == '__main__':
    unittest.main()
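Each of the split tests passes only if every one of the ten recorded losses stays within a 1e-5 relative band of the ground truth. A minimal sketch of the elementwise rule np.testing.assert_allclose applies here (standard NumPy semantics; atol defaults to 0):

import numpy as np

# Fail unless abs(actual - desired) <= atol + rtol * abs(desired), per element.
actual, desired, rtol, atol = 10.6496, 10.649632453918457, 1e-5, 0.0
assert abs(actual - desired) <= atol + rtol * abs(desired)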
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time
import unittest

import numpy as np
from bert import Bert, BertPretrainingCriterion, create_pretraining_dataset

import paddle
from paddle import fluid
from paddle.dataset.common import DATA_HOME, download
from paddle.fluid import core

SEED = 2023
BATCH_SIZE = 2

URL = 'https://paddle-ci.gz.bcebos.com/prim_cinn/bert_training_data.npz'
MODULE_NAME = 'test_bert_prim_cinn'
MD5SUM = '71e730ee8d7aa77a215b7e898aa089af'
SAVE_NAME = 'bert_training_data.npz'

DY2ST_PRIM_GT = [
    11.144556999206543,
    10.343620300292969,
    10.330279350280762,
    10.276118278503418,
    10.222086906433105,
    10.194628715515137,
    10.14902114868164,
    10.096250534057617,
    10.104615211486816,
    9.985644340515137,
]

if core.is_compiled_with_cuda():
    paddle.set_flags({'FLAGS_cudnn_deterministic': True})


def train(to_static, enable_prim, enable_cinn):
    if core.is_compiled_with_cuda():
        paddle.set_device('gpu')
    else:
        paddle.set_device('cpu')
    fluid.core._set_prim_all_enabled(enable_prim)

    np.random.seed(SEED)
    paddle.seed(SEED)
    # paddle.framework.random._manual_program_seed(SEED)

    train_data_loader = create_pretraining_dataset(
        os.path.join(DATA_HOME, MODULE_NAME, SAVE_NAME),
        20,
        {},
        batch_size=BATCH_SIZE,
        worker_init=None,
    )

    # Now only apply dy2st for encoder
    bert = Bert(to_static, enable_cinn)
    criterion = BertPretrainingCriterion()
    optimizer = fluid.optimizer.Adam(parameter_list=bert.parameters())

    losses = []
    for step, batch in enumerate(train_data_loader):
        start_time = time.time()
        (
            input_ids,
            segment_ids,
            input_mask,
            masked_lm_positions,
            masked_lm_labels,
            next_sentence_labels,
            masked_lm_scale,
        ) = batch

        prediction_scores, seq_relationship_score = bert(
            input_ids=input_ids,
            token_type_ids=segment_ids,
            attention_mask=input_mask,
            masked_positions=masked_lm_positions,
        )

        loss = criterion(
            prediction_scores,
            seq_relationship_score,
            masked_lm_labels,
            next_sentence_labels,
            masked_lm_scale,
        )

        loss.backward()
        optimizer.minimize(loss)
        bert.clear_gradients()

        losses.append(loss.numpy().item())

        print(
            "step: {}, loss: {}, batch_cost: {:.5}".format(
                step,
                loss.numpy(),
                time.time() - start_time,
            )
        )
        if step >= 9:
            break
    print(losses)
    return losses


class TestBert(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        download(URL, MODULE_NAME, MD5SUM, SAVE_NAME)

    def tearDown(self):
        paddle.set_flags({'FLAGS_deny_cinn_ops': ''})

    @unittest.skipIf(
        not (paddle.is_compiled_with_cinn() and paddle.is_compiled_with_cuda()),
        "paddle is not compiled with CINN and CUDA",
    )
    def test_prim(self):
        dy2st_prim = train(to_static=True, enable_prim=True, enable_cinn=False)
        np.testing.assert_allclose(dy2st_prim, DY2ST_PRIM_GT, rtol=1e-5)


if __name__ == '__main__':
    unittest.main()
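This file is identical to test_bert_cinn.py above except for the ground-truth list, the flag combination, and the FLAGS_deny_cinn_ops handling. A hypothetical refactor sketch (the helper name _check_bert is ours, not the commit's) shows how thin each variant really is:

# Hypothetical, not part of the commit: each Bert test body reduces to one call
# against a shared train(), parameterized by flags and expected losses.
def _check_bert(self, expected, **flags):
    losses = train(**flags)  # train() is defined identically in all three files
    np.testing.assert_allclose(losses, expected, rtol=1e-5)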
@@ -33,31 +33,6 @@ MD5SUM = '71e730ee8d7aa77a215b7e898aa089af'
 SAVE_NAME = 'bert_training_data.npz'
 
-DY2ST_PRIM_GT = [
-    11.144556999206543,
-    10.343620300292969,
-    10.330279350280762,
-    10.276118278503418,
-    10.222086906433105,
-    10.194628715515137,
-    10.14902114868164,
-    10.096250534057617,
-    10.104615211486816,
-    9.985644340515137,
-]
-DY2ST_CINN_GT = [
-    10.649632453918457,
-    10.333406448364258,
-    10.33541202545166,
-    10.260543823242188,
-    10.219606399536133,
-    10.176884651184082,
-    10.124699592590332,
-    10.072620391845703,
-    10.112163543701172,
-    9.969393730163574,
-]
 DY2ST_PRIM_CINN_GT = [
     10.976988792419434,
     10.345282554626465,
@@ -155,23 +130,6 @@ class TestBert(unittest.TestCase):
     def tearDown(self):
         paddle.set_flags({'FLAGS_deny_cinn_ops': ''})
 
-    @unittest.skipIf(
-        not (paddle.is_compiled_with_cinn() and paddle.is_compiled_with_cuda()),
-        "paddle is not compiled with CINN and CUDA",
-    )
-    def test_prim(self):
-        dy2st_prim = train(to_static=True, enable_prim=True, enable_cinn=False)
-        np.testing.assert_allclose(dy2st_prim, DY2ST_PRIM_GT, rtol=1e-5)
-
-    @unittest.skipIf(
-        not (paddle.is_compiled_with_cinn() and paddle.is_compiled_with_cuda()),
-        "paddle is not compiled with CINN and CUDA",
-    )
-    def test_cinn(self):
-        paddle.set_flags({'FLAGS_deny_cinn_ops': "dropout"})
-        dy2st_cinn = train(to_static=True, enable_prim=False, enable_cinn=True)
-        np.testing.assert_allclose(dy2st_cinn, DY2ST_CINN_GT, rtol=1e-5)
-
     @unittest.skipIf(
         not (paddle.is_compiled_with_cinn() and paddle.is_compiled_with_cuda()),
         "paddle is not compiled with CINN and CUDA",
......
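Nothing removed here is lost: the DY2ST_PRIM_GT and DY2ST_CINN_GT constants and the test_prim/test_cinn methods deleted from this file (evidently test_bert_prim_cinn.py, judging by MODULE_NAME and the CMake target) reappear verbatim in the two new files above, leaving only the combined prim+CINN case behind.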
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
import unittest

import numpy as np

import paddle
from paddle import fluid
from paddle.fluid import core
from paddle.vision.models import resnet50

SEED = 2020
base_lr = 0.001
momentum_rate = 0.9
l2_decay = 1e-4
batch_size = 2
epoch_num = 1

# On a V100 (16G), CUDA 11.2, the results are as follows:
# DY2ST_CINN_GT = [
#     5.847336769104004,
#     8.336246490478516,
#     5.108744144439697,
#     8.316713333129883,
#     8.175262451171875,
#     7.590441703796387,
#     9.895681381225586,
#     8.196207046508789,
#     8.438933372497559,
#     10.305074691772461,
# ]
# The results in CI are as follows:
DY2ST_CINN_GT = [
    5.828789710998535,
    8.340764999389648,
    4.998944282531738,
    8.474305152893066,
    8.09157943725586,
    7.440057754516602,
    9.907357215881348,
    8.304681777954102,
    8.383116722106934,
    10.120304107666016,
]

if core.is_compiled_with_cuda():
    paddle.set_flags({'FLAGS_cudnn_deterministic': True})


def reader_decorator(reader):
    def __reader__():
        for item in reader():
            img = np.array(item[0]).astype('float32').reshape(3, 224, 224)
            label = np.array(item[1]).astype('int64').reshape(1)
            yield img, label

    return __reader__


def optimizer_setting(parameter_list=None):
    optimizer = fluid.optimizer.Momentum(
        learning_rate=base_lr,
        momentum=momentum_rate,
        regularization=fluid.regularizer.L2Decay(l2_decay),
        parameter_list=parameter_list,
    )
    return optimizer


def run(model, data_loader, optimizer, mode):
    if mode == 'train':
        model.train()
        end_step = 9
    elif mode == 'eval':
        model.eval()
        end_step = 1

    for epoch in range(epoch_num):
        total_acc1 = 0.0
        total_acc5 = 0.0
        total_sample = 0
        losses = []

        for batch_id, data in enumerate(data_loader()):
            start_time = time.time()
            img, label = data

            pred = model(img)
            avg_loss = paddle.nn.functional.cross_entropy(
                input=pred,
                label=label,
                soft_label=False,
                reduction='mean',
                use_softmax=True,
            )
            acc_top1 = paddle.static.accuracy(input=pred, label=label, k=1)
            acc_top5 = paddle.static.accuracy(input=pred, label=label, k=5)

            if mode == 'train':
                avg_loss.backward()
                optimizer.minimize(avg_loss)
                model.clear_gradients()

            total_acc1 += acc_top1
            total_acc5 += acc_top5
            total_sample += 1
            losses.append(avg_loss.numpy().item())

            end_time = time.time()
            print(
                "[%s]epoch %d | batch step %d, loss %0.8f, acc1 %0.3f, acc5 %0.3f, time %f"
                % (
                    mode,
                    epoch,
                    batch_id,
                    avg_loss,
                    total_acc1.numpy() / total_sample,
                    total_acc5.numpy() / total_sample,
                    end_time - start_time,
                )
            )
            if batch_id >= end_step:
                # avoid the dataloader throwing an abort signal
                data_loader._reset()
                break
    print(losses)
    return losses


def train(to_static, enable_prim, enable_cinn):
    if core.is_compiled_with_cuda():
        paddle.set_device('gpu')
    else:
        paddle.set_device('cpu')

    np.random.seed(SEED)
    paddle.seed(SEED)
    paddle.framework.random._manual_program_seed(SEED)
    fluid.core._set_prim_all_enabled(enable_prim)

    train_reader = paddle.batch(
        reader_decorator(paddle.dataset.flowers.train(use_xmap=False)),
        batch_size=batch_size,
        drop_last=True,
    )
    data_loader = fluid.io.DataLoader.from_generator(capacity=5, iterable=True)
    data_loader.set_sample_list_generator(train_reader)

    resnet = resnet50(False)
    if to_static:
        build_strategy = paddle.static.BuildStrategy()
        if enable_cinn:
            build_strategy.build_cinn_pass = True
        resnet = paddle.jit.to_static(resnet, build_strategy=build_strategy)
    optimizer = optimizer_setting(parameter_list=resnet.parameters())

    train_losses = run(resnet, data_loader, optimizer, 'train')
    if to_static and enable_prim and enable_cinn:
        eval_losses = run(resnet, data_loader, optimizer, 'eval')
    return train_losses


class TestResnet(unittest.TestCase):
    @unittest.skipIf(
        not (paddle.is_compiled_with_cinn() and paddle.is_compiled_with_cuda()),
        "paddle is not compiled with CINN and CUDA",
    )
    def test_cinn(self):
        dy2st_cinn = train(to_static=True, enable_prim=False, enable_cinn=True)
        np.testing.assert_allclose(dy2st_cinn, DY2ST_CINN_GT, rtol=1e-5)


if __name__ == '__main__':
    unittest.main()
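In the resnet tests, CINN is engaged purely through the dynamic-to-static build strategy rather than a runtime flag. The pattern used by train() above reduces to the following sketch (assumes a CINN-enabled GPU build of Paddle):

import paddle
from paddle.vision.models import resnet50

# Sketch of the dy2st-with-CINN pattern from train() above.
net = resnet50(False)
build_strategy = paddle.static.BuildStrategy()
build_strategy.build_cinn_pass = True  # compile the captured graph with CINN
net = paddle.jit.to_static(net, build_strategy=build_strategy)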
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
import unittest

import numpy as np

import paddle
from paddle import fluid
from paddle.fluid import core
from paddle.vision.models import resnet50

SEED = 2020
base_lr = 0.001
momentum_rate = 0.9
l2_decay = 1e-4
batch_size = 2
epoch_num = 1

# On a V100 (16G), CUDA 11.2, the results are as follows:
# DY2ST_PRIM_GT = [
#     5.8473358154296875,
#     8.354944229125977,
#     5.098367691040039,
#     8.533346176147461,
#     8.179085731506348,
#     7.285282135009766,
#     9.824585914611816,
#     8.56928825378418,
#     8.539499282836914,
#     10.256929397583008,
# ]
# The results in CI are as follows:
DY2ST_PRIM_GT = [
    5.82879114151001,
    8.33370590209961,
    5.091761589050293,
    8.776082992553711,
    8.274380683898926,
    7.546653747558594,
    9.607137680053711,
    8.27371597290039,
    8.429732322692871,
    10.362630844116211,
]

if core.is_compiled_with_cuda():
    paddle.set_flags({'FLAGS_cudnn_deterministic': True})


def reader_decorator(reader):
    def __reader__():
        for item in reader():
            img = np.array(item[0]).astype('float32').reshape(3, 224, 224)
            label = np.array(item[1]).astype('int64').reshape(1)
            yield img, label

    return __reader__


def optimizer_setting(parameter_list=None):
    optimizer = fluid.optimizer.Momentum(
        learning_rate=base_lr,
        momentum=momentum_rate,
        regularization=fluid.regularizer.L2Decay(l2_decay),
        parameter_list=parameter_list,
    )
    return optimizer


def run(model, data_loader, optimizer, mode):
    if mode == 'train':
        model.train()
        end_step = 9
    elif mode == 'eval':
        model.eval()
        end_step = 1

    for epoch in range(epoch_num):
        total_acc1 = 0.0
        total_acc5 = 0.0
        total_sample = 0
        losses = []

        for batch_id, data in enumerate(data_loader()):
            start_time = time.time()
            img, label = data

            pred = model(img)
            avg_loss = paddle.nn.functional.cross_entropy(
                input=pred,
                label=label,
                soft_label=False,
                reduction='mean',
                use_softmax=True,
            )
            acc_top1 = paddle.static.accuracy(input=pred, label=label, k=1)
            acc_top5 = paddle.static.accuracy(input=pred, label=label, k=5)

            if mode == 'train':
                avg_loss.backward()
                optimizer.minimize(avg_loss)
                model.clear_gradients()

            total_acc1 += acc_top1
            total_acc5 += acc_top5
            total_sample += 1
            losses.append(avg_loss.numpy().item())

            end_time = time.time()
            print(
                "[%s]epoch %d | batch step %d, loss %0.8f, acc1 %0.3f, acc5 %0.3f, time %f"
                % (
                    mode,
                    epoch,
                    batch_id,
                    avg_loss,
                    total_acc1.numpy() / total_sample,
                    total_acc5.numpy() / total_sample,
                    end_time - start_time,
                )
            )
            if batch_id >= end_step:
                # avoid the dataloader throwing an abort signal
                data_loader._reset()
                break
    print(losses)
    return losses


def train(to_static, enable_prim, enable_cinn):
    if core.is_compiled_with_cuda():
        paddle.set_device('gpu')
    else:
        paddle.set_device('cpu')

    np.random.seed(SEED)
    paddle.seed(SEED)
    paddle.framework.random._manual_program_seed(SEED)
    fluid.core._set_prim_all_enabled(enable_prim)

    train_reader = paddle.batch(
        reader_decorator(paddle.dataset.flowers.train(use_xmap=False)),
        batch_size=batch_size,
        drop_last=True,
    )
    data_loader = fluid.io.DataLoader.from_generator(capacity=5, iterable=True)
    data_loader.set_sample_list_generator(train_reader)

    resnet = resnet50(False)
    if to_static:
        build_strategy = paddle.static.BuildStrategy()
        if enable_cinn:
            build_strategy.build_cinn_pass = True
        resnet = paddle.jit.to_static(resnet, build_strategy=build_strategy)
    optimizer = optimizer_setting(parameter_list=resnet.parameters())

    train_losses = run(resnet, data_loader, optimizer, 'train')
    if to_static and enable_prim and enable_cinn:
        eval_losses = run(resnet, data_loader, optimizer, 'eval')
    return train_losses


class TestResnet(unittest.TestCase):
    @unittest.skipIf(
        not (paddle.is_compiled_with_cinn() and paddle.is_compiled_with_cuda()),
        "paddle is not compiled with CINN and CUDA",
    )
    def test_prim(self):
        dy2st_prim = train(to_static=True, enable_prim=True, enable_cinn=False)
        np.testing.assert_allclose(dy2st_prim, DY2ST_PRIM_GT, rtol=1e-5)


if __name__ == '__main__':
    unittest.main()
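Reproducibility in the resnet pair rests on one more knob than in the Bert pair: besides np.random.seed/paddle.seed and FLAGS_cudnn_deterministic, these files also call paddle.framework.random._manual_program_seed(SEED) (the Bert files leave that line commented out), which is what makes the fixed ten-loss traces comparable across runs.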
@@ -30,30 +30,6 @@ batch_size = 2
 epoch_num = 1
 
 # On a V100 (16G), CUDA 11.2, the results are as follows:
-# DY2ST_PRIM_GT = [
-#     5.8473358154296875,
-#     8.354944229125977,
-#     5.098367691040039,
-#     8.533346176147461,
-#     8.179085731506348,
-#     7.285282135009766,
-#     9.824585914611816,
-#     8.56928825378418,
-#     8.539499282836914,
-#     10.256929397583008,
-# ]
-# DY2ST_CINN_GT = [
-#     5.847336769104004,
-#     8.336246490478516,
-#     5.108744144439697,
-#     8.316713333129883,
-#     8.175262451171875,
-#     7.590441703796387,
-#     9.895681381225586,
-#     8.196207046508789,
-#     8.438933372497559,
-#     10.305074691772461,
-# ]
 # DY2ST_PRIM_CINN_GT = [
 #     5.8473358154296875,
 #     8.322463989257812,
@@ -68,31 +44,6 @@ epoch_num = 1
 # ]
 # The results in CI are as follows:
-DY2ST_PRIM_GT = [
-    5.82879114151001,
-    8.33370590209961,
-    5.091761589050293,
-    8.776082992553711,
-    8.274380683898926,
-    7.546653747558594,
-    9.607137680053711,
-    8.27371597290039,
-    8.429732322692871,
-    10.362630844116211,
-]
-DY2ST_CINN_GT = [
-    5.828789710998535,
-    8.340764999389648,
-    4.998944282531738,
-    8.474305152893066,
-    8.09157943725586,
-    7.440057754516602,
-    9.907357215881348,
-    8.304681777954102,
-    8.383116722106934,
-    10.120304107666016,
-]
 DY2ST_PRIM_CINN_GT = [
     5.828786849975586,
     8.332868576049805,
@@ -225,22 +176,6 @@ def train(to_static, enable_prim, enable_cinn):
 class TestResnet(unittest.TestCase):
-    @unittest.skipIf(
-        not (paddle.is_compiled_with_cinn() and paddle.is_compiled_with_cuda()),
-        "paddle is not compiled with CINN and CUDA",
-    )
-    def test_prim(self):
-        dy2st_prim = train(to_static=True, enable_prim=True, enable_cinn=False)
-        np.testing.assert_allclose(dy2st_prim, DY2ST_PRIM_GT, rtol=1e-5)
-
-    @unittest.skipIf(
-        not (paddle.is_compiled_with_cinn() and paddle.is_compiled_with_cuda()),
-        "paddle is not compiled with CINN and CUDA",
-    )
-    def test_cinn(self):
-        dy2st_cinn = train(to_static=True, enable_prim=False, enable_cinn=True)
-        np.testing.assert_allclose(dy2st_cinn, DY2ST_CINN_GT, rtol=1e-5)
-
     @unittest.skipIf(
         not (paddle.is_compiled_with_cinn() and paddle.is_compiled_with_cuda()),
         "paddle is not compiled with CINN and CUDA",
......