Unverified commit a419fe10, authored by whs, committed by GitHub

Make unittests support the Paddle 2.0 API (#475)

* Make unittests support the Paddle 2.0 API.

* Fix demos to run in static mode

* Remove unused lines

* Fix the DARTS demo
Parent 8e9af24c
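The change centers on a small StaticCase helper class (added in a new static_case.py module next to the unit tests; its definition appears in one of the hunks below) that calls paddle.enable_static() before a test runs, together with explicit paddle.enable_static() calls at the entry points of the static-mode demos. A minimal sketch of how a test module opts in; the test class, method, and program contents here are illustrative only and are not part of this commit:

import unittest
import paddle
import paddle.fluid as fluid
from static_case import StaticCase  # helper introduced by this commit

class TestBuildProgram(StaticCase):  # hypothetical test case, for illustration
    def test_build_program(self):
        # StaticCase has already called paddle.enable_static(), so static-graph
        # APIs such as fluid.Program and program_guard behave as in Paddle 1.x.
        main_prog = fluid.Program()
        startup_prog = fluid.Program()
        with fluid.program_guard(main_prog, startup_prog):
            x = fluid.data(name='x', shape=[None, 8], dtype='float32')
            y = fluid.layers.fc(input=x, size=2)
        self.assertTrue(len(main_prog.global_block().ops) > 0)

if __name__ == '__main__':
    unittest.main()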
......@@ -152,8 +152,7 @@ def main(args):
count_parameters_in_MB(model.parameters())))
device_num = fluid.dygraph.parallel.Env().nranks
step_per_epoch = int(args.trainset_num /
(args.batch_size * device_num))
step_per_epoch = int(args.trainset_num / (args.batch_size * device_num))
learning_rate = fluid.dygraph.CosineDecay(args.learning_rate,
step_per_epoch, args.epochs)
clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=args.grad_clip)
......
......@@ -102,8 +102,8 @@ def compress(args):
raise ValueError("{} is not supported.".format(args.data))
image_shape = [int(m) for m in image_shape.split(",")]
assert args.model in model_list, "{} is not in lists: {}".format(
args.model, model_list)
assert args.model in model_list, "{} is not in lists: {}".format(args.model,
model_list)
student_program = fluid.Program()
s_startup = fluid.Program()
......@@ -234,4 +234,5 @@ def main():
if __name__ == '__main__':
paddle.enable_static()
main()
......@@ -203,7 +203,7 @@ def search_mobilenetv2_block(config, args, image_size):
if __name__ == '__main__':
paddle.enable_static()
parser = argparse.ArgumentParser(
description='SA NAS MobileNetV2 cifar10 argparase')
parser.add_argument(
......
......@@ -191,7 +191,7 @@ def search_mobilenetv2(config, args, image_size, is_server=True):
if __name__ == '__main__':
paddle.enable_static()
parser = argparse.ArgumentParser(
description='RL NAS MobileNetV2 cifar10 argparase')
parser.add_argument(
......
......@@ -176,7 +176,7 @@ def search_mobilenetv2(config, args, image_size, is_server=True):
if __name__ == '__main__':
paddle.enable_static()
parser = argparse.ArgumentParser(
description='RL NAS MobileNetV2 cifar10 argparase')
parser.add_argument(
......
......@@ -209,9 +209,7 @@ def test_search_result(tokens, image_size, args, config):
drop_last=False)
elif args.data == 'imagenet':
train_reader = paddle.fluid.io.batch(
imagenet_reader.train(),
batch_size=args.batch_size,
drop_last=True)
imagenet_reader.train(), batch_size=args.batch_size, drop_last=True)
test_reader = paddle.fluid.io.batch(
imagenet_reader.val(), batch_size=args.batch_size, drop_last=False)
......@@ -250,8 +248,8 @@ def test_search_result(tokens, image_size, args, config):
_logger.info(
'TEST: batch: {}, avg_cost: {}, acc_top1: {}, acc_top5: {}'.
format(batch_id, batch_reward[0], batch_reward[1],
batch_reward[2]))
format(batch_id, batch_reward[0], batch_reward[1], batch_reward[
2]))
finally_reward = np.mean(np.array(reward), axis=0)
_logger.info(
......@@ -309,5 +307,5 @@ if __name__ == '__main__':
args.data))
config = [('MobileNetV2Space')]
paddle.enable_static()
search_mobilenetv2(config, args, image_size, is_server=args.is_server)
Subproject commit 56c6c3ae0e5c9ae6b9401a9446c629e513d4617f
Subproject commit 2bdaea56566257cf73bd1cbbf834a16f4f7ac4cf
......@@ -114,8 +114,8 @@ def compress(args):
else:
raise ValueError("{} is not supported.".format(args.data))
image_shape = [int(m) for m in image_shape.split(",")]
assert args.model in model_list, "{} is not in lists: {}".format(
args.model, model_list)
assert args.model in model_list, "{} is not in lists: {}".format(args.model,
model_list)
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# model definition
......@@ -135,8 +135,7 @@ def compress(args):
if args.pretrained_model:
def if_exist(var):
return os.path.exists(
os.path.join(args.pretrained_model, var.name))
return os.path.exists(os.path.join(args.pretrained_model, var.name))
_logger.info("Load pretrained model from {}".format(
args.pretrained_model))
......@@ -171,10 +170,9 @@ def compress(args):
acc_top5_ns.append(np.mean(acc_top5_n))
batch_id += 1
_logger.info("Final eval epoch[{}] - acc_top1: {}; acc_top5: {}".
format(epoch,
np.mean(np.array(acc_top1_ns)),
np.mean(np.array(acc_top5_ns))))
_logger.info("Final eval epoch[{}] - acc_top1: {}; acc_top5: {}".format(
epoch,
np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
def train(epoch, program):
......@@ -241,6 +239,7 @@ def compress(args):
def main():
paddle.enable_static()
args = parser.parse_args()
print_arguments(args)
compress(args)
......
......@@ -353,6 +353,7 @@ def compress(args):
def main():
paddle.enable_static()
args = parser.parse_args()
print_arguments(args)
compress(args)
......
......@@ -116,8 +116,8 @@ def compress(args):
raise ValueError("{} is not supported.".format(args.data))
image_shape = [int(m) for m in image_shape.split(",")]
assert args.model in model_list, "{} is not in lists: {}".format(
args.model, model_list)
assert args.model in model_list, "{} is not in lists: {}".format(args.model,
model_list)
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# model definition
......@@ -154,8 +154,7 @@ def compress(args):
if args.pretrained_model:
def if_exist(var):
return os.path.exists(
os.path.join(args.pretrained_model, var.name))
return os.path.exists(os.path.join(args.pretrained_model, var.name))
fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
......@@ -188,10 +187,9 @@ def compress(args):
acc_top5_ns.append(np.mean(acc_top5_n))
batch_id += 1
_logger.info("Final eval epoch[{}] - acc_top1: {}; acc_top5: {}".
format(epoch,
np.mean(np.array(acc_top1_ns)),
np.mean(np.array(acc_top5_ns))))
_logger.info("Final eval epoch[{}] - acc_top1: {}; acc_top5: {}".format(
epoch,
np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
return np.mean(np.array(acc_top1_ns))
def train(epoch, compiled_train_prog):
......@@ -290,6 +288,7 @@ def compress(args):
def main():
paddle.enable_static()
args = parser.parse_args()
print_arguments(args)
compress(args)
......
......@@ -51,8 +51,7 @@ def parse_args():
'--use_cuda', type=int, default='0', help='whether use cuda')
parser.add_argument(
'--batch_size', type=int, default='5', help='batch_size')
parser.add_argument(
'--emb_size', type=int, default='64', help='batch_size')
parser.add_argument('--emb_size', type=int, default='64', help='batch_size')
parser.add_argument(
'--emb_quant',
type=bool,
......@@ -76,9 +75,7 @@ def infer_epoch(args, vocab_size, test_reader, use_cuda, i2w):
copy_program = main_program.clone()
model_path = model_dir + "/pass-" + str(epoch)
fluid.io.load_params(
executor=exe,
dirname=model_path,
main_program=copy_program)
executor=exe, dirname=model_path, main_program=copy_program)
if args.emb_quant:
config = {
'quantize_op_types': 'lookup_table',
......@@ -208,6 +205,7 @@ def infer_step(args, vocab_size, test_reader, use_cuda, i2w):
if __name__ == "__main__":
paddle.enable_static()
args = parse_args()
start_index = args.start_index
last_index = args.last_index
......
......@@ -194,8 +194,8 @@ def train(args):
os.mkdir(args.model_output_dir)
filelist = GetFileList(args.train_data_dir)
word2vec_reader = reader.Word2VecReader(
args.dict_path, args.train_data_dir, filelist, 0, 1)
word2vec_reader = reader.Word2VecReader(args.dict_path, args.train_data_dir,
filelist, 0, 1)
logger.info("dict_size: {}".format(word2vec_reader.dict_size))
np_power = np.power(np.array(word2vec_reader.id_frequencys), 0.75)
......@@ -224,5 +224,6 @@ def train(args):
if __name__ == '__main__':
paddle.enable_static()
args = parse_args()
train(args)
......@@ -87,6 +87,7 @@ def eval(args):
def main():
paddle.enable_static()
args = parser.parse_args()
print_arguments(args)
eval(args)
......
......@@ -48,8 +48,8 @@ def export_model(args):
image_shape = [int(m) for m in image_shape.split(",")]
image = fluid.data(
name='image', shape=[None] + image_shape, dtype='float32')
assert args.model in model_list, "{} is not in lists: {}".format(
args.model, model_list)
assert args.model in model_list, "{} is not in lists: {}".format(args.model,
model_list)
# model definition
model = models.__dict__[args.model]()
out = model.net(input=image, class_dim=class_dim)
......@@ -61,8 +61,7 @@ def export_model(args):
if args.pretrained_model:
def if_exist(var):
return os.path.exists(
os.path.join(args.pretrained_model, var.name))
return os.path.exists(os.path.join(args.pretrained_model, var.name))
fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
else:
......@@ -85,4 +84,5 @@ def main():
if __name__ == '__main__':
paddle.enable_static()
main()
......@@ -57,4 +57,5 @@ def main():
if __name__ == '__main__':
paddle.enable_static()
main()
import unittest
import paddle
class StaticCase(unittest.TestCase):
    def __init__(self, name='runTest'):
        # Forward the test method name to unittest so discovery still runs the
        # real test methods, then switch Paddle into static-graph mode.
        super(StaticCase, self).__init__(name)
        paddle.enable_static()

    def runTest(self):
        pass
......@@ -17,10 +17,11 @@ import unittest
import paddle.fluid as fluid
from paddleslim.prune import Pruner
from paddleslim.prune import AutoPruner
from static_case import StaticCase
from layers import conv_bn_layer
class TestPrune(unittest.TestCase):
class TestPrune(StaticCase):
def test_prune(self):
main_program = fluid.Program()
startup_program = fluid.Program()
......
......@@ -17,6 +17,7 @@ import os
import time
import signal
import unittest
from static_case import StaticCase
import paddle.fluid as fluid
from paddleslim.nas import SANAS
from paddleslim.common.controller_client import ControllerClient
......@@ -44,7 +45,7 @@ def start_server(configs, port):
return server_sanas
class TestClientConnect(unittest.TestCase):
class TestClientConnect(StaticCase):
def setUp(self):
self.configs = [('MobileNetV2BlockSpace', {'block_mask': [0]})]
self.port = np.random.randint(8337, 8773)
......@@ -58,7 +59,7 @@ class TestClientConnect(unittest.TestCase):
start_server(self.configs, self.port)
class TestClientConnectCase1(unittest.TestCase):
class TestClientConnectCase1(StaticCase):
def setUp(self):
self.configs = [('MobileNetV2BlockSpace', {'block_mask': [0]})]
self.port = np.random.randint(8337, 8773)
......@@ -74,7 +75,7 @@ class TestClientConnectCase1(unittest.TestCase):
os.kill(os.getpid(), 0)
class TestClientConnectCase2(unittest.TestCase):
class TestClientConnectCase2(StaticCase):
def setUp(self):
self.port = np.random.randint(8337, 8773)
self.addr = socket.gethostbyname(socket.gethostname())
......
......@@ -17,11 +17,12 @@ import paddle
import unittest
import paddle.fluid as fluid
import numpy as np
from static_case import StaticCase
from paddleslim.nas.darts import DARTSearch
from layers import conv_bn_layer
class TestDARTS(unittest.TestCase):
class TestDARTS(StaticCase):
def test_darts(self):
class SuperNet(fluid.dygraph.Layer):
def __init__(self):
......
......@@ -17,6 +17,7 @@ import unittest
import logging
import numpy as np
import paddle
from static_case import StaticCase
import paddle.fluid as fluid
import paddle.dataset.mnist as reader
from paddle.fluid.dygraph.base import to_variable
......@@ -49,7 +50,7 @@ class Model(fluid.dygraph.Layer):
return y
class TestDML(unittest.TestCase):
class TestDML(StaticCase):
def test_dml(self):
place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
) else fluid.CPUPlace()
......
......@@ -17,11 +17,12 @@ import unittest
import paddle
from paddleslim.nas import SANAS
from paddleslim.nas.early_stop import MedianStop
from static_case import StaticCase
steps = 5
epochs = 5
class TestMedianStop(unittest.TestCase):
class TestMedianStop(StaticCase):
def test_median_stop(self):
config = [('MobileNetV2Space')]
sanas = SANAS(config, server_addr=("", 8732), save_checkpoint=None)
......
......@@ -17,9 +17,10 @@ import unittest
import paddle.fluid as fluid
from paddleslim.analysis import flops
from layers import conv_bn_layer
from static_case import StaticCase
class TestPrune(unittest.TestCase):
class TestPrune(StaticCase):
def test_prune(self):
main_program = fluid.Program()
startup_program = fluid.Program()
......
......@@ -17,9 +17,10 @@ import unittest
import paddle.fluid as fluid
from paddleslim.prune import Pruner
from layers import conv_bn_layer
from static_case import StaticCase
class TestPrune(unittest.TestCase):
class TestPrune(StaticCase):
def test_prune(self):
main_program = fluid.Program()
startup_program = fluid.Program()
......
......@@ -17,9 +17,10 @@ import unittest
import paddle.fluid as fluid
from paddleslim.dist import merge, fsp_loss
from layers import conv_bn_layer
from static_case import StaticCase
class TestFSPLoss(unittest.TestCase):
class TestFSPLoss(StaticCase):
def test_fsp_loss(self):
student_main = fluid.Program()
student_startup = fluid.Program()
......
......@@ -17,9 +17,10 @@ import unittest
import paddle.fluid as fluid
from layers import conv_bn_layer
from paddleslim.prune import collect_convs
from static_case import StaticCase
class TestPrune(unittest.TestCase):
class TestPrune(StaticCase):
def test_prune(self):
main_program = fluid.Program()
startup_program = fluid.Program()
......
......@@ -14,17 +14,19 @@
import sys
sys.path.append("../")
import unittest
import paddle
import paddle.fluid as fluid
from static_case import StaticCase
from paddleslim.dist import merge, l2_loss
from layers import conv_bn_layer
class TestL2Loss(unittest.TestCase):
class TestL2Loss(StaticCase):
def test_l2_loss(self):
student_main = fluid.Program()
student_startup = fluid.Program()
with fluid.program_guard(student_main, student_startup):
input = fluid.data(name="image", shape=[None, 3, 224, 224])
input = paddle.data(name="image", shape=[None, 3, 224, 224])
conv1 = conv_bn_layer(input, 8, 3, "conv1")
conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
student_predict = conv1 + conv2
......@@ -32,7 +34,7 @@ class TestL2Loss(unittest.TestCase):
teacher_main = fluid.Program()
teacher_startup = fluid.Program()
with fluid.program_guard(teacher_main, teacher_startup):
input = fluid.data(name="image", shape=[None, 3, 224, 224])
input = paddle.data(name="image", shape=[None, 3, 224, 224])
conv1 = conv_bn_layer(input, 8, 3, "conv1")
conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
sum1 = conv1 + conv2
......
......@@ -17,9 +17,10 @@ import unittest
import paddle.fluid as fluid
from paddleslim.dist import merge, loss
from layers import conv_bn_layer
from static_case import StaticCase
class TestLoss(unittest.TestCase):
class TestLoss(StaticCase):
def test_loss(self):
student_main = fluid.Program()
student_startup = fluid.Program()
......
......@@ -17,9 +17,10 @@ import unittest
import paddle.fluid as fluid
from paddleslim.dist import merge
from layers import conv_bn_layer
from static_case import StaticCase
class TestMerge(unittest.TestCase):
class TestMerge(StaticCase):
def test_merge(self):
student_main = fluid.Program()
student_startup = fluid.Program()
......
......@@ -17,9 +17,10 @@ import unittest
import paddle.fluid as fluid
from paddleslim.analysis import model_size
from layers import conv_bn_layer
from static_case import StaticCase
class TestPrune(unittest.TestCase):
class TestPrune(StaticCase):
def test_prune(self):
main_program = fluid.Program()
startup_program = fluid.Program()
......
......@@ -17,6 +17,7 @@ sys.path.append("../")
import numpy as np
import unittest
import paddle
from static_case import StaticCase
import paddle.fluid as fluid
import paddle.fluid.dygraph.nn as nn
from paddle.nn import ReLU
......@@ -119,7 +120,7 @@ class ModelLinear(fluid.dygraph.Layer):
return inputs
class TestOFA(unittest.TestCase):
class TestOFA(StaticCase):
def setUp(self):
fluid.enable_dygraph()
self.init_model_and_data()
......
......@@ -17,9 +17,10 @@ import unittest
import paddle.fluid as fluid
from paddleslim.prune import Pruner
from layers import conv_bn_layer
from static_case import StaticCase
class TestPrune(unittest.TestCase):
class TestPrune(StaticCase):
def test_prune(self):
main_program = fluid.Program()
startup_program = fluid.Program()
......
......@@ -14,12 +14,14 @@
import sys
sys.path.append("../")
import unittest
from static_case import StaticCase
import paddle.fluid as fluid
from paddleslim.prune import Pruner
from static_case import StaticCase
from layers import conv_bn_layer
class TestPrune(unittest.TestCase):
class TestPrune(StaticCase):
def test_prune(self):
main_program = fluid.Program()
startup_program = fluid.Program()
......
......@@ -18,12 +18,13 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddleslim.prune import Pruner
from static_case import StaticCase
from layers import conv_bn_layer
import random
from paddleslim.core import GraphWrapper
class TestPrune(unittest.TestCase):
class TestPrune(StaticCase):
def test_prune(self):
main_program = fluid.Program()
startup_program = fluid.Program()
......
......@@ -17,11 +17,12 @@ import unittest
import paddle.fluid as fluid
from paddleslim.prune import Pruner, save_model, load_model
from layers import conv_bn_layer
from static_case import StaticCase
import numpy as np
import numpy
class TestSaveAndLoad(unittest.TestCase):
class TestSaveAndLoad(StaticCase):
def test_prune(self):
train_program = fluid.Program()
startup_program = fluid.Program()
......
......@@ -17,6 +17,7 @@ import unittest
import paddle
import paddle.fluid as fluid
from paddleslim.quant import quant_aware, convert
from static_case import StaticCase
sys.path.append("../demo")
from models import MobileNet
from layers import conv_bn_layer
......@@ -26,7 +27,7 @@ from paddle.fluid import core
import numpy as np
class TestQuantAwareCase1(unittest.TestCase):
class TestQuantAwareCase1(StaticCase):
def get_model(self):
image = fluid.layers.data(
name='image', shape=[1, 28, 28], dtype='float32')
......@@ -88,7 +89,7 @@ class TestQuantAwareCase1(unittest.TestCase):
self.assertTrue(convert_quant_op_nums_1 - 2 == convert_quant_op_nums_2)
class TestQuantAwareCase2(unittest.TestCase):
class TestQuantAwareCase2(StaticCase):
def test_accuracy(self):
image = fluid.layers.data(
name='image', shape=[1, 28, 28], dtype='float32')
......@@ -140,9 +141,8 @@ class TestQuantAwareCase2(unittest.TestCase):
fetch_list=[avg_cost, acc_top1, acc_top5])
iter += 1
if iter % 100 == 0:
print(
'eval iter={}, avg loss {}, acc_top1 {}, acc_top5 {}'.
format(iter, cost, top1, top5))
print('eval iter={}, avg loss {}, acc_top1 {}, acc_top5 {}'.
format(iter, cost, top1, top5))
result[0].append(cost)
result[1].append(top1)
result[2].append(top5)
......@@ -158,8 +158,7 @@ class TestQuantAwareCase2(unittest.TestCase):
'activation_quantize_type': 'moving_average_abs_max',
'quantize_op_types': ['depthwise_conv2d', 'mul', 'conv2d'],
}
quant_train_prog = quant_aware(
main_prog, place, config, for_test=False)
quant_train_prog = quant_aware(main_prog, place, config, for_test=False)
quant_eval_prog = quant_aware(val_prog, place, config, for_test=True)
train(quant_train_prog)
quant_eval_prog, int8_prog = convert(
......
......@@ -17,6 +17,7 @@ import unittest
import paddle
import paddle.fluid as fluid
from paddleslim.quant import quant_aware, convert
from static_case import StaticCase
sys.path.append("../demo")
from models import MobileNet
from layers import conv_bn_layer
......@@ -37,8 +38,7 @@ def pact(x, name=None):
initializer=fluid.initializer.ConstantInitializer(value=init_thres),
regularizer=fluid.regularizer.L2Decay(0.0001),
learning_rate=1)
u_param = helper.create_parameter(
attr=u_param_attr, shape=[1], dtype=dtype)
u_param = helper.create_parameter(attr=u_param_attr, shape=[1], dtype=dtype)
x = fluid.layers.elementwise_sub(
x, fluid.layers.relu(fluid.layers.elementwise_sub(x, u_param)))
x = fluid.layers.elementwise_add(
......@@ -51,7 +51,7 @@ def get_optimizer():
return fluid.optimizer.MomentumOptimizer(0.0001, 0.9)
class TestQuantAwareCase1(unittest.TestCase):
class TestQuantAwareCase1(StaticCase):
def get_model(self):
image = fluid.layers.data(
name='image', shape=[1, 28, 28], dtype='float32')
......@@ -116,9 +116,8 @@ class TestQuantAwareCase1(unittest.TestCase):
fetch_list=[avg_cost, acc_top1, acc_top5])
iter += 1
if iter % 100 == 0:
print(
'eval iter={}, avg loss {}, acc_top1 {}, acc_top5 {}'.
format(iter, cost, top1, top5))
print('eval iter={}, avg loss {}, acc_top1 {}, acc_top5 {}'.
format(iter, cost, top1, top5))
result[0].append(cost)
result[1].append(top1)
result[2].append(top5)
......
......@@ -17,8 +17,10 @@ import paddle.fluid as fluid
import paddleslim.quant as quant
import unittest
from static_case import StaticCase
class TestQuantEmbedding(unittest.TestCase):
class TestQuantEmbedding(StaticCase):
def test_quant_embedding(self):
train_program = fluid.Program()
with fluid.program_guard(train_program):
......
......@@ -17,6 +17,7 @@ import unittest
import paddle
import paddle.fluid as fluid
from paddleslim.quant import quant_post_static
from static_case import StaticCase
sys.path.append("../demo")
from models import MobileNet
from layers import conv_bn_layer
......@@ -26,7 +27,7 @@ from paddle.fluid import core
import numpy as np
class TestQuantAwareCase1(unittest.TestCase):
class TestQuantAwareCase1(StaticCase):
def test_accuracy(self):
image = fluid.layers.data(
name='image', shape=[1, 28, 28], dtype='float32')
......@@ -77,9 +78,8 @@ class TestQuantAwareCase1(unittest.TestCase):
fetch_list=outputs)
iter += 1
if iter % 100 == 0:
print(
'eval iter={}, avg loss {}, acc_top1 {}, acc_top5 {}'.
format(iter, cost, top1, top5))
print('eval iter={}, avg loss {}, acc_top1 {}, acc_top5 {}'.
format(iter, cost, top1, top5))
result[0].append(cost)
result[1].append(top1)
result[2].append(top5)
......
......@@ -17,6 +17,7 @@ import unittest
import paddle
import paddle.fluid as fluid
from paddleslim.quant import quant_post_dynamic
from static_case import StaticCase
sys.path.append("../demo")
from models import MobileNet
from layers import conv_bn_layer
......@@ -26,7 +27,7 @@ from paddle.fluid import core
import numpy as np
class TestQuantPostOnlyWeightCase1(unittest.TestCase):
class TestQuantPostOnlyWeightCase1(StaticCase):
def test_accuracy(self):
image = fluid.layers.data(
name='image', shape=[1, 28, 28], dtype='float32')
......
......@@ -17,6 +17,7 @@ import unittest
import paddle.fluid as fluid
from paddleslim.nas import RLNAS
from paddleslim.analysis import flops
from static_case import StaticCase
import numpy as np
......@@ -31,7 +32,7 @@ def compute_op_num(program):
return params, ch_list
class TestRLNAS(unittest.TestCase):
class TestRLNAS(StaticCase):
def setUp(self):
self.init_test_case()
port = np.random.randint(8337, 8773)
......
......@@ -17,6 +17,7 @@ import os
import sys
import unittest
import paddle.fluid as fluid
from static_case import StaticCase
from paddleslim.nas import SANAS
from paddleslim.analysis import flops
import numpy as np
......@@ -33,7 +34,7 @@ def compute_op_num(program):
return params, ch_list
class TestSANAS(unittest.TestCase):
class TestSANAS(StaticCase):
def setUp(self):
self.init_test_case()
port = np.random.randint(8337, 8773)
......
......@@ -17,11 +17,12 @@ import unittest
import numpy
import paddle
import paddle.fluid as fluid
from static_case import StaticCase
from paddleslim.prune import sensitivity, merge_sensitive, load_sensitivities
from layers import conv_bn_layer
class TestSensitivity(unittest.TestCase):
class TestSensitivity(StaticCase):
def test_sensitivity(self):
main_program = fluid.Program()
startup_program = fluid.Program()
......
......@@ -17,9 +17,10 @@ import unittest
import paddle.fluid as fluid
from paddleslim.prune import Pruner
from layers import conv_bn_layer
from static_case import StaticCase
class TestPrune(unittest.TestCase):
class TestPrune(StaticCase):
def test_prune(self):
main_program = fluid.Program()
startup_program = fluid.Program()
......
......@@ -17,9 +17,10 @@ import unittest
import paddle.fluid as fluid
from paddleslim.dist import merge, soft_label_loss
from layers import conv_bn_layer
from static_case import StaticCase
class TestSoftLabelLoss(unittest.TestCase):
class TestSoftLabelLoss(StaticCase):
def test_soft_label_loss(self):
student_main = fluid.Program()
student_startup = fluid.Program()
......