Unverified commit daf35241, authored by lzzyzlbb, committed by GitHub

add benchmark_infer (#590)

* add benchmark_infer
Parent 0d66d0ac
@@ -183,7 +183,7 @@ class BaseModel(ABC):
         for param in net.parameters():
             param.trainable = requires_grad
 
-    def export_model(self, export_model, output_dir=None, inputs_size=[], export_serving_model=False):
+    def export_model(self, export_model, output_dir=None, inputs_size=[], export_serving_model=False, model_name=None):
         inputs_num = 0
         for net in export_model:
             input_spec = [
@@ -196,11 +196,13 @@ class BaseModel(ABC):
                 input_spec=input_spec)
             if output_dir is None:
                 output_dir = 'inference_model'
+            if model_name is None:
+                model_name = '{}_{}'.format(self.__class__.__name__.lower(),
+                                            net["name"])
             paddle.jit.save(
                 static_model,
                 os.path.join(
-                    output_dir, '{}_{}'.format(self.__class__.__name__.lower(),
-                                               net["name"])))
+                    output_dir, model_name))
             if export_serving_model:
                 from paddle_serving_client.io import inference_model_to_serving
                 model_name = '{}_{}'.format(self.__class__.__name__.lower(),
...
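For context, the change above only affects naming: when model_name is not given, export_model keeps the old "<classname>_<netname>" convention, and when it is given, that name is used verbatim in the path passed to paddle.jit.save. A minimal runnable sketch of that resolution logic (the class and net names below are placeholders for illustration, not taken from this file):

import os

def resolve_export_path(output_dir=None, model_name=None,
                        class_name="cycleganmodel", net_name="netG_A"):
    # Mirrors the updated BaseModel.export_model naming logic: an explicit
    # model_name overrides the default "<classname>_<netname>" prefix.
    if output_dir is None:
        output_dir = 'inference_model'
    if model_name is None:
        model_name = '{}_{}'.format(class_name, net_name)
    return os.path.join(output_dir, model_name)

print(resolve_export_path())                        # inference_model/cycleganmodel_netG_A
print(resolve_export_path(model_name="inference"))  # inference_model/inference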
@@ -214,7 +214,7 @@ class FirstOrderModel(BaseModel):
                                kp_driving=kp_norm)
         return out['prediction']
 
-    def export_model(self, export_model=None, output_dir=None, inputs_size=[], export_serving_model=False):
+    def export_model(self, export_model=None, output_dir=None, inputs_size=[], export_serving_model=False, model_name=None):
         source = paddle.rand(shape=inputs_size[0], dtype='float32')
         driving = paddle.rand(shape=inputs_size[1], dtype='float32')
...
@@ -310,13 +310,16 @@ class StyleGAN2Model(BaseModel):
                      export_model=None,
                      output_dir=None,
                      inputs_size=[[1, 1, 512], [1, 1]],
-                     export_serving_model=False):
+                     export_serving_model=False,
+                     model_name=None):
         infer_generator = self.InferGenerator()
         infer_generator.set_generator(self.nets['gen'])
         style = paddle.rand(shape=inputs_size[0], dtype='float32')
         truncation = paddle.rand(shape=inputs_size[1], dtype='float32')
         if output_dir is None:
             output_dir = 'inference_model'
+        if model_name is None:
+            model_name = "stylegan2model_gen"
         paddle.jit.save(infer_generator,
-                        os.path.join(output_dir, "stylegan2model_gen"),
+                        os.path.join(output_dir, model_name),
                         input_spec=[style, truncation])
@@ -4,7 +4,7 @@ python:python3.7
 gpu_list:0|0,1
 ##
 auto_cast:null
-epochs:lite_train_lite_infer=1|whole_train_whole_infer=200
+epochs:lite_train_lite_infer=1|lite_train_whole_infer=1|whole_train_whole_infer=200
 output_dir:./output/
 dataset.train.batch_size:lite_train_lite_infer=1|whole_train_whole_infer=1
 pretrained_model:null
@@ -27,13 +27,13 @@ null:null
 ===========================infer_params===========================
 --output_dir:./output/
 load:null
-norm_export:tools/export_model.py -c configs/cyclegan_horse2zebra.yaml --inputs_size="-1,3,-1,-1;-1,3,-1,-1" --load
+norm_export:tools/export_model.py -c configs/cyclegan_horse2zebra.yaml --inputs_size="-1,3,-1,-1;-1,3,-1,-1" --model_name inference --load
 quant_export:null
 fpgm_export:null
 distill_export:null
 export1:null
 export2:null
-inference_dir:cycleganmodel_netG_A
+inference_dir:inference
 train_model:./inference/cyclegan_horse2zebra/cycleganmodel_netG_A
 infer_export:null
 infer_quant:False
@@ -54,4 +54,6 @@ batch_size:1
 fp_items:fp32
 epoch:1
 --profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
-flags:null
\ No newline at end of file
+flags:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,256,256]}]
\ No newline at end of file
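Two things change in this config: norm_export now passes --model_name inference, so the exported files land under a fixed name and inference_dir can simply point to inference; and a new infer_benchmark_params section lists randomly generated inference inputs, one {dtype,[shape]} entry per input tensor, for the benchmark run. A hypothetical sketch of how such a {float32,[3,256,256]} entry can be materialized (an illustration of the format, not the TIPC parser itself):

import numpy as np

# Build random inputs from a spec like random_infer_input:[{float32,[3,256,256]}],
# adding a leading batch dimension.
def make_random_inputs(spec, batch_size=1):
    return [np.random.rand(batch_size, *shape).astype(dtype)
            for dtype, shape in spec]

inputs = make_random_inputs([("float32", [3, 256, 256])])
print(inputs[0].shape, inputs[0].dtype)  # (1, 3, 256, 256) float32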
@@ -4,7 +4,7 @@ python:python3.7
 gpu_list:0
 ##
 auto_cast:null
-epochs:lite_train_lite_infer=1|whole_train_whole_infer=100
+epochs:lite_train_lite_infer=1|lite_train_whole_infer=1|whole_train_whole_infer=100
 output_dir:./output/
 dataset.train.batch_size:lite_train_lite_infer=8|whole_train_whole_infer=8
 pretrained_model:null
...
@@ -4,7 +4,7 @@ python:python3.7
 gpu_list:0
 ##
 auto_cast:null
-epochs:lite_train_lite_infer=10|whole_train_whole_infer=200
+epochs:lite_train_lite_infer=10|lite_train_whole_infer=10|whole_train_whole_infer=200
 output_dir:./output/
 dataset.train.batch_size:lite_train_lite_infer=1|whole_train_whole_infer=1
 pretrained_model:null
@@ -27,13 +27,13 @@ null:null
 ===========================infer_params===========================
 --output_dir:./output/
 load:null
-norm_export:tools/export_model.py -c configs/pix2pix_facades.yaml --inputs_size="-1,3,-1,-1" --load
+norm_export:tools/export_model.py -c configs/pix2pix_facades.yaml --inputs_size="-1,3,-1,-1" --model_name inference --load
 quant_export:null
 fpgm_export:null
 distill_export:null
 export1:null
 export2:null
-inference_dir:pix2pixmodel_netG
+inference_dir:inference
 train_model:./inference/pix2pix_facade/pix2pixmodel_netG
 infer_export:null
 infer_quant:False
@@ -54,4 +54,6 @@ batch_size:1
 fp_items:fp32
 epoch:10
 --profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
-flags:null
\ No newline at end of file
+flags:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[3,256,256]}]
@@ -4,7 +4,7 @@ python:python3.7
 gpu_list:0
 ##
 auto_cast:null
-total_iters:lite_train_lite_infer=10|whole_train_whole_infer=800
+total_iters:lite_train_lite_infer=10|lite_train_whole_infer=10|whole_train_whole_infer=800
 output_dir:./output/
 dataset.train.batch_size:lite_train_lite_infer=3|whole_train_whole_infer=3
 pretrained_model:null
@@ -27,13 +27,13 @@ null:null
 ===========================infer_params===========================
 --output_dir:./output/
 load:null
-norm_export:tools/export_model.py -c configs/stylegan_v2_256_ffhq.yaml --inputs_size="1,1,512;1,1" --load
+norm_export:tools/export_model.py -c configs/stylegan_v2_256_ffhq.yaml --inputs_size="1,1,512;1,1" --model_name inference --load
 quant_export:null
 fpgm_export:null
 distill_export:null
 export1:null
 export2:null
-inference_dir:stylegan2model_gen
+inference_dir:inference
 train_model:./inference/stylegan2/stylegan2model_gen
 infer_export:null
 infer_quant:False
@@ -54,4 +54,6 @@ batch_size:8|16
 fp_items:fp32
 epoch:100
 --profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
-flags:null
\ No newline at end of file
+flags:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[1,512]}, {float32,[1]}]
\ No newline at end of file
@@ -4,7 +4,7 @@ python:python3.7
 gpu_list:0
 ##
 auto_cast:null
-total_iters:lite_train_lite_infer=10|whole_train_whole_infer=200
+total_iters:lite_train_lite_infer=10|lite_train_whole_infer=10|whole_train_whole_infer=200
 output_dir:./output/
 dataset.train.batch_size:lite_train_lite_infer=1|whole_train_whole_infer=1
 pretrained_model:null
@@ -27,13 +27,13 @@ null:null
 ===========================infer_params===========================
 --output_dir:./output/
 load:null
-norm_export:tools/export_model.py -c configs/basicvsr_reds.yaml --inputs_size="1,6,3,180,320" --load
+norm_export:tools/export_model.py -c configs/basicvsr_reds.yaml --inputs_size="1,6,3,180,320" --model_name inference --load
 quant_export:null
 fpgm_export:null
 distill_export:null
 export1:null
 export2:null
-inference_dir:basicvsrmodel_generator
+inference_dir:inference
 train_model:./inference/basicvsr/basicvsrmodel_generator
 infer_export:null
 infer_quant:False
@@ -54,4 +54,6 @@ batch_size:2|4
 fp_items:fp32
 total_iters:50
 --profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
-flags:null
\ No newline at end of file
+flags:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[6,3,180,320]}]
\ No newline at end of file
@@ -4,7 +4,7 @@ python:python3.7
 gpu_list:0
 ##
 auto_cast:null
-total_iters:lite_train_lite_infer=10|whole_train_whole_infer=200
+total_iters:lite_train_lite_infer=10|lite_train_whole_infer=10|whole_train_whole_infer=200
 output_dir:./output/
 dataset.train.batch_size:lite_train_lite_infer=1|whole_train_whole_infer=1
 pretrained_model:null
@@ -27,13 +27,13 @@ null:null
 ===========================infer_params===========================
 --output_dir:./output/
 load:null
-norm_export:tools/export_model.py -c configs/msvsr_reds.yaml --inputs_size="1,2,3,180,320" --load
+norm_export:tools/export_model.py -c configs/msvsr_reds.yaml --inputs_size="1,2,3,180,320" --model_name inference --load
 quant_export:null
 fpgm_export:null
 distill_export:null
 export1:null
 export2:null
-inference_dir:multistagevsrmodel_generator
+inference_dir:inference
 train_model:./inference/msvsr/multistagevsrmodel_generator
 infer_export:null
 infer_quant:False
@@ -49,3 +49,5 @@ null:null
 null:null
 --benchmark:True
 null:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[2,3,180,320]}]
@@ -73,7 +73,7 @@ save_log_key=$(func_parser_key "${lines[48]}")
 infer_key1=$(func_parser_key "${lines[50]}")
 infer_value1=$(func_parser_value "${lines[50]}")
 
-LOG_PATH="./test_tipc/output"
+LOG_PATH="./test_tipc/output/${model_name}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_python.log"
...
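With this change each model's TIPC results are written under their own subdirectory instead of the shared ./test_tipc/output. A small sketch of the resulting layout, with a hypothetical model_name (the shell script derives the real value from the parsed config):

import os

model_name = "pix2pix"  # hypothetical example value
log_path = os.path.join("./test_tipc/output", model_name)
os.makedirs(log_path, exist_ok=True)
print(os.path.join(log_path, "results_python.log"))  # ./test_tipc/output/pix2pix/results_python.log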
@@ -57,6 +57,12 @@ def parse_args():
         type=bool,
         help="export serving model.",
     )
+    parser.add_argument(
+        "--model_name",
+        default=None,
+        type=str,
+        help="model_name.",
+    )
     args = parser.parse_args()
     return args
@@ -70,7 +76,8 @@ def main(args, cfg):
     for net_name, net in model.nets.items():
         if net_name in state_dicts:
             net.set_state_dict(state_dicts[net_name])
-    model.export_model(cfg.export_model, args.output_dir, inputs_size, args.export_serving_model)
+    model.export_model(cfg.export_model, args.output_dir, inputs_size,
+                       args.export_serving_model, args.model_name)
 
 if __name__ == "__main__":
...
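Finally, a rough sketch of how the new flag travels from the command line into export_model (argument parsing only; config loading and model construction are omitted). The TIPC configs above invoke it as tools/export_model.py ... --model_name inference --load <weights>.

import argparse

# Minimal stand-in for the parser in tools/export_model.py with the new option.
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", default=None, type=str, help="model_name.")
args = parser.parse_args(["--model_name", "inference"])

# Forwarded as the extra argument in the updated call:
# model.export_model(cfg.export_model, args.output_dir, inputs_size,
#                    args.export_serving_model, args.model_name)
print(args.model_name)  # inference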