diff --git a/docs/en_US/tutorials/prenet.md b/docs/en_US/tutorials/prenet.md
index dff8dc3d723f3c438649dd1b5a561e4f625ab36b..fbe141c6bd4b519107bdd533fc8e6c7cb3b1edb0 100644
--- a/docs/en_US/tutorials/prenet.md
+++ b/docs/en_US/tutorials/prenet.md
@@ -41,7 +41,6 @@
 .
 ```
 
-
 ### 2.2 Train/Test
 
 
@@ -70,6 +69,7 @@ Output:
 
 ## 4 Model Download
+
 | model | dataset |
 |---|---|
 | [PReNet](https://paddlegan.bj.bcebos.com/models/PReNet.pdparams) | [RainH.zip](https://pan.baidu.com/s/1_vxCatOV3sOA6Vkx1l23eA?pwd=vitu) |
 
@@ -77,6 +77,7 @@ Output:
 
 
+
 # References
 
 - 1. [Progressive Image Deraining Networks: A Better and Simpler Baseline](https://arxiv.org/pdf/1901.09221v3.pdf)
 
@@ -89,4 +90,4 @@ Output:
 booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
 year={2019},
 }
-```
+
diff --git a/test_tipc/configs/PReNet/train_infer_python.txt b/test_tipc/configs/PReNet/train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..cf30f759816db817f9a63d5ea9e9b96e2b975e44
--- /dev/null
+++ b/test_tipc/configs/PReNet/train_infer_python.txt
@@ -0,0 +1,59 @@
+===========================train_params===========================
+model_name:prenet
+python:python3.7
+gpu_list:0
+##
+auto_cast:null
+total_iters:lite_train_lite_infer=10|lite_train_whole_infer=10|whole_train_whole_infer=200
+output_dir:./output/
+dataset.train.batch_size:lite_train_lite_infer=1|whole_train_whole_infer=1
+pretrained_model:null
+train_model_name:prenet*/*checkpoint.pdparams
+train_infer_img_dir:./data/prenet/test
+null:null
+##
+trainer:norm_train
+norm_train:tools/main.py -c configs/prenet.yaml --seed 123 -o dataset.train.num_workers=0 log_config.interval=1 snapshot_config.interval=5
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:null
+null:null
+##
+===========================infer_params===========================
+--output_dir:./output/
+load:null
+norm_export:tools/export_model.py -c configs/prenet.yaml --inputs_size="-1,3,-1,-1" --model_name inference --load
+quant_export:null
+fpgm_export:null
+distill_export:null
+export1:null
+export2:null
+inference_dir:inference
+train_model:./inference/prenet/prenet_generator
+infer_export:null
+infer_quant:False
+inference:tools/inference.py --model_type prenet -c configs/prenet.yaml --seed 123 --output_path test_tipc/output/
+--device:gpu
+null:null
+null:null
+null:null
+null:null
+null:null
+--model_path:
+null:null
+null:null
+--benchmark:True
+null:null
+===========================train_benchmark_params==========================
+batch_size:2|4
+fp_items:fp32
+total_iters:50
+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
+flags:null
+===========================infer_benchmark_params==========================
+random_infer_input:[{float32,[6,3,180,320]}]
diff --git a/tools/inference.py b/tools/inference.py
index 0f79ebcb26def517518c645be5bea6a0c6c41237..6ee2108adbc98c164645f61113df2db4a959d29c 100644
--- a/tools/inference.py
+++ b/tools/inference.py
@@ -313,6 +313,22 @@ def main():
             metric_file = os.path.join(args.output_path, "singan/metric.txt")
             for metric in metrics.values():
                 metric.update(prediction, data['A'])
+        elif model_type == "prenet":
+            lq = data['lq'].numpy()
+            gt = data['gt'].numpy()
+            input_handles[0].copy_from_cpu(lq)
+            predictor.run()
+            prediction = output_handle.copy_to_cpu()
+            prediction = paddle.to_tensor(prediction)
+            gt = paddle.to_tensor(gt)
+            image_numpy = tensor2img(prediction, min_max)
+            gt_img = tensor2img(gt, min_max)
+            save_image(
+                image_numpy,
+                os.path.join(args.output_path, "prenet/{}.png".format(i)))
+            metric_file = os.path.join(args.output_path, "prenet/metric.txt")
+            for metric in metrics.values():
+                metric.update(image_numpy, gt_img)