Commit da75ef8b authored by licx

fix conflict

Parent 224667b8
@@ -45,26 +45,10 @@ from ppocr.utils.save_load import init_model
from eval_utils.eval_det_utils import eval_det_run
from eval_utils.eval_rec_utils import test_rec_benchmark
from eval_utils.eval_rec_utils import eval_rec_run
from ppocr.utils.character import CharacterOps
def main():
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)
    # check if set use_gpu=True in paddlepaddle cpu version
    use_gpu = config['Global']['use_gpu']
    program.check_gpu(use_gpu)
    alg = config['Global']['algorithm']
    assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE', 'SAST']
    if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:
        config['Global']['char_ops'] = CharacterOps(config['Global'])
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    startup_prog = fluid.Program()
    eval_program = fluid.Program()
    startup_prog, eval_program, place, config, train_alg_type = program.preprocess()
    eval_build_outputs = program.build(
        config, eval_program, startup_prog, mode='test')
    eval_fetch_name_list = eval_build_outputs[1]
@@ -75,7 +59,7 @@ def main():
    init_model(config, eval_program, exe)
    if alg in ['EAST', 'DB', 'SAST']:
    if train_alg_type == 'det':
        eval_reader = reader_main(config=config, mode="eval")
        eval_info_dict = {'program':eval_program,\
            'reader':eval_reader,\
@@ -101,6 +85,4 @@ def main():
if __name__ == '__main__':
    parser = program.ArgsParser()
    FLAGS = parser.parse_args()
    main()
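After this commit, eval.py no longer parses the config, checks use_gpu, or builds CharacterOps itself; all of that moves into program.preprocess(). Below is a minimal sketch of the resulting evaluation flow, assembled from the hunks above; the imports and the branch bodies it elides are assumptions for illustration, not lines of this commit.

```python
# Hedged sketch of the refactored eval flow (branch bodies and some imports
# are assumptions; see the hunks above for the authoritative lines).
import program  # tools/program.py
import paddle.fluid as fluid
from ppocr.utils.save_load import init_model

def main():
    # preprocess() now owns config loading/merging, the use_gpu check,
    # CharacterOps construction and device selection.
    startup_prog, eval_program, place, config, train_alg_type = program.preprocess()

    # Build the evaluation program and keep the fetch names it reports.
    eval_build_outputs = program.build(
        config, eval_program, startup_prog, mode='test')
    eval_fetch_name_list = eval_build_outputs[1]

    exe = fluid.Executor(place)
    exe.run(startup_prog)                  # initialize parameters
    init_model(config, eval_program, exe)  # load the checkpoint under test

    if train_alg_type == 'det':
        pass  # detection branch: reader_main(..., mode="eval") + eval_det_run
    else:
        pass  # recognition branch: eval_rec_run / test_rec_benchmark
```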
@@ -22,6 +22,7 @@ import yaml
import os
from ppocr.utils.utility import create_module
from ppocr.utils.utility import initial_logger
logger = initial_logger()
import paddle.fluid as fluid
@@ -31,8 +32,7 @@ from eval_utils.eval_det_utils import eval_det_run
from eval_utils.eval_rec_utils import eval_rec_run
from ppocr.utils.save_load import save_model
import numpy as np
from ppocr.utils.character import cal_predicts_accuracy
from ppocr.utils.character import cal_predicts_accuracy, CharacterOps
class ArgsParser(ArgumentParser):
    def __init__(self):
@@ -81,10 +81,8 @@ default_config = {'Global': {'debug': False, }}
def load_config(file_path):
    """
    Load config from yml/yaml file.
    Args:
        file_path (str): Path of the config file to be loaded.
    Returns: global config
    """
    merge_config(default_config)
@@ -103,10 +101,8 @@ def load_config(file_path):
def merge_config(config):
    """
    Merge config into global config.
    Args:
        config (dict): Config to be merged.
    Returns: global config
    """
    for key, value in config.items():
@@ -157,13 +153,11 @@ def build(config, main_prog, startup_prog, mode):
        3. create a model
        4. create fetchs
        5. create an optimizer
    Args:
        config(dict): config
        main_prog(): main program
        startup_prog(): startup program
        is_train(bool): train or valid
    Returns:
        dataloader(): a bridge between the model and the data
        fetchs(dict): dict of model outputs(included loss and measures)
@@ -374,3 +368,29 @@ def train_eval_rec_run(config, exe, train_info_dict, eval_info_dict):
            save_path = save_model_dir + "/iter_epoch_%d" % (epoch)
            save_model(train_info_dict['train_program'], save_path)
    return
def preprocess():
    FLAGS = ArgsParser().parse_args()
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    logger.info(config)
    # check if set use_gpu=True in paddlepaddle cpu version
    use_gpu = config['Global']['use_gpu']
    check_gpu(use_gpu)
    alg = config['Global']['algorithm']
    assert alg in ['EAST', 'DB', 'SAST', 'Rosetta', 'CRNN', 'STARNet', 'RARE']
    if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:
        config['Global']['char_ops'] = CharacterOps(config['Global'])
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    startup_program = fluid.Program()
    train_program = fluid.Program()
    if alg in ['EAST', 'DB', 'SAST']:
        train_alg_type = 'det'
    else:
        train_alg_type = 'rec'
    return startup_program, train_program, place, config, train_alg_type
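For context, here is a hedged sketch (not part of this commit) of how a training-side driver would consume the tuple returned by preprocess(); mode='train' and the train_eval_det_run helper are assumptions inferred from the surrounding code, since only train_eval_rec_run appears in the hunks above.

```python
# Hypothetical training-side driver; names marked "assumed" are not shown in
# this diff and only illustrate the dispatch on train_alg_type.
import program  # tools/program.py
import paddle.fluid as fluid

def main():
    startup_prog, train_prog, place, config, train_alg_type = program.preprocess()

    # build() wires the dataloader, model, loss and optimizer into train_prog
    # (mode='train' is assumed; this diff only shows mode='test').
    train_build_outputs = program.build(
        config, train_prog, startup_prog, mode='train')

    exe = fluid.Executor(place)
    exe.run(startup_prog)  # run parameter initialization once

    # 'det' vs 'rec' decides which train/eval loop to enter.
    if train_alg_type == 'det':
        pass  # assumed: program.train_eval_det_run(config, exe, ...)
    else:
        pass  # program.train_eval_rec_run(config, exe, ...) as shown above
```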