Commit 5d248b16 authored by WenmuZhou

Resolve conflicts

@@ -26,6 +26,8 @@ void DBDetector::LoadModel(const std::string &model_dir) {
     config.DisableGpu();
     if (this->use_mkldnn_) {
       config.EnableMKLDNN();
+      // cache 10 different shapes for mkldnn to avoid memory leak
+      config.SetMkldnnCacheCapacity(10);
     }
     config.SetCpuMathLibraryNumThreads(this->cpu_math_library_num_threads_);
   }
......
@@ -129,6 +129,8 @@ void CRNNRecognizer::LoadModel(const std::string &model_dir) {
     config.DisableGpu();
     if (this->use_mkldnn_) {
       config.EnableMKLDNN();
+      // cache 10 different shapes for mkldnn to avoid memory leak
+      config.SetMkldnnCacheCapacity(10);
     }
     config.SetCpuMathLibraryNumThreads(this->cpu_math_library_num_threads_);
   }
......
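Both C++ hunks above add the same call: a cache of at most 10 MKLDNN input shapes which, per the in-diff comment, keeps primitives created for varying image sizes from accumulating. The sketch below is only an illustration of how these calls fit together in a CPU-only setup; the include path, helper name, and model file names are assumptions and are not taken from this commit.

```
// Minimal sketch (not from this commit): CPU inference config with MKLDNN
// enabled and a bounded shape cache, mirroring the calls added above.
#include <memory>
#include <string>
#include "paddle_inference_api.h"  // assumed include path

std::shared_ptr<paddle::PaddlePredictor> BuildCpuPredictor(
    const std::string &model_dir, int num_threads) {
  paddle::AnalysisConfig config;
  config.SetModel(model_dir + "/model", model_dir + "/params");  // assumed file names
  config.DisableGpu();
  config.EnableMKLDNN();
  // cache 10 different shapes for mkldnn to avoid memory leak
  config.SetMkldnnCacheCapacity(10);
  config.SetCpuMathLibraryNumThreads(num_threads);
  return paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(config);
}
```

The same capacity value (10) is applied on the Python side in the create_predictor hunk further down.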
@@ -3,24 +3,25 @@ use_gpu 0
 gpu_id 0
 gpu_mem 4000
 cpu_math_library_num_threads 10
-use_mkldnn 0
-use_zero_copy_run 0
+use_mkldnn 1
+use_zero_copy_run 1
 # det config
 max_side_len 960
 det_db_thresh 0.3
 det_db_box_thresh 0.5
 det_db_unclip_ratio 2.0
-det_model_dir ../model/det
+det_model_dir ./inference/det_db
 # cls config
 use_angle_cls 0
-cls_model_dir ../model/cls
+cls_model_dir ../inference/cls
 cls_thresh 0.9
 # rec config
-rec_model_dir ../model/rec
-char_list_file ../model/ppocr_keys_v1.txt
+rec_model_dir ./inference/rec_crnn
+char_list_file ../../ppocr/utils/ppocr_keys_v1.txt
 # show the detection results
 visualize 1
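The config file above is a flat list of "key value" pairs with `#` comment lines, so these changes only swap right-hand values: MKLDNN and zero-copy run are switched on, and the model and dictionary entries point at new relative paths. Purely as an illustration of that format, and not the demo's actual loader, a minimal reader could look like this:

```
// Illustrative only: read "key value" lines such as the ones above into a map;
// the C++ demo's own config loader may work differently.
#include <fstream>
#include <map>
#include <sstream>
#include <string>

std::map<std::string, std::string> LoadKeyValueConfig(const std::string &path) {
  std::map<std::string, std::string> kv;
  std::ifstream in(path);
  std::string line;
  while (std::getline(in, line)) {
    if (line.empty() || line[0] == '#') continue;  // skip blanks and comments
    std::istringstream iss(line);
    std::string key, value;
    if (iss >> key >> value) kv[key] = value;
  }
  return kv;
}
```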
@@ -20,7 +20,7 @@ git clone https://github.com/PaddlePaddle/PaddleOCR.git
 ```
 b. Go to the Dockerfile directory (note: the cpu and gpu versions differ; the following takes cpu as an example, for the gpu version just replace the keyword)
 ```
-cd docker/cpu
+cd deploy/docker/cpu
 ```
 c. Build image
 ```
......
@@ -20,7 +20,7 @@ git clone https://github.com/PaddlePaddle/PaddleOCR.git
 ```
 b. Switch to the Dockerfile directory (note: the cpu and gpu versions differ; the following takes cpu as an example, for the gpu version just replace the keyword)
 ```
-cd docker/cpu
+cd deploy/docker/cpu
 ```
 c. Build the image
 ```
......
@@ -90,15 +90,3 @@ def check_and_read_gif(img_path):
         return imgvalue, True
     return None, False
-
-
-def create_multi_devices_program(program, loss_var_name):
-    build_strategy = fluid.BuildStrategy()
-    build_strategy.memory_optimize = False
-    build_strategy.enable_inplace = True
-    exec_strategy = fluid.ExecutionStrategy()
-    exec_strategy.num_iteration_per_drop_scope = 1
-    compile_program = fluid.CompiledProgram(program).with_data_parallel(
-        loss_name=loss_var_name,
-        build_strategy=build_strategy,
-        exec_strategy=exec_strategy)
-    return compile_program
@@ -133,7 +133,6 @@ def main(args):
     image_file_list = get_image_file_list(args.image_dir)
     text_sys = TextSystem(args)
     is_visualize = True
-    tackle_img_num = 0
     for image_file in image_file_list:
         img, flag = check_and_read_gif(image_file)
         if not flag:
@@ -142,9 +141,6 @@ def main(args):
             logger.info("error in loading image:{}".format(image_file))
             continue
         starttime = time.time()
-        tackle_img_num += 1
-        if not args.use_gpu and args.enable_mkldnn and tackle_img_num % 30 == 0:
-            text_sys = TextSystem(args)
         dt_boxes, rec_res = text_sys(img)
         elapse = time.time() - starttime
         print("Predict time of %s: %.3fs" % (image_file, elapse))
......
@@ -112,6 +112,8 @@ def create_predictor(args, mode):
         config.disable_gpu()
         config.set_cpu_math_library_num_threads(6)
         if args.enable_mkldnn:
+            # cache 10 different shapes for mkldnn to avoid memory leak
+            config.set_mkldnn_cache_capacity(10)
             config.enable_mkldnn()

     # config.enable_memory_optim()
......