diff --git a/tools/cpp_infer.py b/tools/cpp_infer.py
index 41b1e8a007ab179b0e08341a16026c1f18c43803..6e8f0b5b8fc24af4cd2378d786171378bcad01ca 100644
--- a/tools/cpp_infer.py
+++ b/tools/cpp_infer.py
@@ -78,7 +78,9 @@ def get_extra_info(im, arch, shape, scale):
         logger.info('Extra info: im_size')
         info.append(im_size)
     elif 'SSD' in arch:
-        pass
+        im_shape = np.array([shape[:2]]).astype('int32')
+        logger.info('Extra info: im_shape')
+        info.append([im_shape])
     elif 'RetinaNet' in arch:
         input_shape.extend(im.shape[2:])
         im_info = np.array([input_shape + [scale]]).astype('float32')
@@ -190,6 +192,7 @@ def Preprocess(img_path, arch, config):
 def infer():
     model_path = FLAGS.model_path
     config_path = FLAGS.config_path
+    res = {}
     assert model_path is not None, "Model path: {} does not exist!".format(
         model_path)
     assert config_path is not None, "Config path: {} does not exist!".format(
@@ -198,6 +201,9 @@ def infer():
         conf = yaml.safe_load(f)
 
     img_data = Preprocess(FLAGS.infer_img, conf['arch'], conf['Preprocess'])
+    if 'SSD' in conf['arch']:
+        img_data, res['im_shape'] = img_data
+        img_data = [img_data]
 
     if conf['use_python_inference']:
         place = fluid.CUDAPlace(0)
@@ -253,7 +259,6 @@ def infer():
     is_bbox_normalized = True if 'SSD' in conf['arch'] else False
 
     out = outs[-1]
-    res = {}
     lod = out.lod() if conf['use_python_inference'] else out.lod
     lengths = offset_to_lengths(lod)
     np_data = np.array(out) if conf[