diff --git a/dygraph/deploy/python/infer.py b/dygraph/deploy/python/infer.py
index 53e7693f51a8e62d730adcdff22dc474a3c64f61..911c4fd974b43b170acf2596addbd80806b725fb 100644
--- a/dygraph/deploy/python/infer.py
+++ b/dygraph/deploy/python/infer.py
@@ -23,7 +23,7 @@ from PIL import Image
 import cv2
 import numpy as np
 import paddle
-from preprocess import preprocess, ResizeOp, NormalizeImageOp, PermuteOp, PadStride
+from preprocess import preprocess, Resize, NormalizeImage, Permute, PadStride
 from visualize import visualize_box_mask
 from paddle.inference import Config
 from paddle.inference import create_predictor
@@ -196,22 +196,22 @@ class DetectorSOLOv2(Detector):
             self.predictor.run()
             output_names = self.predictor.get_output_names()
             np_label = self.predictor.get_output_handle(output_names[
-                0]).copy_to_cpu()
-            np_score = self.predictor.get_output_handle(output_names[
                 1]).copy_to_cpu()
-            np_segms = self.predictor.get_output_handle(output_names[
+            np_score = self.predictor.get_output_handle(output_names[
                 2]).copy_to_cpu()
+            np_segms = self.predictor.get_output_handle(output_names[
+                3]).copy_to_cpu()
 
         t1 = time.time()
         for i in range(repeats):
             self.predictor.run()
             output_names = self.predictor.get_output_names()
             np_label = self.predictor.get_output_handle(output_names[
-                0]).copy_to_cpu()
-            np_score = self.predictor.get_output_handle(output_names[
                 1]).copy_to_cpu()
-            np_segms = self.predictor.get_output_handle(output_names[
+            np_score = self.predictor.get_output_handle(output_names[
                 2]).copy_to_cpu()
+            np_segms = self.predictor.get_output_handle(output_names[
+                3]).copy_to_cpu()
         t2 = time.time()
         ms = (t2 - t1) * 1000.0 / repeats
         print("Inference: {} ms per batch image".format(ms))
diff --git a/dygraph/deploy/python/preprocess.py b/dygraph/deploy/python/preprocess.py
index 0fedc682452afdac49ee0aac1eefd3b6b977ebe8..cf31a3f3d1b18a26ace14666959fa91e1498d81f 100644
--- a/dygraph/deploy/python/preprocess.py
+++ b/dygraph/deploy/python/preprocess.py
@@ -38,7 +38,7 @@ def decode_image(im_file, im_info):
     return im, im_info
 
 
-class ResizeOp(object):
+class Resize(object):
     """resize image by target_size and max_size
     Args:
         target_size (int): the target size of image
@@ -115,7 +115,7 @@ class ResizeOp(object):
         return im_scale_y, im_scale_x
 
 
-class NormalizeImageOp(object):
+class NormalizeImage(object):
     """normalize image
     Args:
         mean (list): im - mean
@@ -150,7 +150,7 @@ class NormalizeImageOp(object):
         return im, im_info
 
 
-class PermuteOp(object):
+class Permute(object):
     """permute image
     Args:
         to_bgr (bool): whether convert RGB to BGR
@@ -158,7 +158,7 @@ class PermuteOp(object):
     """
 
     def __init__(self, ):
-        super(PermuteOp, self).__init__()
+        super(Permute, self).__init__()
 
     def __call__(self, im, im_info):
         """
diff --git a/dygraph/deploy/python/visualize.py b/dygraph/deploy/python/visualize.py
index fefba9773f12296d134e436698ca04bc0c327fcc..6093572818225138af3984b1f07d4c1963ca3d5b 100644
--- a/dygraph/deploy/python/visualize.py
+++ b/dygraph/deploy/python/visualize.py
@@ -173,7 +173,7 @@ def draw_segm(im,
     clsid2color = {}
     np_segms = np_segms.astype(np.uint8)
     for i in range(np_segms.shape[0]):
-        mask, score, clsid = np_segms[i], np_score[i], np_label[i] + 1
+        mask, score, clsid = np_segms[i], np_score[i], np_label[i]
         if score < threshold:
             continue
 
diff --git a/dygraph/docs/tutorials/INSTALL_cn.md b/dygraph/docs/tutorials/INSTALL_cn.md
index 78f0b4424afb43ceefe7673704aabd87a0ff4c26..ceefa45ab357f823be9054bd33347284058243b8 100644
--- a/dygraph/docs/tutorials/INSTALL_cn.md
+++ b/dygraph/docs/tutorials/INSTALL_cn.md
@@ -12,7 +12,7 @@
 
 **环境需求:**
 
-- PaddlePaddle 每日版本
+- PaddlePaddle 2.0.1 或 PaddlePaddle release/2.0分支最新编译安装包
 - OS 64位操作系统
 - Python 3(3.5.1+/3.6/3.7)，64位版本
 - pip/pip3(9.0.1+)，64位版本
diff --git a/dygraph/ppdet/engine/export_utils.py b/dygraph/ppdet/engine/export_utils.py
index 140d61964921ac3cceafcda96eea6e68c7c845fc..9d0320c0ee2ea3ba65e92309922bfd8a755e9323 100644
--- a/dygraph/ppdet/engine/export_utils.py
+++ b/dygraph/ppdet/engine/export_utils.py
@@ -52,7 +52,7 @@ def _parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):
     for st in sample_transforms[1:]:
         for key, value in st.items():
             p = {'type': key}
-            if key == 'ResizeOp':
+            if key == 'Resize':
                 if value.get('keep_ratio',
                              False) and image_shape[1] is not None:
                     max_size = max(image_shape[1:])
@@ -65,7 +65,7 @@ def _parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):
     methods = [list(bt.keys())[0] for bt in batch_transforms]
     for bt in batch_transforms:
         for key, value in bt.items():
-            if key == 'PadBatchOp':
+            if key == 'PadBatch':
                 preprocess_list.append({'type': 'PadStride'})
                 preprocess_list[-1].update({
                     'stride': value['pad_to_stride']
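
Note (not part of the patch): the class renames in deploy/python/preprocess.py and the 'type' strings written by export_utils._parse_reader() have to stay in sync, because the deploy code resolves preprocess ops by name. The sketch below only illustrates that coupling; the constructor arguments and the preprocess() call signature are assumptions based on the imports and docstrings visible in this diff, not the repo's exact API.

```python
# Illustrative sketch only (not code from this PR): build the preprocess ops
# from a 'type'-keyed list like the one _parse_reader() writes into the
# exported inference config, using the renamed classes.
from preprocess import preprocess, Resize, NormalizeImage, Permute

# Hypothetical op list using the new names; the argument values are
# placeholders and may not match the real constructor signatures.
preprocess_infos = [
    {'type': 'Resize', 'target_size': 608},
    {'type': 'NormalizeImage',
     'mean': [0.485, 0.456, 0.406],
     'std': [0.229, 0.224, 0.225]},
    {'type': 'Permute'},
]

op_classes = {'Resize': Resize, 'NormalizeImage': NormalizeImage, 'Permute': Permute}

ops = []
for info in preprocess_infos:
    kwargs = dict(info)
    op_type = kwargs.pop('type')
    # A config exported before this change would still say 'ResizeOp',
    # 'NormalizeImageOp' or 'PermuteOp', and this lookup would fail, since
    # those class names no longer exist after the rename.
    ops.append(op_classes[op_type](**kwargs))

# preprocess() decodes the image and applies each op in order; each op is
# invoked as op(im, im_info), matching the __call__(self, im, im_info)
# signature shown in the diff.
im, im_info = preprocess('demo.jpg', ops)
print(im.shape)
```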