Unverified commit 7d74dc26, authored by haoyuying, committed by GitHub

Add paddle.no_grad in predict()

Parent b87f08f5
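This change wraps the body of each module's `predict()` in a `paddle.no_grad()` block, so Paddle does not record the autograd graph while these modules run inference; the hunks below show each method as it reads with the block in place. A minimal sketch of the pattern, with hypothetical `model` and `preprocess` placeholders standing in for the actual PaddleHub modules and their transforms:

```python
import numpy as np
import paddle


def predict(model, preprocess, images, batch_size=1):
    # Sketch only: `model` and `preprocess` are placeholders for a PaddleHub
    # module's forward pass and its input preprocessing.
    model.eval()                # inference behavior for dropout / batch norm
    results = []
    with paddle.no_grad():      # operations inside are not tracked for backward
        for start in range(0, len(images), batch_size):
            batch = np.array([preprocess(img) for img in images[start:start + batch_size]],
                             dtype='float32')
            preds = model(paddle.to_tensor(batch))
            results.append(preds.numpy())
    return results
```

`eval()` and `no_grad()` are complementary: `eval()` switches layers such as dropout and batch norm to inference behavior, while `no_grad()` stops operations from being recorded for backward, which lowers memory use during prediction.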
@@ -92,33 +92,34 @@ class ImageClassifierModule(RunModule, ImageServing):
            results(list[dict]) : The prediction result of each input image
        '''
        self.eval()
        with paddle.no_grad():
            res = []
            total_num = len(images)
            loop_num = int(np.ceil(total_num / batch_size))

            for iter_id in range(loop_num):
                batch_data = []
                handle_id = iter_id * batch_size
                for image_id in range(batch_size):
                    try:
                        image = self.transforms(images[handle_id + image_id])
                        batch_data.append(image)
                    except:
                        pass
                batch_image = np.array(batch_data, dtype='float32')
                preds, feature = self(paddle.to_tensor(batch_image))
                preds = F.softmax(preds, axis=1).numpy()
                pred_idxs = np.argsort(preds)[:, ::-1][:, :top_k]

                for i, pred in enumerate(pred_idxs):
                    res_dict = {}
                    for k in pred:
                        class_name = self.labels[int(k)]
                        res_dict[class_name] = preds[i][k]

                    res.append(res_dict)

            return res

    @serving
    def serving_method(self, images: list, top_k: int, **kwargs):
@@ -223,54 +224,55 @@ class ImageColorizeModule(RunModule, ImageServing):
            res(list[dict]) : The prediction result of each input image
        '''
        self.eval()
        with paddle.no_grad():
            lab2rgb = T.LAB2RGB()
            res = []
            total_num = len(images)
            loop_num = int(np.ceil(total_num / batch_size))
            for iter_id in range(loop_num):
                batch_data = []
                handle_id = iter_id * batch_size
                for image_id in range(batch_size):
                    try:
                        image = self.transforms(images[handle_id + image_id])
                        batch_data.append(image)
                    except:
                        pass
                batch_data = np.array(batch_data)
                im = self.preprocess(batch_data)
                out_class, out_reg = self(im['A'], im['hint_B'], im['mask_B'])

                visual_ret = OrderedDict()
                for i in range(im['A'].shape[0]):
                    gray = lab2rgb(np.concatenate((im['A'].numpy(), np.zeros(im['B'].shape)), axis=1))[i]
                    gray = np.clip(np.transpose(gray, (1, 2, 0)), 0, 1) * 255
                    visual_ret['gray'] = gray.astype(np.uint8)
                    hint = lab2rgb(np.concatenate((im['A'].numpy(), im['hint_B'].numpy()), axis=1))[i]
                    hint = np.clip(np.transpose(hint, (1, 2, 0)), 0, 1) * 255
                    visual_ret['hint'] = hint.astype(np.uint8)
                    real = lab2rgb(np.concatenate((im['A'].numpy(), im['B'].numpy()), axis=1))[i]
                    real = np.clip(np.transpose(real, (1, 2, 0)), 0, 1) * 255
                    visual_ret['real'] = real.astype(np.uint8)
                    fake = lab2rgb(np.concatenate((im['A'].numpy(), out_reg.numpy()), axis=1))[i]
                    fake = np.clip(np.transpose(fake, (1, 2, 0)), 0, 1) * 255
                    visual_ret['fake_reg'] = fake.astype(np.uint8)

                    if visualization:
                        if isinstance(images[handle_id + i], str):
                            org_img = cv2.imread(images[handle_id + i]).astype('float32')
                        else:
                            org_img = images[handle_id + i]
                        h, w, c = org_img.shape
                        fake_name = "fake_" + str(time.time()) + ".png"
                        if not os.path.exists(save_path):
                            os.mkdir(save_path)
                        fake_path = os.path.join(save_path, fake_name)
                        visual_gray = Image.fromarray(visual_ret['fake_reg'])
                        visual_gray = visual_gray.resize((w, h), Image.BILINEAR)
                        visual_gray.save(fake_path)

                    res.append(visual_ret)
            return res

    @serving
    def serving_method(self, images: list, **kwargs):
@@ -393,60 +395,61 @@ class Yolov3Module(RunModule, ImageServing):
            labels(np.ndarray): Predict labels.
        '''
        self.eval()
        with paddle.no_grad():
            boxes = []
            scores = []
            self.downsample = 32
            im = self.transform(imgpath)
            h, w, c = utils.img_shape(imgpath)
            im_shape = paddle.to_tensor(np.array([[h, w]]).astype('int32'))
            label_names = utils.get_label_infos(filelist)
            img_data = paddle.to_tensor(np.array([im]).astype('float32'))

            outputs = self(img_data)

            for i, out in enumerate(outputs):
                anchor_mask = self.anchor_masks[i]
                mask_anchors = []
                for m in anchor_mask:
                    mask_anchors.append((self.anchors[2 * m]))
                    mask_anchors.append(self.anchors[2 * m + 1])

                box, score = F.yolo_box(
                    x=out,
                    img_size=im_shape,
                    anchors=mask_anchors,
                    class_num=self.class_num,
                    conf_thresh=self.valid_thresh,
                    downsample_ratio=self.downsample,
                    name="yolo_box" + str(i))

                boxes.append(box)
                scores.append(paddle.transpose(score, perm=[0, 2, 1]))
                self.downsample //= 2

            yolo_boxes = paddle.concat(boxes, axis=1)
            yolo_scores = paddle.concat(scores, axis=2)

            pred = F.multiclass_nms(
                bboxes=yolo_boxes,
                scores=yolo_scores,
                score_threshold=self.valid_thresh,
                nms_top_k=self.nms_topk,
                keep_top_k=self.nms_posk,
                nms_threshold=self.nms_thresh,
                background_label=-1)

            bboxes = pred.numpy()
            labels = bboxes[:, 0].astype('int32')
            scores = bboxes[:, 1].astype('float32')
            boxes = bboxes[:, 2:].astype('float32')

            if visualization:
                if not os.path.exists(save_path):
                    os.mkdir(save_path)
                utils.draw_boxes_on_image(imgpath, boxes, scores, labels, label_names, 0.5, save_path)

            return boxes, scores, labels


class StyleTransferModule(RunModule, ImageServing):
@@ -521,37 +524,38 @@ class StyleTransferModule(RunModule, ImageServing):
            output(list[np.ndarray]) : The style transformed images with bgr mode.
        '''
        self.eval()
        with paddle.no_grad():
            style = paddle.to_tensor(self.transform(style).astype('float32'))
            style = style.unsqueeze(0)

            res = []
            total_num = len(origin)
            loop_num = int(np.ceil(total_num / batch_size))
            for iter_id in range(loop_num):
                batch_data = []
                handle_id = iter_id * batch_size
                for image_id in range(batch_size):
                    try:
                        image = self.transform(origin[handle_id + image_id])
                        batch_data.append(image.astype('float32'))
                    except:
                        pass

                batch_image = np.array(batch_data)
                content = paddle.to_tensor(batch_image)

                self.setTarget(style)
                output = self(content)
                for num in range(batch_size):
                    out = paddle.clip(output[num].transpose((1, 2, 0)), 0, 255).numpy().astype(np.uint8)
                    res.append(out)
                    if visualization:
                        style_name = "style_" + str(time.time()) + ".png"
                        if not os.path.exists(save_path):
                            os.mkdir(save_path)
                        path = os.path.join(save_path, style_name)
                        cv2.imwrite(path, out)
            return res

    @serving
    def serving_method(self, images: list, **kwargs):
@@ -655,47 +659,48 @@ class ImageSegmentationModule(ImageServing, RunModule):
            output(list[np.ndarray]) : The segmentation mask.
        '''
        self.eval()
        with paddle.no_grad():
            result = []

            total_num = len(images)
            loop_num = int(np.ceil(total_num / batch_size))
            for iter_id in range(loop_num):
                batch_data = []
                handle_id = iter_id * batch_size
                for image_id in range(batch_size):
                    try:
                        image, _ = self.transform(images[handle_id + image_id])
                        batch_data.append(image)
                    except:
                        pass
                batch_image = np.array(batch_data).astype('float32')
                pred = self(paddle.to_tensor(batch_image))
                pred = paddle.argmax(pred[0], axis=1, keepdim=True, dtype='int32')

                for num in range(pred.shape[0]):
                    if isinstance(images[handle_id + num], str):
                        image = cv2.imread(images[handle_id + num])
                    else:
                        image = images[handle_id + num]
                    h, w, c = image.shape
                    pred_final = utils.reverse_transform(pred[num:num + 1], (h, w), self.transforms.transforms)
                    pred_final = paddle.squeeze(pred_final)
                    pred_final = pred_final.numpy().astype('uint8')

                    if visualization:
                        added_image = utils.visualize(images[handle_id + num], pred_final, weight=0.6)
                        pred_mask = utils.get_pseudo_color_map(pred_final)
                        pred_image_path = os.path.join(save_path, 'image', str(time.time()) + ".png")
                        pred_mask_path = os.path.join(save_path, 'mask', str(time.time()) + ".png")
                        if not os.path.exists(os.path.dirname(pred_image_path)):
                            os.makedirs(os.path.dirname(pred_image_path))
                        if not os.path.exists(os.path.dirname(pred_mask_path)):
                            os.makedirs(os.path.dirname(pred_mask_path))
                        cv2.imwrite(pred_image_path, added_image)
                        pred_mask.save(pred_mask_path)

                    result.append(pred_final)
            return result

    @serving
    def serving_method(self, images: List[str], **kwargs):
...