From 4dc6f2376aa7ac12b42d48e704a17e35d7aa74ba Mon Sep 17 00:00:00 2001 From: haoyuying <35907364+haoyuying@users.noreply.github.com> Date: Fri, 30 Oct 2020 17:42:42 +0800 Subject: [PATCH] adapt openpose to rc version --- .../openpose_body_estimation/module.py | 42 +++++++------------ .../openpose_hands_estimation/module.py | 41 +++++++----------- modules/image/style_transfer/msgnet/module.py | 33 +++++++-------- 3 files changed, 44 insertions(+), 72 deletions(-) diff --git a/modules/image/keypoint_detection/openpose_body_estimation/module.py b/modules/image/keypoint_detection/openpose_body_estimation/module.py index bcc2d85d..457f21a6 100644 --- a/modules/image/keypoint_detection/openpose_body_estimation/module.py +++ b/modules/image/keypoint_detection/openpose_body_estimation/module.py @@ -1,11 +1,10 @@ -# coding:utf-8 -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. # -# Licensed under the Apache License, Version 2.0 (the "License" +# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -23,27 +22,24 @@ import paddle.nn as nn import numpy as np from paddlehub.module.module import moduleinfo import paddlehub.process.transforms as T - import openpose_body_estimation.processor as P -@moduleinfo( - name="openpose_body_estimation", - type="CV/image_editing", - author="paddlepaddle", - author_email="", - summary="Openpose_body_estimation is a body pose estimation model based on Realtime Multi-Person 2D Pose \ +@moduleinfo(name="openpose_body_estimation", + type="CV/image_editing", + author="paddlepaddle", + author_email="", + summary="Openpose_body_estimation is a body pose estimation model based on Realtime Multi-Person 2D Pose \ Estimation using Part Affinity Fields.", - version="1.0.0") + version="1.0.0") class BodyPoseModel(nn.Layer): """ - BodyPoseModel + BodyposeModel Args: load_checkpoint(str): Checkpoint save path, default is None. visualization (bool): Whether to save the estimation result. Default is True. 
""" - def __init__(self, load_checkpoint: str = None, visualization: bool = True): super(BodyPoseModel, self).__init__() @@ -118,15 +114,13 @@ class BodyPoseModel(nn.Layer): self.model6_2 = blocks['block6_2'] if load_checkpoint is not None: - model_dict = paddle.load(load_checkpoint)[0] + model_dict = paddle.load(load_checkpoint) self.set_dict(model_dict) print("load custom checkpoint success") else: checkpoint = os.path.join(self.directory, 'openpose_body.pdparams') - if not os.path.exists(checkpoint): - os.system('wget https://paddlehub.bj.bcebos.com/dygraph/pose/openpose_body.pdparams -O ' + checkpoint) - model_dict = paddle.load(checkpoint)[0] + model_dict = paddle.load(checkpoint) self.set_dict(model_dict) print("load pretrained checkpoint success") @@ -134,10 +128,10 @@ class BodyPoseModel(nn.Layer): layers = [] for layer_name, v in block.items(): if 'pool' in layer_name: - layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2]) + layer = nn.MaxPool2D(kernel_size=v[0], stride=v[1], padding=v[2]) layers.append((layer_name, layer)) else: - conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4]) + conv2d = nn.Conv2D(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4]) layers.append((layer_name, conv2d)) if layer_name not in no_relu_layers: layers.append(('relu_' + layer_name, nn.ReLU())) @@ -204,11 +198,3 @@ class BodyPoseModel(nn.Layer): save_path = os.path.join(save_path, img_path.rsplit("/", 1)[-1]) cv2.imwrite(save_path, canvas) return candidate, subset - - -if __name__ == "__main__": - - paddle.disable_static() - model = BodyPoseModel() - model.eval() - out1, out2 = model.predict("demo.jpg") diff --git a/modules/image/keypoint_detection/openpose_hands_estimation/module.py b/modules/image/keypoint_detection/openpose_hands_estimation/module.py index 4429597d..fedbde9d 100644 --- a/modules/image/keypoint_detection/openpose_hands_estimation/module.py +++ b/modules/image/keypoint_detection/openpose_hands_estimation/module.py @@ -1,11 +1,10 @@ -# coding:utf-8 -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. # -# Licensed under the Apache License, Version 2.0 (the "License" +# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -31,27 +30,25 @@ import paddlehub.process.transforms as T import openpose_hands_estimation.processor as P -@moduleinfo( - name="openpose_hands_estimation", - type="CV/image_editing", - author="paddlepaddle", - author_email="", - summary="Openpose_hands_estimation is a hand pose estimation model based on Hand Keypoint Detection in \ +@moduleinfo(name="openpose_hands_estimation", + type="CV/image_editing", + author="paddlepaddle", + author_email="", + summary="Openpose_hands_estimation is a hand pose estimation model based on Hand Keypoint Detection in \ Single Images using Multiview Bootstrapping.", - version="1.0.0") + version="1.0.0") class HandPoseModel(nn.Layer): """ - HandPoseModel + HandposeModel Args: load_checkpoint(str): Checkpoint save path, default is None. visualization (bool): Whether to save the estimation result. Default is True. 
""" - def __init__(self, load_checkpoint: str = None, visualization: bool = True): super(HandPoseModel, self).__init__() - self.visualization = visualization + self.resize_func = T.ResizeScaling() self.norm_func = T.Normalize(std=[1, 1, 1]) self.hand_detect = P.HandDetect() @@ -59,6 +56,7 @@ class HandPoseModel(nn.Layer): self.remove_pad = P.RemovePadding() self.draw_pose = P.DrawPose() self.draw_hand = P.DrawHandPose() + no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3', \ 'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6'] @@ -105,9 +103,7 @@ class HandPoseModel(nn.Layer): else: checkpoint = os.path.join(self.directory, 'openpose_hand.pdparams') - if not os.path.exists(checkpoint): - os.system('wget https://paddlehub.bj.bcebos.com/dygraph/pose/openpose_hand.pdparams -O ' + checkpoint) - model_dict = paddle.load(checkpoint)[0] + model_dict = paddle.load(checkpoint) self.set_dict(model_dict) print("load pretrained checkpoint success") @@ -115,10 +111,10 @@ class HandPoseModel(nn.Layer): layers = [] for layer_name, v in block.items(): if 'pool' in layer_name: - layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2]) + layer = nn.MaxPool2D(kernel_size=v[0], stride=v[1], padding=v[2]) layers.append((layer_name, layer)) else: - conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4]) + conv2d = nn.Conv2D(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4]) layers.append((layer_name, conv2d)) if layer_name not in no_relu_layers: layers.append(('relu_' + layer_name, nn.ReLU())) @@ -197,10 +193,3 @@ class HandPoseModel(nn.Layer): save_path = os.path.join(save_path, img_path.rsplit("/", 1)[-1]) cv2.imwrite(save_path, canvas) return all_hand_peaks - - -if __name__ == "__main__": - paddle.disable_static() - model = HandPoseModel() - model.eval() - out1 = model.predict("detect_hand4.jpg") diff --git a/modules/image/style_transfer/msgnet/module.py b/modules/image/style_transfer/msgnet/module.py index ef1bc202..63eddcd4 100644 --- a/modules/image/style_transfer/msgnet/module.py +++ b/modules/image/style_transfer/msgnet/module.py @@ -13,7 +13,6 @@ from paddlehub.module.cv_module import StyleTransferModule class GramMatrix(nn.Layer): """Calculate gram matrix""" - def forward(self, y): (b, ch, h, w) = y.shape features = y.reshape((b, ch, w * h)) @@ -24,7 +23,6 @@ class GramMatrix(nn.Layer): class ConvLayer(nn.Layer): """Basic conv layer with reflection padding layer""" - def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride: int): super(ConvLayer, self).__init__() pad = int(np.floor(kernel_size / 2)) @@ -52,7 +50,6 @@ class UpsampleConvLayer(nn.Layer): Return: img(paddle.Tensor): UpsampleConvLayer output. """ - def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride: int, upsample=None): super(UpsampleConvLayer, self).__init__() self.upsample = upsample @@ -87,7 +84,6 @@ class Bottleneck(nn.Layer): Return: img(paddle.Tensor): Bottleneck output. 
""" - def __init__(self, inplanes: int, planes: int, @@ -101,8 +97,8 @@ class Bottleneck(nn.Layer): self.residual_layer = nn.Conv2D(inplanes, planes * self.expansion, kernel_size=1, stride=stride) conv_block = (norm_layer(inplanes), nn.ReLU(), nn.Conv2D(inplanes, planes, kernel_size=1, stride=1), norm_layer(planes), nn.ReLU(), ConvLayer(planes, planes, kernel_size=3, stride=stride), - norm_layer(planes), nn.ReLU(), nn.Conv2D( - planes, planes * self.expansion, kernel_size=1, stride=1)) + norm_layer(planes), nn.ReLU(), nn.Conv2D(planes, planes * self.expansion, kernel_size=1, + stride=1)) self.conv_block = nn.Sequential(*conv_block) def forward(self, x: paddle.Tensor): @@ -128,12 +124,14 @@ class UpBottleneck(nn.Layer): Return: img(paddle.Tensor): UpBottleneck output. """ - def __init__(self, inplanes: int, planes: int, stride: int = 2, norm_layer: nn.Layer = nn.BatchNorm2D): super(UpBottleneck, self).__init__() self.expansion = 4 - self.residual_layer = UpsampleConvLayer( - inplanes, planes * self.expansion, kernel_size=1, stride=1, upsample=stride) + self.residual_layer = UpsampleConvLayer(inplanes, + planes * self.expansion, + kernel_size=1, + stride=1, + upsample=stride) conv_block = [] conv_block += [norm_layer(inplanes), nn.ReLU(), nn.Conv2D(inplanes, planes, kernel_size=1, stride=1)] conv_block += [ @@ -164,7 +162,6 @@ class Inspiration(nn.Layer): Return: img(paddle.Tensor): UpBottleneck output. """ - def __init__(self, C: int, B: int = 1): super(Inspiration, self).__init__() @@ -181,8 +178,8 @@ class Inspiration(nn.Layer): self.P = paddle.bmm(self.weight.expand_as(self.G), self.G) x = paddle.bmm( - self.P.transpose((0, 2, 1)).expand((X.shape[0], self.C, self.C)), X.reshape((X.shape[0], X.shape[1], - -1))).reshape(X.shape) + self.P.transpose((0, 2, 1)).expand((X.shape[0], self.C, self.C)), X.reshape( + (X.shape[0], X.shape[1], -1))).reshape(X.shape) return x def __repr__(self): @@ -192,7 +189,6 @@ class Inspiration(nn.Layer): class Vgg16(nn.Layer): """ First four layers from Vgg16.""" - def __init__(self): super(Vgg16, self).__init__() self.conv1_1 = nn.Conv2D(3, 64, kernel_size=3, stride=1, padding=1) @@ -214,9 +210,6 @@ class Vgg16(nn.Layer): self.conv5_3 = nn.Conv2D(512, 512, kernel_size=3, stride=1, padding=1) checkpoint = os.path.join(MODULE_HOME, 'msgnet', 'vgg16.pdparams') - if not os.path.exists(checkpoint): - os.system('wget https://bj.bcebos.com/paddlehub/model/image/image_editing/vgg_paddle.pdparams -O ' + - checkpoint) model_dict = paddle.load(checkpoint) self.set_dict(model_dict) print("load pretrained vgg16 checkpoint success") @@ -270,8 +263,12 @@ class MSGNet(nn.Layer): Return: img(paddle.Tensor): MSGNet output. """ - - def __init__(self, input_nc=3, output_nc=3, ngf=128, n_blocks=6, norm_layer=nn.InstanceNorm2D, + def __init__(self, + input_nc=3, + output_nc=3, + ngf=128, + n_blocks=6, + norm_layer=nn.InstanceNorm2D, load_checkpoint=None): super(MSGNet, self).__init__() self.gram = GramMatrix() -- GitLab