Unverified · Commit 4dc6f237 · Author: haoyuying · Committer: GitHub

adapt openpose to rc version

Parent: 6f153ddc
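The changes below follow one pattern across all three modules: layer classes move to the Paddle 2.0 RC spelling (nn.Conv2d becomes nn.Conv2D, nn.MaxPool2d becomes nn.MaxPool2D), paddle.load() now returns the state dict directly so the trailing [0] index is dropped, the wget checkpoint fallback is removed, and paddle.disable_static() is no longer called because dynamic graph mode is the default. A minimal sketch of the new-style calls, with illustrative shapes rather than the modules' actual configuration:

    import paddle
    import paddle.nn as nn

    # Paddle 2.0 RC layer names use a capital "D".
    pool = nn.MaxPool2D(kernel_size=2, stride=2, padding=0)  # was nn.MaxPool2d
    conv = nn.Conv2D(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1)  # was nn.Conv2d

    # paddle.load() returns the saved state dict itself, not a tuple,
    # so it can be passed straight to set_dict():
    # model_dict = paddle.load('openpose_body.pdparams')
    # model.set_dict(model_dict)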
-# coding:utf-8
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
 #
-# Licensed under the Apache License, Version 2.0 (the "License"
+# Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -23,27 +22,24 @@ import paddle.nn as nn
 import numpy as np
 from paddlehub.module.module import moduleinfo
 import paddlehub.process.transforms as T
 import openpose_body_estimation.processor as P

-@moduleinfo(
-    name="openpose_body_estimation",
-    type="CV/image_editing",
-    author="paddlepaddle",
-    author_email="",
-    summary="Openpose_body_estimation is a body pose estimation model based on Realtime Multi-Person 2D Pose \
+@moduleinfo(name="openpose_body_estimation",
+            type="CV/image_editing",
+            author="paddlepaddle",
+            author_email="",
+            summary="Openpose_body_estimation is a body pose estimation model based on Realtime Multi-Person 2D Pose \
             Estimation using Part Affinity Fields.",
             version="1.0.0")
 class BodyPoseModel(nn.Layer):
     """
-    BodyPoseModel
+    BodyposeModel
     Args:
         load_checkpoint(str): Checkpoint save path, default is None.
         visualization (bool): Whether to save the estimation result. Default is True.
     """
     def __init__(self, load_checkpoint: str = None, visualization: bool = True):
         super(BodyPoseModel, self).__init__()
@@ -118,15 +114,13 @@ class BodyPoseModel(nn.Layer):
         self.model6_2 = blocks['block6_2']
         if load_checkpoint is not None:
-            model_dict = paddle.load(load_checkpoint)[0]
+            model_dict = paddle.load(load_checkpoint)
             self.set_dict(model_dict)
             print("load custom checkpoint success")
         else:
             checkpoint = os.path.join(self.directory, 'openpose_body.pdparams')
-            if not os.path.exists(checkpoint):
-                os.system('wget https://paddlehub.bj.bcebos.com/dygraph/pose/openpose_body.pdparams -O ' + checkpoint)
-            model_dict = paddle.load(checkpoint)[0]
+            model_dict = paddle.load(checkpoint)
             self.set_dict(model_dict)
             print("load pretrained checkpoint success")
@@ -134,10 +128,10 @@ class BodyPoseModel(nn.Layer):
         layers = []
         for layer_name, v in block.items():
             if 'pool' in layer_name:
-                layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])
+                layer = nn.MaxPool2D(kernel_size=v[0], stride=v[1], padding=v[2])
                 layers.append((layer_name, layer))
             else:
-                conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
+                conv2d = nn.Conv2D(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
                 layers.append((layer_name, conv2d))
             if layer_name not in no_relu_layers:
                 layers.append(('relu_' + layer_name, nn.ReLU()))
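The loop above builds each stage from an ordered mapping of layer names to parameter lists: pooling entries hold [kernel_size, stride, padding] and convolution entries hold [in_channels, out_channels, kernel_size, stride, padding]; any name not listed in no_relu_layers gets a trailing ReLU. A hypothetical block spec, just to illustrate the expected layout (names and values are made up, not the module's real configuration):

    # conv entries: [in_channels, out_channels, kernel_size, stride, padding]
    # pool entries: [kernel_size, stride, padding]
    block_example = {
        'conv1_1': [3, 64, 3, 1, 1],
        'conv1_2': [64, 64, 3, 1, 1],
        'pool1_stage1': [2, 2, 0],
    }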
@@ -204,11 +198,3 @@ class BodyPoseModel(nn.Layer):
             save_path = os.path.join(save_path, img_path.rsplit("/", 1)[-1])
             cv2.imwrite(save_path, canvas)
         return candidate, subset
-
-
-if __name__ == "__main__":
-    paddle.disable_static()
-    model = BodyPoseModel()
-    model.eval()
-    out1, out2 = model.predict("demo.jpg")
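With the ad-hoc test block removed, the module is exercised through its predict() API, which (per the return statement above) yields the keypoint candidates and the per-person subsets. A hedged usage sketch, assuming the module has been installed under its PaddleHub name and that demo.jpg exists locally:

    import paddlehub as hub

    # Load the installed module by name (e.g. after `hub install openpose_body_estimation`).
    model = hub.Module(name='openpose_body_estimation')
    model.eval()

    # Returns keypoint candidates and per-person subset assignments.
    candidate, subset = model.predict('demo.jpg')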
-# coding:utf-8
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
 #
-# Licensed under the Apache License, Version 2.0 (the "License"
+# Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -31,27 +30,25 @@ import paddlehub.process.transforms as T
 import openpose_hands_estimation.processor as P

-@moduleinfo(
-    name="openpose_hands_estimation",
-    type="CV/image_editing",
-    author="paddlepaddle",
-    author_email="",
-    summary="Openpose_hands_estimation is a hand pose estimation model based on Hand Keypoint Detection in \
+@moduleinfo(name="openpose_hands_estimation",
+            type="CV/image_editing",
+            author="paddlepaddle",
+            author_email="",
+            summary="Openpose_hands_estimation is a hand pose estimation model based on Hand Keypoint Detection in \
             Single Images using Multiview Bootstrapping.",
             version="1.0.0")
 class HandPoseModel(nn.Layer):
     """
-    HandPoseModel
+    HandposeModel
     Args:
         load_checkpoint(str): Checkpoint save path, default is None.
         visualization (bool): Whether to save the estimation result. Default is True.
     """
     def __init__(self, load_checkpoint: str = None, visualization: bool = True):
         super(HandPoseModel, self).__init__()
         self.visualization = visualization
         self.resize_func = T.ResizeScaling()
         self.norm_func = T.Normalize(std=[1, 1, 1])
         self.hand_detect = P.HandDetect()
@@ -59,6 +56,7 @@ class HandPoseModel(nn.Layer):
         self.remove_pad = P.RemovePadding()
         self.draw_pose = P.DrawPose()
         self.draw_hand = P.DrawHandPose()
+
         no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3', \
                           'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
@@ -105,9 +103,7 @@ class HandPoseModel(nn.Layer):
         else:
             checkpoint = os.path.join(self.directory, 'openpose_hand.pdparams')
-            if not os.path.exists(checkpoint):
-                os.system('wget https://paddlehub.bj.bcebos.com/dygraph/pose/openpose_hand.pdparams -O ' + checkpoint)
-            model_dict = paddle.load(checkpoint)[0]
+            model_dict = paddle.load(checkpoint)
             self.set_dict(model_dict)
             print("load pretrained checkpoint success")
@@ -115,10 +111,10 @@ class HandPoseModel(nn.Layer):
         layers = []
         for layer_name, v in block.items():
             if 'pool' in layer_name:
-                layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])
+                layer = nn.MaxPool2D(kernel_size=v[0], stride=v[1], padding=v[2])
                 layers.append((layer_name, layer))
             else:
-                conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
+                conv2d = nn.Conv2D(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
                 layers.append((layer_name, conv2d))
             if layer_name not in no_relu_layers:
                 layers.append(('relu_' + layer_name, nn.ReLU()))
@@ -197,10 +193,3 @@ class HandPoseModel(nn.Layer):
             save_path = os.path.join(save_path, img_path.rsplit("/", 1)[-1])
             cv2.imwrite(save_path, canvas)
         return all_hand_peaks
-
-
-if __name__ == "__main__":
-    paddle.disable_static()
-    model = HandPoseModel()
-    model.eval()
-    out1 = model.predict("detect_hand4.jpg")
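The hand module drops the same wget fallback and the [0] index: under the 2.0 RC API the checkpoint ships with the module and paddle.load() hands back the parameter dict directly, ready for set_dict(). A small sketch of that save/load round trip (the file name and layer are illustrative, not part of the module):

    import paddle
    import paddle.nn as nn

    layer = nn.Linear(4, 2)

    # paddle.save() stores the state dict; paddle.load() returns the same dict,
    # so no tuple unpacking or [0] indexing is needed before set_dict().
    paddle.save(layer.state_dict(), 'linear.pdparams')
    state_dict = paddle.load('linear.pdparams')
    layer.set_dict(state_dict)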
@@ -13,7 +13,6 @@ from paddlehub.module.cv_module import StyleTransferModule
 class GramMatrix(nn.Layer):
     """Calculate gram matrix"""
-
     def forward(self, y):
         (b, ch, h, w) = y.shape
         features = y.reshape((b, ch, w * h))
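GramMatrix.forward flattens the (b, ch, h, w) feature map to (b, ch, w*h) and, in the lines not shown here, multiplies it with its transpose to get a (b, ch, ch) Gram matrix. A standalone sketch of that computation; the division by ch*h*w follows the usual Gram-matrix normalization and is an assumption about the omitted lines:

    import paddle

    def gram_matrix(y: paddle.Tensor) -> paddle.Tensor:
        # y: feature map of shape (b, ch, h, w)
        b, ch, h, w = y.shape
        features = y.reshape((b, ch, w * h))
        features_t = features.transpose((0, 2, 1))
        # Batched matrix product over the channel dimension,
        # normalized by the number of elements per channel map.
        return paddle.bmm(features, features_t) / (ch * h * w)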
@@ -24,7 +23,6 @@ class GramMatrix(nn.Layer):
 class ConvLayer(nn.Layer):
     """Basic conv layer with reflection padding layer"""
-
     def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride: int):
         super(ConvLayer, self).__init__()
         pad = int(np.floor(kernel_size / 2))
@@ -52,7 +50,6 @@ class UpsampleConvLayer(nn.Layer):
     Return:
         img(paddle.Tensor): UpsampleConvLayer output.
     """
-
     def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride: int, upsample=None):
         super(UpsampleConvLayer, self).__init__()
         self.upsample = upsample
@@ -87,7 +84,6 @@ class Bottleneck(nn.Layer):
     Return:
         img(paddle.Tensor): Bottleneck output.
     """
-
     def __init__(self,
                  inplanes: int,
                  planes: int,
@@ -101,8 +97,8 @@
         self.residual_layer = nn.Conv2D(inplanes, planes * self.expansion, kernel_size=1, stride=stride)
         conv_block = (norm_layer(inplanes), nn.ReLU(), nn.Conv2D(inplanes, planes, kernel_size=1, stride=1),
                       norm_layer(planes), nn.ReLU(), ConvLayer(planes, planes, kernel_size=3, stride=stride),
-                      norm_layer(planes), nn.ReLU(), nn.Conv2D(
-                          planes, planes * self.expansion, kernel_size=1, stride=1))
+                      norm_layer(planes), nn.ReLU(), nn.Conv2D(planes, planes * self.expansion, kernel_size=1,
+                                                               stride=1))
         self.conv_block = nn.Sequential(*conv_block)
     def forward(self, x: paddle.Tensor):
@@ -128,12 +124,14 @@ class UpBottleneck(nn.Layer):
     Return:
         img(paddle.Tensor): UpBottleneck output.
     """
-
     def __init__(self, inplanes: int, planes: int, stride: int = 2, norm_layer: nn.Layer = nn.BatchNorm2D):
         super(UpBottleneck, self).__init__()
         self.expansion = 4
-        self.residual_layer = UpsampleConvLayer(
-            inplanes, planes * self.expansion, kernel_size=1, stride=1, upsample=stride)
+        self.residual_layer = UpsampleConvLayer(inplanes,
+                                                planes * self.expansion,
+                                                kernel_size=1,
+                                                stride=1,
+                                                upsample=stride)
         conv_block = []
         conv_block += [norm_layer(inplanes), nn.ReLU(), nn.Conv2D(inplanes, planes, kernel_size=1, stride=1)]
         conv_block += [
@@ -164,7 +162,6 @@ class Inspiration(nn.Layer):
     Return:
         img(paddle.Tensor): UpBottleneck output.
     """
-
     def __init__(self, C: int, B: int = 1):
         super(Inspiration, self).__init__()
@@ -181,8 +178,8 @@
         self.P = paddle.bmm(self.weight.expand_as(self.G), self.G)
         x = paddle.bmm(
-            self.P.transpose((0, 2, 1)).expand((X.shape[0], self.C, self.C)), X.reshape((X.shape[0], X.shape[1],
-                                                                                         -1))).reshape(X.shape)
+            self.P.transpose((0, 2, 1)).expand((X.shape[0], self.C, self.C)), X.reshape(
+                (X.shape[0], X.shape[1], -1))).reshape(X.shape)
         return x
     def __repr__(self):
@@ -192,7 +189,6 @@ class Inspiration(nn.Layer):
 class Vgg16(nn.Layer):
     """ First four layers from Vgg16."""
-
     def __init__(self):
         super(Vgg16, self).__init__()
         self.conv1_1 = nn.Conv2D(3, 64, kernel_size=3, stride=1, padding=1)
@@ -214,9 +210,6 @@ class Vgg16(nn.Layer):
         self.conv5_3 = nn.Conv2D(512, 512, kernel_size=3, stride=1, padding=1)

         checkpoint = os.path.join(MODULE_HOME, 'msgnet', 'vgg16.pdparams')
-        if not os.path.exists(checkpoint):
-            os.system('wget https://bj.bcebos.com/paddlehub/model/image/image_editing/vgg_paddle.pdparams -O ' +
-                      checkpoint)
         model_dict = paddle.load(checkpoint)
         self.set_dict(model_dict)
         print("load pretrained vgg16 checkpoint success")
@@ -270,8 +263,12 @@ class MSGNet(nn.Layer):
     Return:
         img(paddle.Tensor): MSGNet output.
     """
-
-    def __init__(self, input_nc=3, output_nc=3, ngf=128, n_blocks=6, norm_layer=nn.InstanceNorm2D,
+    def __init__(self,
+                 input_nc=3,
+                 output_nc=3,
+                 ngf=128,
+                 n_blocks=6,
+                 norm_layer=nn.InstanceNorm2D,
                  load_checkpoint=None):
         super(MSGNet, self).__init__()
         self.gram = GramMatrix()
......