Unverified commit c0039d13, authored by wangna11BD, committed by GitHub

fix install ppgan bug (#686)

* fix install ppgan bug

* fix install ppgan
Parent d1225d09
@@ -23,7 +23,7 @@ from .animegan_predictor import AnimeGANPredictor
from .midas_predictor import MiDaSPredictor
from .photo2cartoon_predictor import Photo2CartoonPredictor
from .styleganv2_predictor import StyleGANv2Predictor
-from .styleganv2clip_predictor import StyleGANv2ClipPredictor
+# from .styleganv2clip_predictor import StyleGANv2ClipPredictor
from .styleganv2fitting_predictor import StyleGANv2FittingPredictor
from .styleganv2mixing_predictor import StyleGANv2MixingPredictor
from .styleganv2editing_predictor import StyleGANv2EditingPredictor
...
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
from .model_irse import Backbone
from paddle.vision.transforms import Resize
from ..builder import CRITERIONS
from ppgan.utils.download import get_path_from_url
model_cfgs = {
'model_urls':
'https://paddlegan.bj.bcebos.com/models/model_ir_se50.pdparams',
}
@CRITERIONS.register()
class IDLoss(paddle.nn.Layer):
def __init__(self, base_dir='./'):
super(IDLoss, self).__init__()
print('Loading ResNet ArcFace')
self.facenet = Backbone(input_size=112,
num_layers=50,
drop_ratio=0.6,
mode='ir_se')
facenet_weights_path = os.path.join(base_dir, 'data/gpen/weights',
'model_ir_se50.pdparams')
if not os.path.isfile(facenet_weights_path):
facenet_weights_path = get_path_from_url(model_cfgs['model_urls'])
self.facenet.load_dict(paddle.load(facenet_weights_path))
self.face_pool = paddle.nn.AdaptiveAvgPool2D((112, 112))
self.facenet.eval()
def extract_feats(self, x):
_, _, h, w = x.shape
assert h == w
ss = h // 256
x = x[:, :, 35 * ss:-33 * ss, 32 * ss:-36 * ss]
transform = Resize(size=(112, 112))
for num in range(x.shape[0]):
mid_feats = transform(x[num]).unsqueeze(0)
if num == 0:
x_feats = mid_feats
else:
x_feats = paddle.concat([x_feats, mid_feats], axis=0)
x_feats = self.facenet(x_feats)
return x_feats
def forward(self, y_hat, y, x):
n_samples = x.shape[0]
y_feats = self.extract_feats(y)
y_hat_feats = self.extract_feats(y_hat)
y_feats = y_feats.detach()
loss = 0
count = 0
for i in range(n_samples):
diff_target = y_hat_feats[i].dot(y_feats[i])
loss += 1 - diff_target
count += 1
return loss / count
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
from .helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(nn.Layer):
def __init__(self,
input_size,
num_layers,
mode='ir',
drop_ratio=0.4,
affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100,
152], "num_layers should be 50, 100 or 152"
assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
blocks = get_blocks(num_layers)
if mode == 'ir':
unit_module = bottleneck_IR
elif mode == 'ir_se':
unit_module = bottleneck_IR_SE
self.input_layer = paddle.nn.Sequential(
nn.Conv2D(3, 64, (3, 3), 1, 1, bias_attr=False), nn.BatchNorm2D(64),
nn.PReLU(64))
if input_size == 112:
self.output_layer = nn.Sequential(nn.BatchNorm2D(512),
nn.Dropout(drop_ratio), Flatten(),
nn.Linear(512 * 7 * 7, 512),
nn.BatchNorm1D(512))
else:
self.output_layer = nn.Sequential(nn.BatchNorm2D(512),
nn.Dropout(drop_ratio), Flatten(),
nn.Linear(512 * 14 * 14, 512),
nn.BatchNorm1D(512))
modules = []
for block in blocks:
for bottleneck in block:
modules.append(
unit_module(bottleneck.in_channel, bottleneck.depth,
bottleneck.stride))
self.body = nn.Sequential(*modules)
def forward(self, x):
x = self.input_layer(x)
x = self.body(x)
x = self.output_layer(x)
return l2_norm(x)
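For reference, the sketch below is illustrative only and not part of this commit; it shows how the ir_se-50 Backbone above can be exercised. The import path and dummy shapes are assumptions, chosen to match the 112x112 configuration that IDLoss uses.
# Illustrative sketch only, not part of the commit. The module path is an
# assumption based on the post-commit layout (`from .id_loss import IDLoss`).
import paddle
from ppgan.models.criterions.id_loss import Backbone  # assumed location after this change

net = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se')
net.eval()
dummy = paddle.randn([4, 3, 112, 112])   # NCHW batch of four 112x112 face crops
with paddle.no_grad():
    emb = net(dummy)                     # L2-normalized 512-dim identity embeddings
print(emb.shape)                         # [4, 512]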
@@ -9,4 +9,4 @@ from .gradient_penalty import GradientPenalty
from .builder import build_criterion
from .ssim import SSIM
-from .IDLoss.id_loss import IDLoss
+from .id_loss import IDLoss
@@ -12,10 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import namedtuple
import paddle
import paddle.nn as nn
from paddle.vision.transforms import Resize
from .builder import CRITERIONS
from ppgan.utils.download import get_path_from_url
model_cfgs = {
'model_urls':
'https://paddlegan.bj.bcebos.com/models/model_ir_se50.pdparams',
}
class Flatten(nn.Layer):
@@ -139,3 +149,107 @@ class bottleneck_IR_SE(nn.Layer):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(nn.Layer):
def __init__(self,
input_size,
num_layers,
mode='ir',
drop_ratio=0.4,
affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100,
152], "num_layers should be 50, 100 or 152"
assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
blocks = get_blocks(num_layers)
if mode == 'ir':
unit_module = bottleneck_IR
elif mode == 'ir_se':
unit_module = bottleneck_IR_SE
self.input_layer = paddle.nn.Sequential(
nn.Conv2D(3, 64, (3, 3), 1, 1, bias_attr=False), nn.BatchNorm2D(64),
nn.PReLU(64))
if input_size == 112:
self.output_layer = nn.Sequential(nn.BatchNorm2D(512),
nn.Dropout(drop_ratio), Flatten(),
nn.Linear(512 * 7 * 7, 512),
nn.BatchNorm1D(512))
else:
self.output_layer = nn.Sequential(nn.BatchNorm2D(512),
nn.Dropout(drop_ratio), Flatten(),
nn.Linear(512 * 14 * 14, 512),
nn.BatchNorm1D(512))
modules = []
for block in blocks:
for bottleneck in block:
modules.append(
unit_module(bottleneck.in_channel, bottleneck.depth,
bottleneck.stride))
self.body = nn.Sequential(*modules)
def forward(self, x):
x = self.input_layer(x)
x = self.body(x)
x = self.output_layer(x)
return l2_norm(x)
@CRITERIONS.register()
class IDLoss(paddle.nn.Layer):
def __init__(self, base_dir='./'):
super(IDLoss, self).__init__()
print('Loading ResNet ArcFace')
self.facenet = Backbone(input_size=112,
num_layers=50,
drop_ratio=0.6,
mode='ir_se')
facenet_weights_path = os.path.join(base_dir, 'data/gpen/weights',
'model_ir_se50.pdparams')
if not os.path.isfile(facenet_weights_path):
facenet_weights_path = get_path_from_url(model_cfgs['model_urls'])
self.facenet.load_dict(paddle.load(facenet_weights_path))
self.face_pool = paddle.nn.AdaptiveAvgPool2D((112, 112))
self.facenet.eval()
def extract_feats(self, x):
_, _, h, w = x.shape
assert h == w
ss = h // 256
x = x[:, :, 35 * ss:-33 * ss, 32 * ss:-36 * ss]
transform = Resize(size=(112, 112))
for num in range(x.shape[0]):
mid_feats = transform(x[num]).unsqueeze(0)
if num == 0:
x_feats = mid_feats
else:
x_feats = paddle.concat([x_feats, mid_feats], axis=0)
x_feats = self.facenet(x_feats)
return x_feats
def forward(self, y_hat, y, x):
n_samples = x.shape[0]
y_feats = self.extract_feats(y)
y_hat_feats = self.extract_feats(y_hat)
y_feats = y_feats.detach()
loss = 0
count = 0
for i in range(n_samples):
diff_target = y_hat_feats[i].dot(y_feats[i])
loss += 1 - diff_target
count += 1
return loss / count
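A minimal usage sketch for the merged IDLoss follows; it is illustrative only and not part of this commit. Inputs are square images of at least 256x256, which extract_feats crops around the face region, resizes to 112x112, and embeds with the ArcFace backbone; the loss is the mean cosine dissimilarity between the embeddings of the restoration y_hat and the target y. The shapes and base_dir below are assumptions.
# Illustrative sketch only, not part of the commit. IDLoss is re-exported by
# the criterions package per the __init__.py change above.
import paddle
from ppgan.models.criterions import IDLoss

id_loss = IDLoss(base_dir='./')           # downloads model_ir_se50.pdparams if not found locally
y_hat = paddle.rand([2, 3, 256, 256])     # restored faces (assumed shape)
y = paddle.rand([2, 3, 256, 256])         # ground-truth faces
loss = id_loss(y_hat, y, y_hat)           # third argument is only read for the batch size
print(float(loss))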
@@ -21,7 +21,7 @@ from .generators.builder import build_generator
from .discriminators.builder import build_discriminator
from ..modules.init import init_weights
-from .criterions.IDLoss.id_loss import IDLoss
+from .criterions.id_loss import IDLoss
from paddle.nn import functional as F
from paddle import autograd
import math
...