diff --git a/ppgan/models/generators/generator_firstorder.py b/ppgan/models/generators/generator_firstorder.py
index 72d1b2c9a98cfc9f26b1b6c5e99c8eb3f896b2a9..61f488b568b09e71bcaa3c5da7557a5d275f6d1f 100755
--- a/ppgan/models/generators/generator_firstorder.py
+++ b/ppgan/models/generators/generator_firstorder.py
@@ -251,7 +251,7 @@ class Transform:
         theta_part_a = theta[:, :, :, :2]
         theta_part_b = theta[:, :, :, 2:]
 
-        transformed = paddle.fluid.layers.matmul(
+        transformed = paddle.matmul(
             *broadcast(theta_part_a, coordinates)) + theta_part_b  #M*p + m0
         transformed = transformed.squeeze(-1)
         if self.tps:
diff --git a/ppgan/models/starganv2_model.py b/ppgan/models/starganv2_model.py
index f7d5e5e6aed068b6deb1db5111889eb31bee33f4..3203264a8eec7f489d4e6fbb667915f44e2d153f 100755
--- a/ppgan/models/starganv2_model.py
+++ b/ppgan/models/starganv2_model.py
@@ -2,7 +2,6 @@
 # Users should be careful about adopting these functions in any commercial matters.
 # https://github.com/clovaai/stargan-v2#license
 
-from paddle.fluid.layers.nn import soft_relu
 from .base_model import BaseModel
 
 from paddle import nn
diff --git a/ppgan/models/wav2lip_hq_model.py b/ppgan/models/wav2lip_hq_model.py
index 034e81f9ffbd399fd8a716d8f8d8aa7f2905de0e..5661b44ef4c1c5ea3117a511db30f3c2b3e95ce0 100644
--- a/ppgan/models/wav2lip_hq_model.py
+++ b/ppgan/models/wav2lip_hq_model.py
@@ -103,8 +103,7 @@ class Wav2LipModelHq(BaseModel):
         self.l1_loss = self.recon_loss(self.g, self.y)
 
         if self.disc_wt > 0.:
-            if isinstance(self.nets['netDH'], paddle.DataParallel
-                          ):  #paddle.fluid.dygraph.parallel.DataParallel)
+            if isinstance(self.nets['netDH'], paddle.DataParallel):
                 self.perceptual_loss = self.nets[
                     'netDH']._layers.perceptual_forward(self.g)
             else:
@@ -175,8 +174,7 @@ class Wav2LipModelHq(BaseModel):
                 self.eval_recon_losses.append(l1loss.numpy().item())
 
                 if self.disc_wt > 0.:
-                    if isinstance(self.nets['netDH'], paddle.DataParallel
-                                  ):  #paddle.fluid.dygraph.parallel.DataParallel)
+                    if isinstance(self.nets['netDH'], paddle.DataParallel):
                         perceptual_loss = self.nets[
                             'netDH']._layers.perceptual_forward(
                                 self.g).numpy().item()
diff --git a/ppgan/modules/first_order.py b/ppgan/modules/first_order.py
index b6b113bcc023a1fe43d1d5d9ebec56393ea04cb7..bf9bb9029f3668ae9f0c962639094109a95f3609 100644
--- a/ppgan/modules/first_order.py
+++ b/ppgan/modules/first_order.py
@@ -468,7 +468,8 @@ class AntiAliasInterpolation2d(nn.Layer):
         inv_scale = 1 / self.scale
         int_inv_scale = int(inv_scale)
         assert (inv_scale == int_inv_scale)
-        #out = out[:, :, ::int_inv_scale, ::int_inv_scale]
+        # lite: fluid resize_nearest
+        # out = paddle.fluid.layers.resize_nearest(out, scale=self.scale)
+        out = out[:, :, ::int_inv_scale, ::int_inv_scale]
         # patch end
-        out = paddle.fluid.layers.resize_nearest(out, scale=self.scale)
         return out
diff --git a/tools/fom_infer.py b/tools/fom_infer.py
index e664ec38ed0692816cc6fc9b68d161d782bc02a5..6ad7d4ef2480a103240bcb76b16b073539651dc4 100644
--- a/tools/fom_infer.py
+++ b/tools/fom_infer.py
@@ -5,7 +5,6 @@ import cv2
 import imageio
 import time
 from tqdm import tqdm
-import paddle.fluid as fluid
 import os
 from functools import reduce
 import paddle
@@ -99,11 +98,11 @@ def main():
         driving_paths = [driving_path]
 
     # 创建 config
-    kp_detector_config = paddle_infer.Config(os.path.join(
-        args.model_path, "kp_detector.pdmodel"),
+    kp_detector_config = paddle_infer.Config(
+        os.path.join(args.model_path, "kp_detector.pdmodel"),
         os.path.join(args.model_path, "kp_detector.pdiparams"))
-    generator_config = paddle_infer.Config(os.path.join(
-        args.model_path, "generator.pdmodel"),
+    generator_config = paddle_infer.Config(
+        os.path.join(args.model_path, "generator.pdmodel"),
         os.path.join(args.model_path, "generator.pdiparams"))
     if args.device == "gpu":
         kp_detector_config.enable_use_gpu(100, 0)
@@ -194,11 +193,12 @@ def main():
            generator_output_handle = generator_predictor.get_output_handle(
                generator_output_names[0])
            output_data = generator_output_handle.copy_to_cpu()
-           loss = paddle.abs(paddle.to_tensor(output_data) -
-                             paddle.to_tensor(driving_video[i])).mean().cpu().numpy()
+           loss = paddle.abs(
+               paddle.to_tensor(output_data) -
+               paddle.to_tensor(driving_video[i])).mean().cpu().numpy()
            test_loss.append(loss)
            output_data = np.transpose(output_data, [0, 2, 3, 1])[0] * 255.0
-           
+
            #Todo:add blazeface static model
            #frame = source_img.copy()
            #frame[left:right, up:bottom] = cv2.resize(output_data.astype(np.uint8), (bottom - up, right - left), cv2.INTER_AREA)
@@ -210,8 +210,7 @@ def main():
                     fps=fps)
 
     metric_file = os.path.join(args.output_path, "metric.txt")
     log_file = open(metric_file, 'a')
-    loss_string = "Metric {}: {:.4f}".format(
-        "l1 loss", np.mean(test_loss))
+    loss_string = "Metric {}: {:.4f}".format("l1 loss", np.mean(test_loss))
 
     log_file.close()
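Not part of the patch: a minimal sketch of the two fluid-free replacements the diff relies on, assuming Paddle 2.x in dygraph mode; the tensor shapes and the scale value below are illustrative only.

import paddle

# paddle.matmul as the drop-in for paddle.fluid.layers.matmul
a = paddle.rand([2, 3, 4])
b = paddle.rand([2, 4, 5])
print(paddle.matmul(a, b).shape)  # [2, 3, 5]

# Strided slicing in place of fluid's resize_nearest when the downscale
# factor is an exact integer (the AntiAliasInterpolation2d case): keep
# every int_inv_scale-th pixel along H and W.
scale = 0.25
inv_scale = 1 / scale
int_inv_scale = int(inv_scale)
assert inv_scale == int_inv_scale
x = paddle.rand([1, 3, 64, 64])
out = x[:, :, ::int_inv_scale, ::int_inv_scale]
print(out.shape)  # [1, 3, 16, 16]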