diff --git a/modules/image/image_processing/prnet/README.md b/modules/image/image_processing/prnet/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..bcd25aa999afcf08890d9fd83b6bcb5cf3fdc674
--- /dev/null
+++ b/modules/image/image_processing/prnet/README.md
@@ -0,0 +1,151 @@
+# prnet
+
+|Module Name|prnet|
+| :--- | :---: |
+|Category|Image - Image Generation|
+|Network|PRN|
+|Dataset|300W-LP|
+|Fine-tuning supported or not|No|
+|Module Size|154MB|
+|Latest update date|2021-11-20|
+|Data indicators|-|
+
+
+## I. Basic Information
+
+- ### Application Effect Display
+  - Sample results:
+
+    Input source image
+
+    Input reference image
+
+    Output image
+
+
+- ### Module Introduction
+
+  - PRNet proposes a method that jointly reconstructs the 3D facial structure and performs dense face alignment; it can be applied to tasks such as face alignment, 3D face reconstruction, and face texture editing. This module provides the face texture editing functionality, transferring the facial texture of a reference image onto the source image.
+
+  - For more details, please refer to: [Joint 3D Face Reconstruction and Dense Alignment with Position Map Regression Network](https://arxiv.org/pdf/1803.07835.pdf)
+
+
+
+## II. Installation
+
+- ### 1. Environmental Dependence
+ - dlib
+ - scikit-image
+
+- ### 2. Installation
+
+ - ```shell
+ $ hub install prnet
+ ```
+  - In case of any problems during installation, please refer to: [Windows_Quickstart](../../../../docs/docs_ch/get_start/windows_quickstart.md)
+    | [Linux_Quickstart](../../../../docs/docs_ch/get_start/linux_quickstart.md) | [MacOS_Quickstart](../../../../docs/docs_ch/get_start/mac_quickstart.md)
+
+## III. Module API Prediction
+
+- ### 1. Command line Prediction
+
+ - ```shell
+ $ hub run prnet --source "/PATH/TO/IMAGE1" --ref "/PATH/TO/IMAGE2"
+ ```
+  - This command performs face texture editing from the command line. For more information, please refer to the [PaddleHub Command Line Instruction](../../../../docs/docs_ch/tutorial/cmd_usage.rst)
+
+- ### 2. Prediction Code Example
+
+ - ```python
+ import paddlehub as hub
+ module = hub.Module(name="prnet")
+ source_path = "/PATH/TO/IMAGE1"
+ ref_path = "/PATH/TO/IMAGE2"
+    module.face_swap(paths=[{'source': source_path, 'ref': ref_path}],
+                     mode=0,
+ output_dir='./swapping_result/',
+ use_gpu=True,
+ visualization=True)
+ ```
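+
+  - face_swap returns a list of RGB ndarrays (one per input pair); with visualization=True, each result is also saved to output_dir as output_0.png, output_1.png, and so on.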
+
+- ### 3. API
+
+ - ```python
+ def face_swap(self,
+ images=None,
+ paths=None,
+                  mode=0,
+ output_dir='./swapping_result/',
+ use_gpu=False,
+ visualization=True):
+ ```
+  - Face texture editing API that transfers the facial texture of the reference image onto the source image (an ndarray-based call is sketched after the parameter list below).
+
+    - **Parameters**
+      - images (list[dict]): data of images, each element is a dict with keys source and ref, whose values are:
+        - source (numpy.ndarray): image to be transformed, with shape \[H, W, C\] in BGR format;
+        - ref (numpy.ndarray): reference image, with shape \[H, W, C\] in BGR format;
+      - paths (list[dict]): paths to images, each element is a dict with keys source and ref, whose values are:
+        - source (str): path to the image to be transformed;
+        - ref (str): path to the reference image;
+      - mode (int): option, 0 to change part of the texture, 1 to change the whole face;
+      - output\_dir (str): directory for saving the results;
+      - use\_gpu (bool): whether to use GPU;
+      - visualization (bool): whether to save the results to a local folder
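+
+  - A minimal sketch of calling face_swap with in-memory arrays through the images parameter (the image paths are placeholders):
+
+  - ```python
+    import cv2
+    import paddlehub as hub
+
+    module = hub.Module(name="prnet")
+    # cv2.imread returns BGR ndarrays, the format face_swap expects
+    source = cv2.imread("/PATH/TO/IMAGE1")
+    ref = cv2.imread("/PATH/TO/IMAGE2")
+    results = module.face_swap(images=[{'source': source, 'ref': ref}],
+                               mode=0,
+                               output_dir='./swapping_result/',
+                               use_gpu=False,
+                               visualization=True)
+    ```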
+
+## IV. Server Deployment
+
+- PaddleHub Serving can deploy an online service of face texture editing.
+
+- ### Step 1: Start PaddleHub Serving
+
+  - Run the startup command:
+ - ```shell
+ $ hub serving start -m prnet
+ ```
+
+  - This completes the deployment of an online face texture editing service API, with the default port number 8866.
+
+  - **NOTE:** If GPU is used for prediction, set the CUDA\_VISIBLE\_DEVICES environment variable before starting the service; otherwise it does not need to be set.
+
+- ### Step 2: Send a predictive request
+
+  - With a configured server, the following lines of code can send the prediction request and obtain the result:
+
+  - ```python
+    import requests
+    import json
+    import cv2
+    import base64
+
+
+    def cv2_to_base64(image):
+        data = cv2.imencode('.jpg', image)[1]
+        return base64.b64encode(data.tobytes()).decode('utf8')
+
+    # Send an HTTP request
+ data = {'images':[{'source': cv2_to_base64(cv2.imread("/PATH/TO/IMAGE1")), 'ref':cv2_to_base64(cv2.imread("/PATH/TO/IMAGE2"))}]}
+ headers = {"Content-type": "application/json"}
+ url = "http://127.0.0.1:8866/predict/prnet/"
+ r = requests.post(url=url, headers=headers, data=json.dumps(data))
+
+    # Print the prediction results
+    print(r.json()["results"])
+    ```
+
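+  - The results field holds the edited images as nested lists, one per input pair. A minimal sketch for turning the first result back into an image and saving it (the RGB channel order follows the module's return value):
+
+  - ```python
+    import numpy as np
+
+    # rebuild a uint8 array from the nested-list payload
+    output = np.array(r.json()["results"][0], dtype=np.uint8)
+    # the module returns RGB; OpenCV writes BGR, so flip the channels
+    cv2.imwrite("output.png", output[:, :, ::-1])
+    ```
+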
+## V. Release Note
+
+* 1.0.0
+
+  First release
+
+ - ```shell
+ $ hub install prnet==1.0.0
+ ```
diff --git a/modules/image/image_processing/prnet/api.py b/modules/image/image_processing/prnet/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..6da6676915bc6d15da9d5f004716abbe7186cd2e
--- /dev/null
+++ b/modules/image/image_processing/prnet/api.py
@@ -0,0 +1,191 @@
+import numpy as np
+import os
+
+from skimage.io import imread, imsave
+from skimage.transform import estimate_transform, warp
+from time import time
+import paddle
+
+from .predictor import PosPrediction
+
+
+class PRN:
+ ''' Joint 3D Face Reconstruction and Dense Alignment with Position Map Regression Network
+ Args:
+ is_dlib(bool, optional): If true, dlib is used for detecting faces.
+        prefix(str, optional): If running from another folder, the absolute path is needed to load the data.
+ '''
+
+ def __init__(self, is_dlib=False, prefix='.'):
+
+ # resolution of input and output image size.
+ self.resolution_inp = 256
+ self.resolution_op = 256
+
+ #---- load detectors
+ if is_dlib:
+ import dlib
+ detector_path = os.path.join(prefix, 'Data/net-data/mmod_human_face_detector.dat')
+ self.face_detector = dlib.cnn_face_detection_model_v1(detector_path)
+
+ #---- load PRN
+ params = paddle.load(os.path.join(prefix, "pd_model/model.pdparams"))
+ self.pos_predictor = PosPrediction(params, self.resolution_inp, self.resolution_op)
+
+ # uv file
+ self.uv_kpt_ind = np.loadtxt(os.path.join(prefix,
+ 'Data/uv-data/uv_kpt_ind.txt')).astype(np.int32) # 2 x 68 get kpt
+ self.face_ind = np.loadtxt(os.path.join(prefix, 'Data/uv-data/face_ind.txt')).astype(
+ np.int32) # get valid vertices in the pos map
+ self.triangles = np.loadtxt(os.path.join(prefix, 'Data/uv-data/triangles.txt')).astype(np.int32) # ntri x 3
+
+ self.uv_coords = self.generate_uv_coords()
+
+ def generate_uv_coords(self):
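+        # build a (resolution^2, 2) grid of (u, v) pixel coordinates, keep only the
+        # indices that belong to the face region, then append a zero z-coordinate.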
+ resolution = self.resolution_op
+ uv_coords = np.meshgrid(range(resolution), range(resolution))
+ uv_coords = np.transpose(np.array(uv_coords), [1, 2, 0])
+ uv_coords = np.reshape(uv_coords, [resolution**2, -1])
+ uv_coords = uv_coords[self.face_ind, :]
+ uv_coords = np.hstack((uv_coords[:, :2], np.zeros([uv_coords.shape[0], 1])))
+ return uv_coords
+
+ def dlib_detect(self, image):
+ return self.face_detector(image, 1)
+
+ def net_forward(self, image):
+        ''' The core of our method: regress the position map of a given image.
+ Args:
+ image: (256,256,3) array. value range: 0~1
+ Returns:
+ pos: the 3D position map. (256, 256, 3) array.
+ '''
+ return self.pos_predictor.predict(image)
+
+ def process(self, input, image_info=None):
+ ''' process image with crop operation.
+ Args:
+            input: (h,w,3) array or str (image path). image value range: 0~255.
+ image_info(optional): the bounding box information of faces. if None, will use dlib to detect face.
+
+ Returns:
+ pos: the 3D position map. (256, 256, 3).
+ '''
+ if isinstance(input, str):
+ try:
+ image = imread(input)
+ except IOError:
+ print("error opening file: ", input)
+ return None
+ else:
+ image = input
+
+ if image.ndim < 3:
+ image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
+
+ if image_info is not None:
+ if np.max(image_info.shape) > 4: # key points to get bounding box
+ kpt = image_info
+ if kpt.shape[0] > 3:
+ kpt = kpt.T
+ left = np.min(kpt[0, :])
+ right = np.max(kpt[0, :])
+ top = np.min(kpt[1, :])
+ bottom = np.max(kpt[1, :])
+ else: # bounding box
+ bbox = image_info
+ left = bbox[0]
+ right = bbox[1]
+ top = bbox[2]
+ bottom = bbox[3]
+ old_size = (right - left + bottom - top) / 2
+ center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])
+ size = int(old_size * 1.6)
+ else:
+ detected_faces = self.dlib_detect(image)
+ if len(detected_faces) == 0:
+ print('warning: no detected face')
+ return None
+
+ d = detected_faces[
+ 0].rect ## only use the first detected face (assume that each input image only contains one face)
+ left = d.left()
+ right = d.right()
+ top = d.top()
+ bottom = d.bottom()
+ old_size = (right - left + bottom - top) / 2
+ center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size * 0.14])
+ size = int(old_size * 1.58)
+
+ # crop image
+ src_pts = np.array([[center[0] - size / 2, center[1] - size / 2], [center[0] - size / 2, center[1] + size / 2],
+ [center[0] + size / 2, center[1] - size / 2]])
+ DST_PTS = np.array([[0, 0], [0, self.resolution_inp - 1], [self.resolution_inp - 1, 0]])
+ tform = estimate_transform('similarity', src_pts, DST_PTS)
+
+ image = image / 255.
+ cropped_image = warp(image, tform.inverse, output_shape=(self.resolution_inp, self.resolution_inp))
+
+ # run our net
+ #st = time()
+ cropped_pos = self.net_forward(cropped_image)
+ #print 'net time:', time() - st
+
+ # restore
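+        # map the regressed position map from cropped-image coordinates back to the
+        # original image coordinates via the inverse similarity transform; the z values
+        # only need rescaling by the transform's uniform scale factor.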
+ cropped_vertices = np.reshape(cropped_pos, [-1, 3]).T
+ z = cropped_vertices[2, :].copy() / tform.params[0, 0]
+ cropped_vertices[2, :] = 1
+ vertices = np.dot(np.linalg.inv(tform.params), cropped_vertices)
+ vertices = np.vstack((vertices[:2, :], z))
+ pos = np.reshape(vertices.T, [self.resolution_op, self.resolution_op, 3])
+
+ return pos
+
+ def get_landmarks(self, pos):
+ '''
+ Args:
+ pos: the 3D position map. shape = (256, 256, 3).
+ Returns:
+ kpt: 68 3D landmarks. shape = (68, 3).
+ '''
+ kpt = pos[self.uv_kpt_ind[1, :], self.uv_kpt_ind[0, :], :]
+ return kpt
+
+ def get_vertices(self, pos):
+ '''
+ Args:
+ pos: the 3D position map. shape = (256, 256, 3).
+ Returns:
+            vertices: the vertices (point cloud). shape = (n, 3), where n is about 40K here.
+ '''
+ all_vertices = np.reshape(pos, [self.resolution_op**2, -1])
+ vertices = all_vertices[self.face_ind, :]
+
+ return vertices
+
+ def get_colors_from_texture(self, texture):
+ '''
+ Args:
+ texture: the texture map. shape = (256, 256, 3).
+ Returns:
+            colors: the corresponding colors of the vertices. shape = (n, 3), where n is 45128 here.
+ '''
+ all_colors = np.reshape(texture, [self.resolution_op**2, -1])
+ colors = all_colors[self.face_ind, :]
+
+ return colors
+
+ def get_colors(self, image, vertices):
+        '''
+        Args:
+            image: the input image. shape = (h, w, 3).
+            vertices: the vertices (point cloud). shape = (n, 3).
+        Returns:
+            colors: the corresponding colors of the vertices, sampled from the image. shape = (n, 3).
+        '''
+ [h, w, _] = image.shape
+ vertices[:, 0] = np.minimum(np.maximum(vertices[:, 0], 0), w - 1) # x
+ vertices[:, 1] = np.minimum(np.maximum(vertices[:, 1], 0), h - 1) # y
+ ind = np.round(vertices).astype(np.int32)
+ colors = image[ind[:, 1], ind[:, 0], :] # n x 3
+
+ return colors
diff --git a/modules/image/image_processing/prnet/module.py b/modules/image/image_processing/prnet/module.py
new file mode 100644
index 0000000000000000000000000000000000000000..596cece246dfb1dd5716d6fcaccf7c7540354e81
--- /dev/null
+++ b/modules/image/image_processing/prnet/module.py
@@ -0,0 +1,226 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import argparse
+import copy
+
+import paddle
+import paddlehub as hub
+from paddlehub.module.module import moduleinfo, runnable, serving
+import numpy as np
+import cv2
+from skimage.io import imread
+from skimage.transform import rescale, resize
+
+from .util import base64_to_cv2
+from .predictor import PosPrediction
+from .utils.render import render_texture
+from .api import PRN
+
+
+@moduleinfo(name="prnet", type="CV/", author="paddlepaddle", author_email="", summary="", version="1.0.0")
+class PRNet:
+ def __init__(self):
+ self.pretrained_model = os.path.join(self.directory, "pd_model/model.pdparams")
+ self.network = PRN(is_dlib=True, prefix=self.directory)
+
+ def face_swap(self,
+ images=None,
+ paths=None,
+ mode=0,
+ output_dir='./swapping_result/',
+ use_gpu=False,
+ visualization=True):
+        '''
+        Face texture editing API, which transfers the facial texture of the reference image onto the source image.
+
+        images (list[dict]): data of images, each element is a dict with keys source and ref, whose values are:
+            - source (numpy.ndarray): image to be transformed, with shape [H, W, C] in BGR format;
+            - ref (numpy.ndarray): reference image, with shape [H, W, C] in BGR format;
+        paths (list[dict]): paths to images, each element is a dict with keys source and ref, whose values are:
+            - source (str): path to the image to be transformed;
+            - ref (str): path to the reference image;
+        mode: option, 0 for changing part of the texture, 1 for changing the whole face
+        output_dir: the dir to save the results
+        use_gpu: if True, use gpu to perform the computation, otherwise cpu.
+        visualization: if True, save results in output_dir.
+        '''
+ results = []
+ paddle.disable_static()
+ place = 'gpu:0' if use_gpu else 'cpu'
+ place = paddle.set_device(place)
+        if images is None and paths is None:
+            print('No image provided. Please input an image or an image path.')
+            return
+
+        if images is not None:
+ for image_dict in images:
+ source_img = image_dict['source'][:, :, ::-1]
+ ref_img = image_dict['ref'][:, :, ::-1]
+ results.append(self.texture_editing(source_img, ref_img, mode))
+
+        if paths is not None:
+ for path_dict in paths:
+ source_img = cv2.imread(path_dict['source'])[:, :, ::-1]
+ ref_img = cv2.imread(path_dict['ref'])[:, :, ::-1]
+ results.append(self.texture_editing(source_img, ref_img, mode))
+
+        if visualization:
+            os.makedirs(output_dir, exist_ok=True)
+ for i, out in enumerate(results):
+ cv2.imwrite(os.path.join(output_dir, 'output_{}.png'.format(i)), out[:, :, ::-1])
+
+ return results
+
+ def texture_editing(self, source_img, ref_img, mode):
+ # read image
+ image = source_img
+ [h, w, _] = image.shape
+ prn = self.network
+ #-- 1. 3d reconstruction -> get texture.
+ pos = prn.process(image)
+ vertices = prn.get_vertices(pos)
+ image = image / 255.
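+        # cv2.remap samples the normalized image at the x/y coordinates stored in the
+        # position map, unwrapping the visible face into a 256x256 UV texture map.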
+ texture = cv2.remap(
+ image,
+ pos[:, :, :2].astype(np.float32),
+ None,
+ interpolation=cv2.INTER_NEAREST,
+ borderMode=cv2.BORDER_CONSTANT,
+ borderValue=(0))
+
+ #-- 2. Texture Editing
+ Mode = mode
+        # change part of the texture (for data augmentation / selfie editing; here the eyes are modified as an example)
+ if Mode == 0:
+ # load eye mask
+ uv_face_eye = imread(os.path.join(self.directory, 'Data/uv-data/uv_face_eyes.png'), as_gray=True) / 255.
+ uv_face = imread(os.path.join(self.directory, 'Data/uv-data/uv_face.png'), as_gray=True) / 255.
+ eye_mask = (abs(uv_face_eye - uv_face) > 0).astype(np.float32)
+
+ # texture from another image or a processed texture
+ ref_image = ref_img
+ ref_pos = prn.process(ref_image)
+ ref_image = ref_image / 255.
+ ref_texture = cv2.remap(
+ ref_image,
+ ref_pos[:, :, :2].astype(np.float32),
+ None,
+ interpolation=cv2.INTER_NEAREST,
+ borderMode=cv2.BORDER_CONSTANT,
+ borderValue=(0))
+
+ # modify texture
+ new_texture = texture * (1 - eye_mask[:, :, np.newaxis]) + ref_texture * eye_mask[:, :, np.newaxis]
+
+        # change the whole face (face swap)
+ elif Mode == 1:
+ # texture from another image or a processed texture
+ ref_image = ref_img
+ ref_pos = prn.process(ref_image)
+ ref_image = ref_image / 255.
+ ref_texture = cv2.remap(
+ ref_image,
+ ref_pos[:, :, :2].astype(np.float32),
+ None,
+ interpolation=cv2.INTER_NEAREST,
+ borderMode=cv2.BORDER_CONSTANT,
+ borderValue=(0))
+ ref_vertices = prn.get_vertices(ref_pos)
+ new_texture = ref_texture #(texture + ref_texture)/2.
+
+        else:
+            raise ValueError('Wrong mode! mode should be 0 or 1.')
+
+        #-- 3. remap to the input image (render)
+ vis_colors = np.ones((vertices.shape[0], 1))
+ face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c=1)
+ face_mask = np.squeeze(face_mask > 0).astype(np.float32)
+
+ new_colors = prn.get_colors_from_texture(new_texture)
+ new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c=3)
+ new_image = image * (1 - face_mask[:, :, np.newaxis]) + new_image * face_mask[:, :, np.newaxis]
+
+        # Poisson editing for blending the image
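+        # take the bounding-box center of the rendered face mask as the clone center,
+        # then blend the re-textured face into the source image with cv2.seamlessClone.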
+ vis_ind = np.argwhere(face_mask > 0)
+ vis_min = np.min(vis_ind, 0)
+ vis_max = np.max(vis_ind, 0)
+ center = (int((vis_min[1] + vis_max[1]) / 2 + 0.5), int((vis_min[0] + vis_max[0]) / 2 + 0.5))
+ output = cv2.seamlessClone((new_image * 255).astype(np.uint8), (image * 255).astype(np.uint8),
+ (face_mask * 255).astype(np.uint8), center, cv2.NORMAL_CLONE)
+
+ return output
+
+ @runnable
+ def run_cmd(self, argvs: list):
+ """
+ Run as a command.
+ """
+ self.parser = argparse.ArgumentParser(
+ description="Run the {} module.".format(self.name),
+ prog='hub run {}'.format(self.name),
+ usage='%(prog)s',
+ add_help=True)
+
+ self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
+ self.arg_config_group = self.parser.add_argument_group(
+ title="Config options", description="Run configuration for controlling module behavior, not required.")
+ self.add_module_config_arg()
+ self.add_module_input_arg()
+ self.args = self.parser.parse_args(argvs)
+
+ self.face_swap(
+ paths=[{
+ 'source': self.args.source,
+ 'ref': self.args.ref
+ }],
+ mode=self.args.mode,
+ output_dir=self.args.output_dir,
+ use_gpu=self.args.use_gpu,
+ visualization=self.args.visualization)
+
+ @serving
+ def serving_method(self, images, **kwargs):
+ """
+ Run as a service.
+ """
+ images_decode = copy.deepcopy(images)
+ for image in images_decode:
+ image['source'] = base64_to_cv2(image['source'])
+ image['ref'] = base64_to_cv2(image['ref'])
+ results = self.face_swap(images_decode, **kwargs)
+ tolist = [result.tolist() for result in results]
+ return tolist
+
+ def add_module_config_arg(self):
+ """
+ Add the command config options.
+ """
+ self.arg_config_group.add_argument(
+ '--mode', type=int, default=0, help='process option, 0 for part texture, 1 for whole face.', choices=[0, 1])
+ self.arg_config_group.add_argument('--use_gpu', action='store_true', help="use GPU or not")
+
+ self.arg_config_group.add_argument(
+ '--output_dir', type=str, default='swapping_result', help='output directory for saving result.')
+        self.arg_config_group.add_argument('--visualization', action='store_true', help='save results or not.')
+
+ def add_module_input_arg(self):
+ """
+ Add the command input options.
+ """
+ self.arg_input_group.add_argument('--source', type=str, help="path to source image.")
+ self.arg_input_group.add_argument('--ref', type=str, help="path to reference image.")
diff --git a/modules/image/image_processing/prnet/pd_model/x2paddle_code.py b/modules/image/image_processing/prnet/pd_model/x2paddle_code.py
new file mode 100755
index 0000000000000000000000000000000000000000..c1a3e9af6f8f4f2d05459d734bd63fb11965307d
--- /dev/null
+++ b/modules/image/image_processing/prnet/pd_model/x2paddle_code.py
@@ -0,0 +1,1547 @@
+import paddle
+import math
+
+
+class TFModel(paddle.nn.Layer):
+ def __init__(self):
+ super(TFModel, self).__init__()
+ self.conv0 = paddle.nn.Conv2D(
+ weight_attr='conv0.weight',
+ bias_attr=False,
+ in_channels=3,
+ out_channels=16,
+ kernel_size=[4, 4],
+ padding='SAME')
+ self.bn0 = paddle.nn.BatchNorm(
+ num_channels=16,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_Conv_BatchNorm_FusedBatchNorm_resfcn256_Conv_BatchNorm_gamma',
+ bias_attr='resfcn256_Conv_BatchNorm_FusedBatchNorm_resfcn256_Conv_BatchNorm_beta',
+ moving_mean_name='resfcn256_Conv_BatchNorm_FusedBatchNorm_resfcn256_Conv_BatchNorm_moving_mean',
+ moving_variance_name='resfcn256_Conv_BatchNorm_FusedBatchNorm_resfcn256_Conv_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu0 = paddle.nn.ReLU()
+ self.conv1 = paddle.nn.Conv2D(
+ weight_attr='conv1.weight',
+ bias_attr=False,
+ in_channels=16,
+ out_channels=32,
+ kernel_size=[1, 1],
+ stride=2,
+ padding='SAME')
+ self.conv2 = paddle.nn.Conv2D(
+ weight_attr='conv2.weight',
+ bias_attr=False,
+ in_channels=16,
+ out_channels=16,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn1 = paddle.nn.BatchNorm(
+ num_channels=16,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_Conv_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_Conv_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_Conv_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_Conv_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu1 = paddle.nn.ReLU()
+ self.conv3 = paddle.nn.Conv2D(
+ weight_attr='conv3.weight',
+ bias_attr=False,
+ in_channels=16,
+ out_channels=16,
+ kernel_size=[4, 4],
+ stride=2,
+ padding='SAME')
+ self.bn2 = paddle.nn.BatchNorm(
+ num_channels=16,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_Conv_1_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_Conv_1_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_Conv_1_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_Conv_1_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu2 = paddle.nn.ReLU()
+ self.conv4 = paddle.nn.Conv2D(
+ weight_attr='conv4.weight',
+ bias_attr=False,
+ in_channels=16,
+ out_channels=32,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn3 = paddle.nn.BatchNorm(
+ num_channels=32,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_BatchNorm_FusedBatchNorm_resfcn256_resBlock_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_BatchNorm_FusedBatchNorm_resfcn256_resBlock_BatchNorm_beta',
+ moving_mean_name='resfcn256_resBlock_BatchNorm_FusedBatchNorm_resfcn256_resBlock_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_BatchNorm_FusedBatchNorm_resfcn256_resBlock_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu3 = paddle.nn.ReLU()
+ self.conv5 = paddle.nn.Conv2D(
+ weight_attr='conv5.weight',
+ bias_attr=False,
+ in_channels=32,
+ out_channels=16,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn4 = paddle.nn.BatchNorm(
+ num_channels=16,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_1_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_1_Conv_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_1_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_1_Conv_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_1_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_1_Conv_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_1_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_1_Conv_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu4 = paddle.nn.ReLU()
+ self.conv6 = paddle.nn.Conv2D(
+ weight_attr='conv6.weight',
+ bias_attr=False,
+ in_channels=16,
+ out_channels=16,
+ kernel_size=[4, 4],
+ padding='SAME')
+ self.bn5 = paddle.nn.BatchNorm(
+ num_channels=16,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_resBlock_1_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_1_Conv_1_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_1_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_1_Conv_1_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_1_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_1_Conv_1_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_1_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_1_Conv_1_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu5 = paddle.nn.ReLU()
+ self.conv7 = paddle.nn.Conv2D(
+ weight_attr='conv7.weight',
+ bias_attr=False,
+ in_channels=16,
+ out_channels=32,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn6 = paddle.nn.BatchNorm(
+ num_channels=32,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_1_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_1_BatchNorm_beta',
+ moving_mean_name='resfcn256_resBlock_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_1_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_1_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu6 = paddle.nn.ReLU()
+ self.conv8 = paddle.nn.Conv2D(
+ weight_attr='conv8.weight',
+ bias_attr=False,
+ in_channels=32,
+ out_channels=64,
+ kernel_size=[1, 1],
+ stride=2,
+ padding='SAME')
+ self.conv9 = paddle.nn.Conv2D(
+ weight_attr='conv9.weight',
+ bias_attr=False,
+ in_channels=32,
+ out_channels=32,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn7 = paddle.nn.BatchNorm(
+ num_channels=32,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_2_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_2_Conv_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_2_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_2_Conv_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_2_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_2_Conv_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_2_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_2_Conv_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu7 = paddle.nn.ReLU()
+ self.conv10 = paddle.nn.Conv2D(
+ weight_attr='conv10.weight',
+ bias_attr=False,
+ in_channels=32,
+ out_channels=32,
+ kernel_size=[4, 4],
+ stride=2,
+ padding='SAME')
+ self.bn8 = paddle.nn.BatchNorm(
+ num_channels=32,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_resBlock_2_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_2_Conv_1_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_2_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_2_Conv_1_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_2_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_2_Conv_1_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_2_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_2_Conv_1_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu8 = paddle.nn.ReLU()
+ self.conv11 = paddle.nn.Conv2D(
+ weight_attr='conv11.weight',
+ bias_attr=False,
+ in_channels=32,
+ out_channels=64,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn9 = paddle.nn.BatchNorm(
+ num_channels=64,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_2_BatchNorm_FusedBatchNorm_resfcn256_resBlock_2_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_2_BatchNorm_FusedBatchNorm_resfcn256_resBlock_2_BatchNorm_beta',
+ moving_mean_name='resfcn256_resBlock_2_BatchNorm_FusedBatchNorm_resfcn256_resBlock_2_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_2_BatchNorm_FusedBatchNorm_resfcn256_resBlock_2_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu9 = paddle.nn.ReLU()
+ self.conv12 = paddle.nn.Conv2D(
+ weight_attr='conv12.weight',
+ bias_attr=False,
+ in_channels=64,
+ out_channels=32,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn10 = paddle.nn.BatchNorm(
+ num_channels=32,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_3_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_3_Conv_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_3_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_3_Conv_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_3_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_3_Conv_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_3_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_3_Conv_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu10 = paddle.nn.ReLU()
+ self.conv13 = paddle.nn.Conv2D(
+ weight_attr='conv13.weight',
+ bias_attr=False,
+ in_channels=32,
+ out_channels=32,
+ kernel_size=[4, 4],
+ padding='SAME')
+ self.bn11 = paddle.nn.BatchNorm(
+ num_channels=32,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_resBlock_3_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_3_Conv_1_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_3_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_3_Conv_1_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_3_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_3_Conv_1_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_3_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_3_Conv_1_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu11 = paddle.nn.ReLU()
+ self.conv14 = paddle.nn.Conv2D(
+ weight_attr='conv14.weight',
+ bias_attr=False,
+ in_channels=32,
+ out_channels=64,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn12 = paddle.nn.BatchNorm(
+ num_channels=64,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_3_BatchNorm_FusedBatchNorm_resfcn256_resBlock_3_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_3_BatchNorm_FusedBatchNorm_resfcn256_resBlock_3_BatchNorm_beta',
+ moving_mean_name='resfcn256_resBlock_3_BatchNorm_FusedBatchNorm_resfcn256_resBlock_3_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_3_BatchNorm_FusedBatchNorm_resfcn256_resBlock_3_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu12 = paddle.nn.ReLU()
+ self.conv15 = paddle.nn.Conv2D(
+ weight_attr='conv15.weight',
+ bias_attr=False,
+ in_channels=64,
+ out_channels=128,
+ kernel_size=[1, 1],
+ stride=2,
+ padding='SAME')
+ self.conv16 = paddle.nn.Conv2D(
+ weight_attr='conv16.weight',
+ bias_attr=False,
+ in_channels=64,
+ out_channels=64,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn13 = paddle.nn.BatchNorm(
+ num_channels=64,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_4_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_4_Conv_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_4_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_4_Conv_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_4_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_4_Conv_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_4_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_4_Conv_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu13 = paddle.nn.ReLU()
+ self.conv17 = paddle.nn.Conv2D(
+ weight_attr='conv17.weight',
+ bias_attr=False,
+ in_channels=64,
+ out_channels=64,
+ kernel_size=[4, 4],
+ stride=2,
+ padding='SAME')
+ self.bn14 = paddle.nn.BatchNorm(
+ num_channels=64,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_resBlock_4_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_4_Conv_1_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_4_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_4_Conv_1_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_4_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_4_Conv_1_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_4_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_4_Conv_1_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu14 = paddle.nn.ReLU()
+ self.conv18 = paddle.nn.Conv2D(
+ weight_attr='conv18.weight',
+ bias_attr=False,
+ in_channels=64,
+ out_channels=128,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn15 = paddle.nn.BatchNorm(
+ num_channels=128,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_4_BatchNorm_FusedBatchNorm_resfcn256_resBlock_4_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_4_BatchNorm_FusedBatchNorm_resfcn256_resBlock_4_BatchNorm_beta',
+ moving_mean_name='resfcn256_resBlock_4_BatchNorm_FusedBatchNorm_resfcn256_resBlock_4_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_4_BatchNorm_FusedBatchNorm_resfcn256_resBlock_4_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu15 = paddle.nn.ReLU()
+ self.conv19 = paddle.nn.Conv2D(
+ weight_attr='conv19.weight',
+ bias_attr=False,
+ in_channels=128,
+ out_channels=64,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn16 = paddle.nn.BatchNorm(
+ num_channels=64,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_5_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_5_Conv_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_5_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_5_Conv_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_5_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_5_Conv_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_5_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_5_Conv_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu16 = paddle.nn.ReLU()
+ self.conv20 = paddle.nn.Conv2D(
+ weight_attr='conv20.weight',
+ bias_attr=False,
+ in_channels=64,
+ out_channels=64,
+ kernel_size=[4, 4],
+ padding='SAME')
+ self.bn17 = paddle.nn.BatchNorm(
+ num_channels=64,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_resBlock_5_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_5_Conv_1_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_5_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_5_Conv_1_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_5_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_5_Conv_1_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_5_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_5_Conv_1_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu17 = paddle.nn.ReLU()
+ self.conv21 = paddle.nn.Conv2D(
+ weight_attr='conv21.weight',
+ bias_attr=False,
+ in_channels=64,
+ out_channels=128,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn18 = paddle.nn.BatchNorm(
+ num_channels=128,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_5_BatchNorm_FusedBatchNorm_resfcn256_resBlock_5_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_5_BatchNorm_FusedBatchNorm_resfcn256_resBlock_5_BatchNorm_beta',
+ moving_mean_name='resfcn256_resBlock_5_BatchNorm_FusedBatchNorm_resfcn256_resBlock_5_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_5_BatchNorm_FusedBatchNorm_resfcn256_resBlock_5_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu18 = paddle.nn.ReLU()
+ self.conv22 = paddle.nn.Conv2D(
+ weight_attr='conv22.weight',
+ bias_attr=False,
+ in_channels=128,
+ out_channels=256,
+ kernel_size=[1, 1],
+ stride=2,
+ padding='SAME')
+ self.conv23 = paddle.nn.Conv2D(
+ weight_attr='conv23.weight',
+ bias_attr=False,
+ in_channels=128,
+ out_channels=128,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn19 = paddle.nn.BatchNorm(
+ num_channels=128,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_6_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_6_Conv_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_6_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_6_Conv_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_6_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_6_Conv_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_6_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_6_Conv_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu19 = paddle.nn.ReLU()
+ self.conv24 = paddle.nn.Conv2D(
+ weight_attr='conv24.weight',
+ bias_attr=False,
+ in_channels=128,
+ out_channels=128,
+ kernel_size=[4, 4],
+ stride=2,
+ padding='SAME')
+ self.bn20 = paddle.nn.BatchNorm(
+ num_channels=128,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_resBlock_6_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_6_Conv_1_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_6_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_6_Conv_1_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_6_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_6_Conv_1_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_6_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_6_Conv_1_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu20 = paddle.nn.ReLU()
+ self.conv25 = paddle.nn.Conv2D(
+ weight_attr='conv25.weight',
+ bias_attr=False,
+ in_channels=128,
+ out_channels=256,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn21 = paddle.nn.BatchNorm(
+ num_channels=256,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_6_BatchNorm_FusedBatchNorm_resfcn256_resBlock_6_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_6_BatchNorm_FusedBatchNorm_resfcn256_resBlock_6_BatchNorm_beta',
+ moving_mean_name='resfcn256_resBlock_6_BatchNorm_FusedBatchNorm_resfcn256_resBlock_6_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_6_BatchNorm_FusedBatchNorm_resfcn256_resBlock_6_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu21 = paddle.nn.ReLU()
+ self.conv26 = paddle.nn.Conv2D(
+ weight_attr='conv26.weight',
+ bias_attr=False,
+ in_channels=256,
+ out_channels=128,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn22 = paddle.nn.BatchNorm(
+ num_channels=128,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_7_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_7_Conv_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_7_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_7_Conv_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_7_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_7_Conv_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_7_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_7_Conv_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu22 = paddle.nn.ReLU()
+ self.conv27 = paddle.nn.Conv2D(
+ weight_attr='conv27.weight',
+ bias_attr=False,
+ in_channels=128,
+ out_channels=128,
+ kernel_size=[4, 4],
+ padding='SAME')
+ self.bn23 = paddle.nn.BatchNorm(
+ num_channels=128,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_resBlock_7_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_7_Conv_1_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_7_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_7_Conv_1_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_7_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_7_Conv_1_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_7_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_7_Conv_1_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu23 = paddle.nn.ReLU()
+ self.conv28 = paddle.nn.Conv2D(
+ weight_attr='conv28.weight',
+ bias_attr=False,
+ in_channels=128,
+ out_channels=256,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn24 = paddle.nn.BatchNorm(
+ num_channels=256,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_7_BatchNorm_FusedBatchNorm_resfcn256_resBlock_7_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_7_BatchNorm_FusedBatchNorm_resfcn256_resBlock_7_BatchNorm_beta',
+ moving_mean_name='resfcn256_resBlock_7_BatchNorm_FusedBatchNorm_resfcn256_resBlock_7_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_7_BatchNorm_FusedBatchNorm_resfcn256_resBlock_7_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu24 = paddle.nn.ReLU()
+ self.conv29 = paddle.nn.Conv2D(
+ weight_attr='conv29.weight',
+ bias_attr=False,
+ in_channels=256,
+ out_channels=512,
+ kernel_size=[1, 1],
+ stride=2,
+ padding='SAME')
+ self.conv30 = paddle.nn.Conv2D(
+ weight_attr='conv30.weight',
+ bias_attr=False,
+ in_channels=256,
+ out_channels=256,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn25 = paddle.nn.BatchNorm(
+ num_channels=256,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_8_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_8_Conv_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_8_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_8_Conv_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_8_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_8_Conv_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_8_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_8_Conv_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu25 = paddle.nn.ReLU()
+ self.conv31 = paddle.nn.Conv2D(
+ weight_attr='conv31.weight',
+ bias_attr=False,
+ in_channels=256,
+ out_channels=256,
+ kernel_size=[4, 4],
+ stride=2,
+ padding='SAME')
+ self.bn26 = paddle.nn.BatchNorm(
+ num_channels=256,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_resBlock_8_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_8_Conv_1_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_8_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_8_Conv_1_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_8_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_8_Conv_1_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_8_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_8_Conv_1_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu26 = paddle.nn.ReLU()
+ self.conv32 = paddle.nn.Conv2D(
+ weight_attr='conv32.weight',
+ bias_attr=False,
+ in_channels=256,
+ out_channels=512,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn27 = paddle.nn.BatchNorm(
+ num_channels=512,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_8_BatchNorm_FusedBatchNorm_resfcn256_resBlock_8_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_8_BatchNorm_FusedBatchNorm_resfcn256_resBlock_8_BatchNorm_beta',
+ moving_mean_name='resfcn256_resBlock_8_BatchNorm_FusedBatchNorm_resfcn256_resBlock_8_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_8_BatchNorm_FusedBatchNorm_resfcn256_resBlock_8_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu27 = paddle.nn.ReLU()
+ self.conv33 = paddle.nn.Conv2D(
+ weight_attr='conv33.weight',
+ bias_attr=False,
+ in_channels=512,
+ out_channels=256,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn28 = paddle.nn.BatchNorm(
+ num_channels=256,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_9_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_9_Conv_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_9_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_9_Conv_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_9_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_9_Conv_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_9_Conv_BatchNorm_FusedBatchNorm_resfcn256_resBlock_9_Conv_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu28 = paddle.nn.ReLU()
+ self.conv34 = paddle.nn.Conv2D(
+ weight_attr='conv34.weight',
+ bias_attr=False,
+ in_channels=256,
+ out_channels=256,
+ kernel_size=[4, 4],
+ padding='SAME')
+ self.bn29 = paddle.nn.BatchNorm(
+ num_channels=256,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_resBlock_9_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_9_Conv_1_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_9_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_9_Conv_1_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_resBlock_9_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_9_Conv_1_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_9_Conv_1_BatchNorm_FusedBatchNorm_resfcn256_resBlock_9_Conv_1_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu29 = paddle.nn.ReLU()
+ self.conv35 = paddle.nn.Conv2D(
+ weight_attr='conv35.weight',
+ bias_attr=False,
+ in_channels=256,
+ out_channels=512,
+ kernel_size=[1, 1],
+ padding='SAME')
+ self.bn30 = paddle.nn.BatchNorm(
+ num_channels=512,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_resBlock_9_BatchNorm_FusedBatchNorm_resfcn256_resBlock_9_BatchNorm_gamma',
+ bias_attr='resfcn256_resBlock_9_BatchNorm_FusedBatchNorm_resfcn256_resBlock_9_BatchNorm_beta',
+ moving_mean_name='resfcn256_resBlock_9_BatchNorm_FusedBatchNorm_resfcn256_resBlock_9_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_resBlock_9_BatchNorm_FusedBatchNorm_resfcn256_resBlock_9_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu30 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_conv2d_transpose_conv36_weight = self.create_parameter(
+ shape=(512, 512, 4, 4), attr='conv36.weight')
+ self.bn31 = paddle.nn.BatchNorm(
+ num_channels=512,
+ epsilon=0.0010000000474974513,
+ param_attr='resfcn256_Conv2d_transpose_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_BatchNorm_gamma',
+ bias_attr='resfcn256_Conv2d_transpose_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu31 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_1_conv2d_transpose_conv37_weight = self.create_parameter(
+ shape=(512, 256, 4, 4), attr='conv37.weight')
+ self.bn32 = paddle.nn.BatchNorm(
+ num_channels=256,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_1_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_1_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_1_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_1_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_1_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_1_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_1_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_1_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu32 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_2_conv2d_transpose_conv38_weight = self.create_parameter(
+ shape=(256, 256, 4, 4), attr='conv38.weight')
+ self.bn33 = paddle.nn.BatchNorm(
+ num_channels=256,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_2_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_2_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_2_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_2_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_2_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_2_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_2_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_2_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu33 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_3_conv2d_transpose_conv39_weight = self.create_parameter(
+ shape=(256, 256, 4, 4), attr='conv39.weight')
+ self.bn34 = paddle.nn.BatchNorm(
+ num_channels=256,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_3_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_3_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_3_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_3_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_3_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_3_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_3_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_3_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu34 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_4_conv2d_transpose_conv40_weight = self.create_parameter(
+ shape=(256, 128, 4, 4), attr='conv40.weight')
+ self.bn35 = paddle.nn.BatchNorm(
+ num_channels=128,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_4_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_4_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_4_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_4_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_4_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_4_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_4_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_4_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu35 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_5_conv2d_transpose_conv41_weight = self.create_parameter(
+ shape=(128, 128, 4, 4), attr='conv41.weight')
+ self.bn36 = paddle.nn.BatchNorm(
+ num_channels=128,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_5_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_5_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_5_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_5_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_5_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_5_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_5_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_5_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu36 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_6_conv2d_transpose_conv42_weight = self.create_parameter(
+ shape=(128, 128, 4, 4), attr='conv42.weight')
+ self.bn37 = paddle.nn.BatchNorm(
+ num_channels=128,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_6_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_6_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_6_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_6_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_6_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_6_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_6_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_6_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu37 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_7_conv2d_transpose_conv43_weight = self.create_parameter(
+ shape=(128, 64, 4, 4), attr='conv43.weight')
+ self.bn38 = paddle.nn.BatchNorm(
+ num_channels=64,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_7_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_7_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_7_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_7_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_7_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_7_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_7_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_7_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu38 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_8_conv2d_transpose_conv44_weight = self.create_parameter(
+ shape=(64, 64, 4, 4), attr='conv44.weight')
+ self.bn39 = paddle.nn.BatchNorm(
+ num_channels=64,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_8_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_8_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_8_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_8_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_8_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_8_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_8_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_8_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu39 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_9_conv2d_transpose_conv45_weight = self.create_parameter(
+ shape=(64, 64, 4, 4), attr='conv45.weight')
+ self.bn40 = paddle.nn.BatchNorm(
+ num_channels=64,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_9_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_9_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_9_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_9_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_9_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_9_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_9_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_9_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu40 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_10_conv2d_transpose_conv46_weight = self.create_parameter(
+ shape=(64, 32, 4, 4), attr='conv46.weight')
+ self.bn41 = paddle.nn.BatchNorm(
+ num_channels=32,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_10_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_10_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_10_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_10_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_10_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_10_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_10_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_10_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu41 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_11_conv2d_transpose_conv47_weight = self.create_parameter(
+ shape=(32, 32, 4, 4), attr='conv47.weight')
+ self.bn42 = paddle.nn.BatchNorm(
+ num_channels=32,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_11_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_11_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_11_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_11_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_11_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_11_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_11_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_11_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu42 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_12_conv2d_transpose_conv48_weight = self.create_parameter(
+ shape=(32, 16, 4, 4), attr='conv48.weight')
+ self.bn43 = paddle.nn.BatchNorm(
+ num_channels=16,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_12_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_12_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_12_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_12_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_12_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_12_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_12_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_12_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu43 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_13_conv2d_transpose_conv49_weight = self.create_parameter(
+ shape=(16, 16, 4, 4), attr='conv49.weight')
+ self.bn44 = paddle.nn.BatchNorm(
+ num_channels=16,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_13_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_13_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_13_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_13_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_13_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_13_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_13_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_13_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu44 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_14_conv2d_transpose_conv50_weight = self.create_parameter(
+ shape=(16, 3, 4, 4), attr='conv50.weight')
+ self.bn45 = paddle.nn.BatchNorm(
+ num_channels=3,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_14_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_14_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_14_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_14_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_14_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_14_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_14_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_14_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu45 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_15_conv2d_transpose_conv51_weight = self.create_parameter(
+ shape=(3, 3, 4, 4), attr='conv51.weight')
+ self.bn46 = paddle.nn.BatchNorm(
+ num_channels=3,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_15_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_15_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_15_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_15_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_15_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_15_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_15_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_15_BatchNorm_moving_variance',
+ is_test=True)
+ self.relu46 = paddle.nn.ReLU()
+ self.resfcn256_Conv2d_transpose_16_conv2d_transpose_conv52_weight = self.create_parameter(
+ shape=(3, 3, 4, 4), attr='conv52.weight')
+ self.bn47 = paddle.nn.BatchNorm(
+ num_channels=3,
+ epsilon=0.0010000000474974513,
+ param_attr=
+ 'resfcn256_Conv2d_transpose_16_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_16_BatchNorm_gamma',
+ bias_attr=
+ 'resfcn256_Conv2d_transpose_16_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_16_BatchNorm_beta',
+ moving_mean_name=
+ 'resfcn256_Conv2d_transpose_16_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_16_BatchNorm_moving_mean',
+ moving_variance_name=
+ 'resfcn256_Conv2d_transpose_16_BatchNorm_FusedBatchNorm_resfcn256_Conv2d_transpose_16_BatchNorm_moving_variance',
+ is_test=True)
+ self.sigmoid0 = paddle.nn.Sigmoid()
+
+ def forward(self, Placeholder):
+ resfcn256_Conv2d_transpose_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=512)
+ resfcn256_Conv2d_transpose_1_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=2)
+ resfcn256_Conv2d_transpose_1_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=2)
+ resfcn256_Conv2d_transpose_1_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=256)
+ resfcn256_Conv2d_transpose_2_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_2_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_2_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=256)
+ resfcn256_Conv2d_transpose_3_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_3_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_3_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=256)
+ resfcn256_Conv2d_transpose_4_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=2)
+ resfcn256_Conv2d_transpose_4_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=2)
+ resfcn256_Conv2d_transpose_4_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=128)
+ resfcn256_Conv2d_transpose_5_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_5_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_5_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=128)
+ resfcn256_Conv2d_transpose_6_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_6_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_6_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=128)
+ resfcn256_Conv2d_transpose_7_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=2)
+ resfcn256_Conv2d_transpose_7_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=2)
+ resfcn256_Conv2d_transpose_7_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=64)
+ resfcn256_Conv2d_transpose_8_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_8_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_8_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=64)
+ resfcn256_Conv2d_transpose_9_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_9_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_9_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=64)
+ resfcn256_Conv2d_transpose_10_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=2)
+ resfcn256_Conv2d_transpose_10_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=2)
+ resfcn256_Conv2d_transpose_10_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=32)
+ resfcn256_Conv2d_transpose_11_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_11_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_11_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=32)
+ resfcn256_Conv2d_transpose_12_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=2)
+ resfcn256_Conv2d_transpose_12_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=2)
+ resfcn256_Conv2d_transpose_12_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=16)
+ resfcn256_Conv2d_transpose_13_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_13_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_13_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=16)
+ resfcn256_Conv2d_transpose_14_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_14_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_14_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=3)
+ resfcn256_Conv2d_transpose_15_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_15_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_15_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=3)
+ resfcn256_Conv2d_transpose_16_mul_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_16_mul_1_y = paddle.full(dtype='int32', shape=[1], fill_value=1)
+ resfcn256_Conv2d_transpose_16_stack_3 = paddle.full(dtype='int32', shape=[1], fill_value=3)
+ conv2d_transpose_0 = paddle.transpose(x=Placeholder, perm=[0, 3, 1, 2])
+ resfcn256_Conv_Conv2D = self.conv0(conv2d_transpose_0)
+ resfcn256_Conv_BatchNorm_FusedBatchNorm = self.bn0(resfcn256_Conv_Conv2D)
+ resfcn256_Conv_Relu = self.relu0(resfcn256_Conv_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_shortcut_Conv2D = self.conv1(resfcn256_Conv_Relu)
+ resfcn256_resBlock_Conv_Conv2D = self.conv2(resfcn256_Conv_Relu)
+ resfcn256_resBlock_Conv_BatchNorm_FusedBatchNorm = self.bn1(resfcn256_resBlock_Conv_Conv2D)
+ resfcn256_resBlock_Conv_Relu = self.relu1(resfcn256_resBlock_Conv_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_Conv_1_Conv2D = self.conv3(resfcn256_resBlock_Conv_Relu)
+ resfcn256_resBlock_Conv_1_BatchNorm_FusedBatchNorm = self.bn2(resfcn256_resBlock_Conv_1_Conv2D)
+ resfcn256_resBlock_Conv_1_Relu = self.relu2(resfcn256_resBlock_Conv_1_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_Conv_2_Conv2D = self.conv4(resfcn256_resBlock_Conv_1_Relu)
+ resfcn256_resBlock_add = paddle.add(x=resfcn256_resBlock_Conv_2_Conv2D, y=resfcn256_resBlock_shortcut_Conv2D)
+ resfcn256_resBlock_BatchNorm_FusedBatchNorm = self.bn3(resfcn256_resBlock_add)
+ resfcn256_resBlock_Relu = self.relu3(resfcn256_resBlock_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_1_Conv_Conv2D = self.conv5(resfcn256_resBlock_Relu)
+ resfcn256_resBlock_1_Conv_BatchNorm_FusedBatchNorm = self.bn4(resfcn256_resBlock_1_Conv_Conv2D)
+ resfcn256_resBlock_1_Conv_Relu = self.relu4(resfcn256_resBlock_1_Conv_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_1_Conv_1_Conv2D = self.conv6(resfcn256_resBlock_1_Conv_Relu)
+ resfcn256_resBlock_1_Conv_1_BatchNorm_FusedBatchNorm = self.bn5(resfcn256_resBlock_1_Conv_1_Conv2D)
+ resfcn256_resBlock_1_Conv_1_Relu = self.relu5(resfcn256_resBlock_1_Conv_1_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_1_Conv_2_Conv2D = self.conv7(resfcn256_resBlock_1_Conv_1_Relu)
+ resfcn256_resBlock_1_add = paddle.add(x=resfcn256_resBlock_1_Conv_2_Conv2D, y=resfcn256_resBlock_Relu)
+ resfcn256_resBlock_1_BatchNorm_FusedBatchNorm = self.bn6(resfcn256_resBlock_1_add)
+ resfcn256_resBlock_1_Relu = self.relu6(resfcn256_resBlock_1_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_2_shortcut_Conv2D = self.conv8(resfcn256_resBlock_1_Relu)
+ resfcn256_resBlock_2_Conv_Conv2D = self.conv9(resfcn256_resBlock_1_Relu)
+ resfcn256_resBlock_2_Conv_BatchNorm_FusedBatchNorm = self.bn7(resfcn256_resBlock_2_Conv_Conv2D)
+ resfcn256_resBlock_2_Conv_Relu = self.relu7(resfcn256_resBlock_2_Conv_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_2_Conv_1_Conv2D = self.conv10(resfcn256_resBlock_2_Conv_Relu)
+ resfcn256_resBlock_2_Conv_1_BatchNorm_FusedBatchNorm = self.bn8(resfcn256_resBlock_2_Conv_1_Conv2D)
+ resfcn256_resBlock_2_Conv_1_Relu = self.relu8(resfcn256_resBlock_2_Conv_1_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_2_Conv_2_Conv2D = self.conv11(resfcn256_resBlock_2_Conv_1_Relu)
+ resfcn256_resBlock_2_add = paddle.add(
+ x=resfcn256_resBlock_2_Conv_2_Conv2D, y=resfcn256_resBlock_2_shortcut_Conv2D)
+ resfcn256_resBlock_2_BatchNorm_FusedBatchNorm = self.bn9(resfcn256_resBlock_2_add)
+ resfcn256_resBlock_2_Relu = self.relu9(resfcn256_resBlock_2_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_3_Conv_Conv2D = self.conv12(resfcn256_resBlock_2_Relu)
+ resfcn256_resBlock_3_Conv_BatchNorm_FusedBatchNorm = self.bn10(resfcn256_resBlock_3_Conv_Conv2D)
+ resfcn256_resBlock_3_Conv_Relu = self.relu10(resfcn256_resBlock_3_Conv_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_3_Conv_1_Conv2D = self.conv13(resfcn256_resBlock_3_Conv_Relu)
+ resfcn256_resBlock_3_Conv_1_BatchNorm_FusedBatchNorm = self.bn11(resfcn256_resBlock_3_Conv_1_Conv2D)
+ resfcn256_resBlock_3_Conv_1_Relu = self.relu11(resfcn256_resBlock_3_Conv_1_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_3_Conv_2_Conv2D = self.conv14(resfcn256_resBlock_3_Conv_1_Relu)
+ resfcn256_resBlock_3_add = paddle.add(x=resfcn256_resBlock_3_Conv_2_Conv2D, y=resfcn256_resBlock_2_Relu)
+ resfcn256_resBlock_3_BatchNorm_FusedBatchNorm = self.bn12(resfcn256_resBlock_3_add)
+ resfcn256_resBlock_3_Relu = self.relu12(resfcn256_resBlock_3_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_4_shortcut_Conv2D = self.conv15(resfcn256_resBlock_3_Relu)
+ resfcn256_resBlock_4_Conv_Conv2D = self.conv16(resfcn256_resBlock_3_Relu)
+ resfcn256_resBlock_4_Conv_BatchNorm_FusedBatchNorm = self.bn13(resfcn256_resBlock_4_Conv_Conv2D)
+ resfcn256_resBlock_4_Conv_Relu = self.relu13(resfcn256_resBlock_4_Conv_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_4_Conv_1_Conv2D = self.conv17(resfcn256_resBlock_4_Conv_Relu)
+ resfcn256_resBlock_4_Conv_1_BatchNorm_FusedBatchNorm = self.bn14(resfcn256_resBlock_4_Conv_1_Conv2D)
+ resfcn256_resBlock_4_Conv_1_Relu = self.relu14(resfcn256_resBlock_4_Conv_1_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_4_Conv_2_Conv2D = self.conv18(resfcn256_resBlock_4_Conv_1_Relu)
+ resfcn256_resBlock_4_add = paddle.add(
+ x=resfcn256_resBlock_4_Conv_2_Conv2D, y=resfcn256_resBlock_4_shortcut_Conv2D)
+ resfcn256_resBlock_4_BatchNorm_FusedBatchNorm = self.bn15(resfcn256_resBlock_4_add)
+ resfcn256_resBlock_4_Relu = self.relu15(resfcn256_resBlock_4_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_5_Conv_Conv2D = self.conv19(resfcn256_resBlock_4_Relu)
+ resfcn256_resBlock_5_Conv_BatchNorm_FusedBatchNorm = self.bn16(resfcn256_resBlock_5_Conv_Conv2D)
+ resfcn256_resBlock_5_Conv_Relu = self.relu16(resfcn256_resBlock_5_Conv_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_5_Conv_1_Conv2D = self.conv20(resfcn256_resBlock_5_Conv_Relu)
+ resfcn256_resBlock_5_Conv_1_BatchNorm_FusedBatchNorm = self.bn17(resfcn256_resBlock_5_Conv_1_Conv2D)
+ resfcn256_resBlock_5_Conv_1_Relu = self.relu17(resfcn256_resBlock_5_Conv_1_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_5_Conv_2_Conv2D = self.conv21(resfcn256_resBlock_5_Conv_1_Relu)
+ resfcn256_resBlock_5_add = paddle.add(x=resfcn256_resBlock_5_Conv_2_Conv2D, y=resfcn256_resBlock_4_Relu)
+ resfcn256_resBlock_5_BatchNorm_FusedBatchNorm = self.bn18(resfcn256_resBlock_5_add)
+ resfcn256_resBlock_5_Relu = self.relu18(resfcn256_resBlock_5_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_6_shortcut_Conv2D = self.conv22(resfcn256_resBlock_5_Relu)
+ resfcn256_resBlock_6_Conv_Conv2D = self.conv23(resfcn256_resBlock_5_Relu)
+ resfcn256_resBlock_6_Conv_BatchNorm_FusedBatchNorm = self.bn19(resfcn256_resBlock_6_Conv_Conv2D)
+ resfcn256_resBlock_6_Conv_Relu = self.relu19(resfcn256_resBlock_6_Conv_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_6_Conv_1_Conv2D = self.conv24(resfcn256_resBlock_6_Conv_Relu)
+ resfcn256_resBlock_6_Conv_1_BatchNorm_FusedBatchNorm = self.bn20(resfcn256_resBlock_6_Conv_1_Conv2D)
+ resfcn256_resBlock_6_Conv_1_Relu = self.relu20(resfcn256_resBlock_6_Conv_1_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_6_Conv_2_Conv2D = self.conv25(resfcn256_resBlock_6_Conv_1_Relu)
+ resfcn256_resBlock_6_add = paddle.add(
+ x=resfcn256_resBlock_6_Conv_2_Conv2D, y=resfcn256_resBlock_6_shortcut_Conv2D)
+ resfcn256_resBlock_6_BatchNorm_FusedBatchNorm = self.bn21(resfcn256_resBlock_6_add)
+ resfcn256_resBlock_6_Relu = self.relu21(resfcn256_resBlock_6_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_7_Conv_Conv2D = self.conv26(resfcn256_resBlock_6_Relu)
+ resfcn256_resBlock_7_Conv_BatchNorm_FusedBatchNorm = self.bn22(resfcn256_resBlock_7_Conv_Conv2D)
+ resfcn256_resBlock_7_Conv_Relu = self.relu22(resfcn256_resBlock_7_Conv_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_7_Conv_1_Conv2D = self.conv27(resfcn256_resBlock_7_Conv_Relu)
+ resfcn256_resBlock_7_Conv_1_BatchNorm_FusedBatchNorm = self.bn23(resfcn256_resBlock_7_Conv_1_Conv2D)
+ resfcn256_resBlock_7_Conv_1_Relu = self.relu23(resfcn256_resBlock_7_Conv_1_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_7_Conv_2_Conv2D = self.conv28(resfcn256_resBlock_7_Conv_1_Relu)
+ resfcn256_resBlock_7_add = paddle.add(x=resfcn256_resBlock_7_Conv_2_Conv2D, y=resfcn256_resBlock_6_Relu)
+ resfcn256_resBlock_7_BatchNorm_FusedBatchNorm = self.bn24(resfcn256_resBlock_7_add)
+ resfcn256_resBlock_7_Relu = self.relu24(resfcn256_resBlock_7_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_8_shortcut_Conv2D = self.conv29(resfcn256_resBlock_7_Relu)
+ resfcn256_resBlock_8_Conv_Conv2D = self.conv30(resfcn256_resBlock_7_Relu)
+ resfcn256_resBlock_8_Conv_BatchNorm_FusedBatchNorm = self.bn25(resfcn256_resBlock_8_Conv_Conv2D)
+ resfcn256_resBlock_8_Conv_Relu = self.relu25(resfcn256_resBlock_8_Conv_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_8_Conv_1_Conv2D = self.conv31(resfcn256_resBlock_8_Conv_Relu)
+ resfcn256_resBlock_8_Conv_1_BatchNorm_FusedBatchNorm = self.bn26(resfcn256_resBlock_8_Conv_1_Conv2D)
+ resfcn256_resBlock_8_Conv_1_Relu = self.relu26(resfcn256_resBlock_8_Conv_1_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_8_Conv_2_Conv2D = self.conv32(resfcn256_resBlock_8_Conv_1_Relu)
+ resfcn256_resBlock_8_add = paddle.add(
+ x=resfcn256_resBlock_8_Conv_2_Conv2D, y=resfcn256_resBlock_8_shortcut_Conv2D)
+ resfcn256_resBlock_8_BatchNorm_FusedBatchNorm = self.bn27(resfcn256_resBlock_8_add)
+ resfcn256_resBlock_8_Relu = self.relu27(resfcn256_resBlock_8_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_9_Conv_Conv2D = self.conv33(resfcn256_resBlock_8_Relu)
+ resfcn256_resBlock_9_Conv_BatchNorm_FusedBatchNorm = self.bn28(resfcn256_resBlock_9_Conv_Conv2D)
+ resfcn256_resBlock_9_Conv_Relu = self.relu28(resfcn256_resBlock_9_Conv_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_9_Conv_1_Conv2D = self.conv34(resfcn256_resBlock_9_Conv_Relu)
+ resfcn256_resBlock_9_Conv_1_BatchNorm_FusedBatchNorm = self.bn29(resfcn256_resBlock_9_Conv_1_Conv2D)
+ resfcn256_resBlock_9_Conv_1_Relu = self.relu29(resfcn256_resBlock_9_Conv_1_BatchNorm_FusedBatchNorm)
+ resfcn256_resBlock_9_Conv_2_Conv2D = self.conv35(resfcn256_resBlock_9_Conv_1_Relu)
+ resfcn256_resBlock_9_add = paddle.add(x=resfcn256_resBlock_9_Conv_2_Conv2D, y=resfcn256_resBlock_8_Relu)
+ resfcn256_resBlock_9_BatchNorm_FusedBatchNorm = self.bn30(resfcn256_resBlock_9_add)
+ resfcn256_resBlock_9_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_resBlock_9_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_resBlock_9_Relu = self.relu30(resfcn256_resBlock_9_BatchNorm_FusedBatchNorm)
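+ # The Shape/slice/multiply/stack sequences below reproduce TF's dynamic
+ # output-shape computation for Conv2DBackpropInput; the stacked result is left
+ # unused because every conv2d_transpose call receives a fixed output_size for
+ # the 256x256 input.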
+ resfcn256_Conv2d_transpose_Shape = paddle.shape(input=resfcn256_resBlock_9_Relu)
+ resfcn256_Conv2d_transpose_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_strided_slice_1, y=resfcn256_Conv2d_transpose_mul_y)
+ resfcn256_Conv2d_transpose_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_strided_slice_2, y=resfcn256_Conv2d_transpose_mul_1_y)
+ resfcn256_Conv2d_transpose_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_strided_slice, resfcn256_Conv2d_transpose_mul, resfcn256_Conv2d_transpose_mul_1,
+ resfcn256_Conv2d_transpose_stack_3
+ ])
+ resfcn256_Conv2d_transpose_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_stack, shape=[-1])
+ conv2dbackpropinput_transpose_0 = paddle.transpose(x=resfcn256_resBlock_9_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_conv2d_transpose_conv36_weight = self.resfcn256_Conv2d_transpose_conv2d_transpose_conv36_weight
+ resfcn256_Conv2d_transpose_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_0,
+ weight=resfcn256_Conv2d_transpose_conv2d_transpose_conv36_weight,
+ stride=[1, 1],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[8, 8])
+ resfcn256_Conv2d_transpose_BatchNorm_FusedBatchNorm = self.bn31(resfcn256_Conv2d_transpose_conv2d_transpose)
+ resfcn256_Conv2d_transpose_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_Relu = self.relu31(resfcn256_Conv2d_transpose_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_1_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_Relu)
+ resfcn256_Conv2d_transpose_1_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_1_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_1_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_1_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_1_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_1_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_1_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_1_strided_slice_1, y=resfcn256_Conv2d_transpose_1_mul_y)
+ resfcn256_Conv2d_transpose_1_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_1_strided_slice_2, y=resfcn256_Conv2d_transpose_1_mul_1_y)
+ resfcn256_Conv2d_transpose_1_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_1_strided_slice, resfcn256_Conv2d_transpose_1_mul,
+ resfcn256_Conv2d_transpose_1_mul_1, resfcn256_Conv2d_transpose_1_stack_3
+ ])
+ resfcn256_Conv2d_transpose_1_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_1_stack, shape=[-1])
+ conv2dbackpropinput_transpose_1 = paddle.transpose(x=resfcn256_Conv2d_transpose_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_1_conv2d_transpose_conv37_weight = self.resfcn256_Conv2d_transpose_1_conv2d_transpose_conv37_weight
+ resfcn256_Conv2d_transpose_1_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_1,
+ weight=resfcn256_Conv2d_transpose_1_conv2d_transpose_conv37_weight,
+ stride=[2, 2],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[16, 16])
+ resfcn256_Conv2d_transpose_1_BatchNorm_FusedBatchNorm = self.bn32(resfcn256_Conv2d_transpose_1_conv2d_transpose)
+ resfcn256_Conv2d_transpose_1_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_1_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_1_Relu = self.relu32(resfcn256_Conv2d_transpose_1_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_2_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_1_Relu)
+ resfcn256_Conv2d_transpose_2_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_2_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_2_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_2_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_2_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_2_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_2_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_2_strided_slice_1, y=resfcn256_Conv2d_transpose_2_mul_y)
+ resfcn256_Conv2d_transpose_2_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_2_strided_slice_2, y=resfcn256_Conv2d_transpose_2_mul_1_y)
+ resfcn256_Conv2d_transpose_2_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_2_strided_slice, resfcn256_Conv2d_transpose_2_mul,
+ resfcn256_Conv2d_transpose_2_mul_1, resfcn256_Conv2d_transpose_2_stack_3
+ ])
+ resfcn256_Conv2d_transpose_2_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_2_stack, shape=[-1])
+ conv2dbackpropinput_transpose_2 = paddle.transpose(x=resfcn256_Conv2d_transpose_1_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_2_conv2d_transpose_conv38_weight = self.resfcn256_Conv2d_transpose_2_conv2d_transpose_conv38_weight
+ resfcn256_Conv2d_transpose_2_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_2,
+ weight=resfcn256_Conv2d_transpose_2_conv2d_transpose_conv38_weight,
+ stride=[1, 1],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[16, 16])
+ resfcn256_Conv2d_transpose_2_BatchNorm_FusedBatchNorm = self.bn33(resfcn256_Conv2d_transpose_2_conv2d_transpose)
+ resfcn256_Conv2d_transpose_2_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_2_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_2_Relu = self.relu33(resfcn256_Conv2d_transpose_2_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_3_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_2_Relu)
+ resfcn256_Conv2d_transpose_3_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_3_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_3_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_3_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_3_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_3_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_3_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_3_strided_slice_1, y=resfcn256_Conv2d_transpose_3_mul_y)
+ resfcn256_Conv2d_transpose_3_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_3_strided_slice_2, y=resfcn256_Conv2d_transpose_3_mul_1_y)
+ resfcn256_Conv2d_transpose_3_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_3_strided_slice, resfcn256_Conv2d_transpose_3_mul,
+ resfcn256_Conv2d_transpose_3_mul_1, resfcn256_Conv2d_transpose_3_stack_3
+ ])
+ resfcn256_Conv2d_transpose_3_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_3_stack, shape=[-1])
+ conv2dbackpropinput_transpose_3 = paddle.transpose(x=resfcn256_Conv2d_transpose_2_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_3_conv2d_transpose_conv39_weight = self.resfcn256_Conv2d_transpose_3_conv2d_transpose_conv39_weight
+ resfcn256_Conv2d_transpose_3_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_3,
+ weight=resfcn256_Conv2d_transpose_3_conv2d_transpose_conv39_weight,
+ stride=[1, 1],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[16, 16])
+ resfcn256_Conv2d_transpose_3_BatchNorm_FusedBatchNorm = self.bn34(resfcn256_Conv2d_transpose_3_conv2d_transpose)
+ resfcn256_Conv2d_transpose_3_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_3_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_3_Relu = self.relu34(resfcn256_Conv2d_transpose_3_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_4_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_3_Relu)
+ resfcn256_Conv2d_transpose_4_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_4_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_4_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_4_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_4_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_4_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_4_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_4_strided_slice_1, y=resfcn256_Conv2d_transpose_4_mul_y)
+ resfcn256_Conv2d_transpose_4_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_4_strided_slice_2, y=resfcn256_Conv2d_transpose_4_mul_1_y)
+ resfcn256_Conv2d_transpose_4_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_4_strided_slice, resfcn256_Conv2d_transpose_4_mul,
+ resfcn256_Conv2d_transpose_4_mul_1, resfcn256_Conv2d_transpose_4_stack_3
+ ])
+ resfcn256_Conv2d_transpose_4_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_4_stack, shape=[-1])
+ conv2dbackpropinput_transpose_4 = paddle.transpose(x=resfcn256_Conv2d_transpose_3_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_4_conv2d_transpose_conv40_weight = self.resfcn256_Conv2d_transpose_4_conv2d_transpose_conv40_weight
+ resfcn256_Conv2d_transpose_4_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_4,
+ weight=resfcn256_Conv2d_transpose_4_conv2d_transpose_conv40_weight,
+ stride=[2, 2],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[32, 32])
+ resfcn256_Conv2d_transpose_4_BatchNorm_FusedBatchNorm = self.bn35(resfcn256_Conv2d_transpose_4_conv2d_transpose)
+ resfcn256_Conv2d_transpose_4_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_4_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_4_Relu = self.relu35(resfcn256_Conv2d_transpose_4_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_5_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_4_Relu)
+ resfcn256_Conv2d_transpose_5_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_5_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_5_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_5_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_5_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_5_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_5_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_5_strided_slice_1, y=resfcn256_Conv2d_transpose_5_mul_y)
+ resfcn256_Conv2d_transpose_5_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_5_strided_slice_2, y=resfcn256_Conv2d_transpose_5_mul_1_y)
+ resfcn256_Conv2d_transpose_5_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_5_strided_slice, resfcn256_Conv2d_transpose_5_mul,
+ resfcn256_Conv2d_transpose_5_mul_1, resfcn256_Conv2d_transpose_5_stack_3
+ ])
+ resfcn256_Conv2d_transpose_5_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_5_stack, shape=[-1])
+ conv2dbackpropinput_transpose_5 = paddle.transpose(x=resfcn256_Conv2d_transpose_4_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_5_conv2d_transpose_conv41_weight = self.resfcn256_Conv2d_transpose_5_conv2d_transpose_conv41_weight
+ resfcn256_Conv2d_transpose_5_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_5,
+ weight=resfcn256_Conv2d_transpose_5_conv2d_transpose_conv41_weight,
+ stride=[1, 1],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[32, 32])
+ resfcn256_Conv2d_transpose_5_BatchNorm_FusedBatchNorm = self.bn36(resfcn256_Conv2d_transpose_5_conv2d_transpose)
+ resfcn256_Conv2d_transpose_5_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_5_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_5_Relu = self.relu36(resfcn256_Conv2d_transpose_5_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_6_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_5_Relu)
+ resfcn256_Conv2d_transpose_6_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_6_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_6_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_6_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_6_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_6_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_6_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_6_strided_slice_1, y=resfcn256_Conv2d_transpose_6_mul_y)
+ resfcn256_Conv2d_transpose_6_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_6_strided_slice_2, y=resfcn256_Conv2d_transpose_6_mul_1_y)
+ resfcn256_Conv2d_transpose_6_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_6_strided_slice, resfcn256_Conv2d_transpose_6_mul,
+ resfcn256_Conv2d_transpose_6_mul_1, resfcn256_Conv2d_transpose_6_stack_3
+ ])
+ resfcn256_Conv2d_transpose_6_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_6_stack, shape=[-1])
+ conv2dbackpropinput_transpose_6 = paddle.transpose(x=resfcn256_Conv2d_transpose_5_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_6_conv2d_transpose_conv42_weight = self.resfcn256_Conv2d_transpose_6_conv2d_transpose_conv42_weight
+ resfcn256_Conv2d_transpose_6_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_6,
+ weight=resfcn256_Conv2d_transpose_6_conv2d_transpose_conv42_weight,
+ stride=[1, 1],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[32, 32])
+ resfcn256_Conv2d_transpose_6_BatchNorm_FusedBatchNorm = self.bn37(resfcn256_Conv2d_transpose_6_conv2d_transpose)
+ resfcn256_Conv2d_transpose_6_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_6_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_6_Relu = self.relu37(resfcn256_Conv2d_transpose_6_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_7_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_6_Relu)
+ resfcn256_Conv2d_transpose_7_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_7_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_7_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_7_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_7_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_7_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_7_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_7_strided_slice_1, y=resfcn256_Conv2d_transpose_7_mul_y)
+ resfcn256_Conv2d_transpose_7_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_7_strided_slice_2, y=resfcn256_Conv2d_transpose_7_mul_1_y)
+ resfcn256_Conv2d_transpose_7_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_7_strided_slice, resfcn256_Conv2d_transpose_7_mul,
+ resfcn256_Conv2d_transpose_7_mul_1, resfcn256_Conv2d_transpose_7_stack_3
+ ])
+ resfcn256_Conv2d_transpose_7_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_7_stack, shape=[-1])
+ conv2dbackpropinput_transpose_7 = paddle.transpose(x=resfcn256_Conv2d_transpose_6_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_7_conv2d_transpose_conv43_weight = self.resfcn256_Conv2d_transpose_7_conv2d_transpose_conv43_weight
+ resfcn256_Conv2d_transpose_7_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_7,
+ weight=resfcn256_Conv2d_transpose_7_conv2d_transpose_conv43_weight,
+ stride=[2, 2],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[64, 64])
+ resfcn256_Conv2d_transpose_7_BatchNorm_FusedBatchNorm = self.bn38(resfcn256_Conv2d_transpose_7_conv2d_transpose)
+ resfcn256_Conv2d_transpose_7_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_7_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_7_Relu = self.relu38(resfcn256_Conv2d_transpose_7_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_8_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_7_Relu)
+ resfcn256_Conv2d_transpose_8_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_8_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_8_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_8_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_8_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_8_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_8_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_8_strided_slice_1, y=resfcn256_Conv2d_transpose_8_mul_y)
+ resfcn256_Conv2d_transpose_8_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_8_strided_slice_2, y=resfcn256_Conv2d_transpose_8_mul_1_y)
+ resfcn256_Conv2d_transpose_8_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_8_strided_slice, resfcn256_Conv2d_transpose_8_mul,
+ resfcn256_Conv2d_transpose_8_mul_1, resfcn256_Conv2d_transpose_8_stack_3
+ ])
+ resfcn256_Conv2d_transpose_8_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_8_stack, shape=[-1])
+ conv2dbackpropinput_transpose_8 = paddle.transpose(x=resfcn256_Conv2d_transpose_7_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_8_conv2d_transpose_conv44_weight = self.resfcn256_Conv2d_transpose_8_conv2d_transpose_conv44_weight
+ resfcn256_Conv2d_transpose_8_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_8,
+ weight=resfcn256_Conv2d_transpose_8_conv2d_transpose_conv44_weight,
+ stride=[1, 1],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[64, 64])
+ resfcn256_Conv2d_transpose_8_BatchNorm_FusedBatchNorm = self.bn39(resfcn256_Conv2d_transpose_8_conv2d_transpose)
+ resfcn256_Conv2d_transpose_8_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_8_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_8_Relu = self.relu39(resfcn256_Conv2d_transpose_8_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_9_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_8_Relu)
+ resfcn256_Conv2d_transpose_9_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_9_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_9_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_9_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_9_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_9_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_9_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_9_strided_slice_1, y=resfcn256_Conv2d_transpose_9_mul_y)
+ resfcn256_Conv2d_transpose_9_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_9_strided_slice_2, y=resfcn256_Conv2d_transpose_9_mul_1_y)
+ resfcn256_Conv2d_transpose_9_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_9_strided_slice, resfcn256_Conv2d_transpose_9_mul,
+ resfcn256_Conv2d_transpose_9_mul_1, resfcn256_Conv2d_transpose_9_stack_3
+ ])
+ resfcn256_Conv2d_transpose_9_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_9_stack, shape=[-1])
+ conv2dbackpropinput_transpose_9 = paddle.transpose(x=resfcn256_Conv2d_transpose_8_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_9_conv2d_transpose_conv45_weight = self.resfcn256_Conv2d_transpose_9_conv2d_transpose_conv45_weight
+ resfcn256_Conv2d_transpose_9_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_9,
+ weight=resfcn256_Conv2d_transpose_9_conv2d_transpose_conv45_weight,
+ stride=[1, 1],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[64, 64])
+ resfcn256_Conv2d_transpose_9_BatchNorm_FusedBatchNorm = self.bn40(resfcn256_Conv2d_transpose_9_conv2d_transpose)
+ resfcn256_Conv2d_transpose_9_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_9_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_9_Relu = self.relu40(resfcn256_Conv2d_transpose_9_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_10_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_9_Relu)
+ resfcn256_Conv2d_transpose_10_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_10_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_10_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_10_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_10_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_10_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_10_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_10_strided_slice_1, y=resfcn256_Conv2d_transpose_10_mul_y)
+ resfcn256_Conv2d_transpose_10_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_10_strided_slice_2, y=resfcn256_Conv2d_transpose_10_mul_1_y)
+ resfcn256_Conv2d_transpose_10_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_10_strided_slice, resfcn256_Conv2d_transpose_10_mul,
+ resfcn256_Conv2d_transpose_10_mul_1, resfcn256_Conv2d_transpose_10_stack_3
+ ])
+ resfcn256_Conv2d_transpose_10_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_10_stack, shape=[-1])
+ conv2dbackpropinput_transpose_10 = paddle.transpose(x=resfcn256_Conv2d_transpose_9_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_10_conv2d_transpose_conv46_weight = self.resfcn256_Conv2d_transpose_10_conv2d_transpose_conv46_weight
+ resfcn256_Conv2d_transpose_10_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_10,
+ weight=resfcn256_Conv2d_transpose_10_conv2d_transpose_conv46_weight,
+ stride=[2, 2],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[128, 128])
+ resfcn256_Conv2d_transpose_10_BatchNorm_FusedBatchNorm = self.bn41(
+ resfcn256_Conv2d_transpose_10_conv2d_transpose)
+ resfcn256_Conv2d_transpose_10_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_10_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_10_Relu = self.relu41(resfcn256_Conv2d_transpose_10_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_11_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_10_Relu)
+ resfcn256_Conv2d_transpose_11_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_11_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_11_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_11_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_11_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_11_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_11_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_11_strided_slice_1, y=resfcn256_Conv2d_transpose_11_mul_y)
+ resfcn256_Conv2d_transpose_11_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_11_strided_slice_2, y=resfcn256_Conv2d_transpose_11_mul_1_y)
+ resfcn256_Conv2d_transpose_11_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_11_strided_slice, resfcn256_Conv2d_transpose_11_mul,
+ resfcn256_Conv2d_transpose_11_mul_1, resfcn256_Conv2d_transpose_11_stack_3
+ ])
+ resfcn256_Conv2d_transpose_11_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_11_stack, shape=[-1])
+ conv2dbackpropinput_transpose_11 = paddle.transpose(x=resfcn256_Conv2d_transpose_10_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_11_conv2d_transpose_conv47_weight = self.resfcn256_Conv2d_transpose_11_conv2d_transpose_conv47_weight
+ resfcn256_Conv2d_transpose_11_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_11,
+ weight=resfcn256_Conv2d_transpose_11_conv2d_transpose_conv47_weight,
+ stride=[1, 1],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[128, 128])
+ resfcn256_Conv2d_transpose_11_BatchNorm_FusedBatchNorm = self.bn42(
+ resfcn256_Conv2d_transpose_11_conv2d_transpose)
+ resfcn256_Conv2d_transpose_11_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_11_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_11_Relu = self.relu42(resfcn256_Conv2d_transpose_11_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_12_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_11_Relu)
+ resfcn256_Conv2d_transpose_12_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_12_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_12_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_12_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_12_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_12_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_12_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_12_strided_slice_1, y=resfcn256_Conv2d_transpose_12_mul_y)
+ resfcn256_Conv2d_transpose_12_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_12_strided_slice_2, y=resfcn256_Conv2d_transpose_12_mul_1_y)
+ resfcn256_Conv2d_transpose_12_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_12_strided_slice, resfcn256_Conv2d_transpose_12_mul,
+ resfcn256_Conv2d_transpose_12_mul_1, resfcn256_Conv2d_transpose_12_stack_3
+ ])
+ resfcn256_Conv2d_transpose_12_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_12_stack, shape=[-1])
+ conv2dbackpropinput_transpose_12 = paddle.transpose(x=resfcn256_Conv2d_transpose_11_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_12_conv2d_transpose_conv48_weight = self.resfcn256_Conv2d_transpose_12_conv2d_transpose_conv48_weight
+ resfcn256_Conv2d_transpose_12_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_12,
+ weight=resfcn256_Conv2d_transpose_12_conv2d_transpose_conv48_weight,
+ stride=[2, 2],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[256, 256])
+ resfcn256_Conv2d_transpose_12_BatchNorm_FusedBatchNorm = self.bn43(
+ resfcn256_Conv2d_transpose_12_conv2d_transpose)
+ resfcn256_Conv2d_transpose_12_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_12_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_12_Relu = self.relu43(resfcn256_Conv2d_transpose_12_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_13_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_12_Relu)
+ resfcn256_Conv2d_transpose_13_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_13_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_13_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_13_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_13_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_13_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_13_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_13_strided_slice_1, y=resfcn256_Conv2d_transpose_13_mul_y)
+ resfcn256_Conv2d_transpose_13_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_13_strided_slice_2, y=resfcn256_Conv2d_transpose_13_mul_1_y)
+ resfcn256_Conv2d_transpose_13_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_13_strided_slice, resfcn256_Conv2d_transpose_13_mul,
+ resfcn256_Conv2d_transpose_13_mul_1, resfcn256_Conv2d_transpose_13_stack_3
+ ])
+ resfcn256_Conv2d_transpose_13_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_13_stack, shape=[-1])
+ conv2dbackpropinput_transpose_13 = paddle.transpose(x=resfcn256_Conv2d_transpose_12_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_13_conv2d_transpose_conv49_weight = self.resfcn256_Conv2d_transpose_13_conv2d_transpose_conv49_weight
+ resfcn256_Conv2d_transpose_13_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_13,
+ weight=resfcn256_Conv2d_transpose_13_conv2d_transpose_conv49_weight,
+ stride=[1, 1],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[256, 256])
+ resfcn256_Conv2d_transpose_13_BatchNorm_FusedBatchNorm = self.bn44(
+ resfcn256_Conv2d_transpose_13_conv2d_transpose)
+ resfcn256_Conv2d_transpose_13_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_13_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_13_Relu = self.relu44(resfcn256_Conv2d_transpose_13_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_14_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_13_Relu)
+ resfcn256_Conv2d_transpose_14_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_14_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_14_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_14_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_14_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_14_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_14_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_14_strided_slice_1, y=resfcn256_Conv2d_transpose_14_mul_y)
+ resfcn256_Conv2d_transpose_14_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_14_strided_slice_2, y=resfcn256_Conv2d_transpose_14_mul_1_y)
+ resfcn256_Conv2d_transpose_14_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_14_strided_slice, resfcn256_Conv2d_transpose_14_mul,
+ resfcn256_Conv2d_transpose_14_mul_1, resfcn256_Conv2d_transpose_14_stack_3
+ ])
+ resfcn256_Conv2d_transpose_14_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_14_stack, shape=[-1])
+ conv2dbackpropinput_transpose_14 = paddle.transpose(x=resfcn256_Conv2d_transpose_13_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_14_conv2d_transpose_conv50_weight = self.resfcn256_Conv2d_transpose_14_conv2d_transpose_conv50_weight
+ resfcn256_Conv2d_transpose_14_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_14,
+ weight=resfcn256_Conv2d_transpose_14_conv2d_transpose_conv50_weight,
+ stride=[1, 1],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[256, 256])
+ resfcn256_Conv2d_transpose_14_BatchNorm_FusedBatchNorm = self.bn45(
+ resfcn256_Conv2d_transpose_14_conv2d_transpose)
+ resfcn256_Conv2d_transpose_14_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_14_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_14_Relu = self.relu45(resfcn256_Conv2d_transpose_14_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_15_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_14_Relu)
+ resfcn256_Conv2d_transpose_15_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_15_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_15_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_15_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_15_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_15_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_15_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_15_strided_slice_1, y=resfcn256_Conv2d_transpose_15_mul_y)
+ resfcn256_Conv2d_transpose_15_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_15_strided_slice_2, y=resfcn256_Conv2d_transpose_15_mul_1_y)
+ resfcn256_Conv2d_transpose_15_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_15_strided_slice, resfcn256_Conv2d_transpose_15_mul,
+ resfcn256_Conv2d_transpose_15_mul_1, resfcn256_Conv2d_transpose_15_stack_3
+ ])
+ resfcn256_Conv2d_transpose_15_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_15_stack, shape=[-1])
+ conv2dbackpropinput_transpose_15 = paddle.transpose(x=resfcn256_Conv2d_transpose_14_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_15_conv2d_transpose_conv51_weight = self.resfcn256_Conv2d_transpose_15_conv2d_transpose_conv51_weight
+ resfcn256_Conv2d_transpose_15_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_15,
+ weight=resfcn256_Conv2d_transpose_15_conv2d_transpose_conv51_weight,
+ stride=[1, 1],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[256, 256])
+ resfcn256_Conv2d_transpose_15_BatchNorm_FusedBatchNorm = self.bn46(
+ resfcn256_Conv2d_transpose_15_conv2d_transpose)
+ resfcn256_Conv2d_transpose_15_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_15_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_15_Relu = self.relu46(resfcn256_Conv2d_transpose_15_BatchNorm_FusedBatchNorm)
+ resfcn256_Conv2d_transpose_16_Shape = paddle.shape(input=resfcn256_Conv2d_transpose_15_Relu)
+ resfcn256_Conv2d_transpose_16_strided_slice = paddle.slice(
+ input=resfcn256_Conv2d_transpose_16_Shape, axes=[0], starts=[0], ends=[1])
+ resfcn256_Conv2d_transpose_16_strided_slice_1 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_16_Shape, axes=[0], starts=[1], ends=[2])
+ resfcn256_Conv2d_transpose_16_strided_slice_2 = paddle.slice(
+ input=resfcn256_Conv2d_transpose_16_Shape, axes=[0], starts=[2], ends=[3])
+ resfcn256_Conv2d_transpose_16_mul = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_16_strided_slice_1, y=resfcn256_Conv2d_transpose_16_mul_y)
+ resfcn256_Conv2d_transpose_16_mul_1 = paddle.multiply(
+ x=resfcn256_Conv2d_transpose_16_strided_slice_2, y=resfcn256_Conv2d_transpose_16_mul_1_y)
+ resfcn256_Conv2d_transpose_16_stack = paddle.stack(x=[
+ resfcn256_Conv2d_transpose_16_strided_slice, resfcn256_Conv2d_transpose_16_mul,
+ resfcn256_Conv2d_transpose_16_mul_1, resfcn256_Conv2d_transpose_16_stack_3
+ ])
+ resfcn256_Conv2d_transpose_16_stack = paddle.reshape(x=resfcn256_Conv2d_transpose_16_stack, shape=[-1])
+ conv2dbackpropinput_transpose_16 = paddle.transpose(x=resfcn256_Conv2d_transpose_15_Relu, perm=[0, 3, 1, 2])
+ resfcn256_Conv2d_transpose_16_conv2d_transpose_conv52_weight = self.resfcn256_Conv2d_transpose_16_conv2d_transpose_conv52_weight
+ resfcn256_Conv2d_transpose_16_conv2d_transpose = paddle.nn.functional.conv2d_transpose(
+ x=conv2dbackpropinput_transpose_16,
+ weight=resfcn256_Conv2d_transpose_16_conv2d_transpose_conv52_weight,
+ stride=[1, 1],
+ dilation=[1, 1],
+ padding='SAME',
+ output_size=[256, 256])
+ resfcn256_Conv2d_transpose_16_BatchNorm_FusedBatchNorm = self.bn47(
+ resfcn256_Conv2d_transpose_16_conv2d_transpose)
+ resfcn256_Conv2d_transpose_16_BatchNorm_FusedBatchNorm = paddle.transpose(
+ x=resfcn256_Conv2d_transpose_16_BatchNorm_FusedBatchNorm, perm=[0, 2, 3, 1])
+ resfcn256_Conv2d_transpose_16_Sigmoid = self.sigmoid0(resfcn256_Conv2d_transpose_16_BatchNorm_FusedBatchNorm)
+ return resfcn256_Conv2d_transpose_16_Sigmoid
+
+
+def main(Placeholder):
+ # There is a single input.
+ # Placeholder: shape [-1, 256, 256, 3], dtype float32.
+
+ paddle.disable_static()
+ params = paddle.load('/work/ToTransferInHub/PRNet-Paddle/pd_model/model.pdparams')  # conversion-workspace path; adjust to the local model.pdparams
+ model = TFModel()
+ model.set_dict(params, use_structured_name=False)
+ model.eval()
+ out = model(Placeholder)
+ return out
+
+
+if __name__ == '__main__':
+ tensor = paddle.randn([1, 256, 256, 3])
+ print(main(tensor).shape)
diff --git a/modules/image/image_processing/prnet/predictor.py b/modules/image/image_processing/prnet/predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ecad2f969073299e354a6cf78380413d088ef79
--- /dev/null
+++ b/modules/image/image_processing/prnet/predictor.py
@@ -0,0 +1,29 @@
+import numpy as np
+import paddle
+
+from .pd_model.x2paddle_code import TFModel
+
+
+class PosPrediction():
+ def __init__(self, params, resolution_inp=256, resolution_op=256):
+ # -- hyper settings
+ self.resolution_inp = resolution_inp
+ self.resolution_op = resolution_op
+ self.MaxPos = resolution_inp * 1.1
+
+ # network type
+ self.network = TFModel()
+ self.network.set_dict(params, use_structured_name=False)
+ self.network.eval()
+
+ def predict(self, image):
+ paddle.disable_static()
+ image_tensor = paddle.to_tensor(image[np.newaxis, :, :, :], dtype='float32')
+ pos = self.network(image_tensor)
+ pos = pos.numpy()
+ pos = np.squeeze(pos)
+ return pos * self.MaxPos
+
+ def predict_batch(self, images):
+ # images: [N, H, W, C] batch in NHWC layout; the original TF session call
+ # (self.sess.run) is replaced with the converted Paddle network.
+ paddle.disable_static()
+ pos = self.network(paddle.to_tensor(images, dtype='float32')).numpy()
+ return pos * self.MaxPos
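+
+
+if __name__ == '__main__':
+ # Minimal usage sketch. The params path is illustrative (it assumes a local
+ # model.pdparams produced by the x2paddle conversion), not part of the module.
+ params = paddle.load('pd_model/model.pdparams')
+ predictor = PosPrediction(params)
+ image = np.random.rand(256, 256, 3).astype(np.float32)  # normalized NHWC image
+ pos_map = predictor.predict(image)  # (256, 256, 3) position map
+ print(pos_map.shape)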
diff --git a/modules/image/image_processing/prnet/requirements.txt b/modules/image/image_processing/prnet/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5bb7941037ccb6157ac0494fcecc8bb65725f91f
--- /dev/null
+++ b/modules/image/image_processing/prnet/requirements.txt
@@ -0,0 +1,2 @@
+dlib
+scikit-image
diff --git a/modules/image/image_processing/prnet/util.py b/modules/image/image_processing/prnet/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..9629e72e5efa502eec1377cc1417322312468587
--- /dev/null
+++ b/modules/image/image_processing/prnet/util.py
@@ -0,0 +1,10 @@
+import base64
+import cv2
+import numpy as np
+
+
+def base64_to_cv2(b64str):
+ data = base64.b64decode(b64str.encode('utf8'))
+ data = np.frombuffer(data, np.uint8)
+ data = cv2.imdecode(data, cv2.IMREAD_COLOR)
+ return data
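+
+
+if __name__ == '__main__':
+ # Round-trip sanity check: encode a tiny image to base64, then decode it back.
+ img = np.zeros((4, 4, 3), dtype=np.uint8)
+ b64 = base64.b64encode(cv2.imencode('.png', img)[1].tobytes()).decode('utf8')
+ print(base64_to_cv2(b64).shape)  # (4, 4, 3)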
diff --git a/modules/image/image_processing/prnet/utils/__init__.py b/modules/image/image_processing/prnet/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modules/image/image_processing/prnet/utils/cv_plot.py b/modules/image/image_processing/prnet/utils/cv_plot.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4f19f512541f829054dba36ec38915761f69003
--- /dev/null
+++ b/modules/image/image_processing/prnet/utils/cv_plot.py
@@ -0,0 +1,73 @@
+import numpy as np
+import cv2
+
+end_list = np.array([17, 22, 27, 42, 48, 31, 36, 68], dtype=np.int32) - 1
+
+
+def plot_kpt(image, kpt):
+ ''' Draw 68 key points
+ Args:
+ image: the input image
+ kpt: (68, 3).
+ '''
+ image = image.copy()
+ kpt = np.round(kpt).astype(np.int32)
+ for i in range(kpt.shape[0]):
+ st = kpt[i, :2]
+ image = cv2.circle(image, (st[0], st[1]), 1, (0, 0, 255), 2)
+ if i in end_list:
+ continue
+ ed = kpt[i + 1, :2]
+ image = cv2.line(image, (st[0], st[1]), (ed[0], ed[1]), (255, 255, 255), 1)
+ return image
+
+
+def plot_vertices(image, vertices):
+ image = image.copy()
+ vertices = np.round(vertices).astype(np.int32)
+ for i in range(0, vertices.shape[0], 2):
+ st = vertices[i, :2]
+ image = cv2.circle(image, (st[0], st[1]), 1, (255, 0, 0), -1)
+ return image
+
+
+def plot_pose_box(image, P, kpt, color=(0, 255, 0), line_width=2):
+ ''' Draw a 3D box as an annotation of head pose. Ref: https://github.com/yinguobing/head-pose-estimation/blob/master/pose_estimator.py
+ Args:
+ image: the input image
+ P: (3, 4). Affine Camera Matrix.
+ kpt: (68, 3).
+ '''
+ image = image.copy()
+
+ point_3d = []
+ rear_size = 90
+ rear_depth = 0
+ point_3d.append((-rear_size, -rear_size, rear_depth))
+ point_3d.append((-rear_size, rear_size, rear_depth))
+ point_3d.append((rear_size, rear_size, rear_depth))
+ point_3d.append((rear_size, -rear_size, rear_depth))
+ point_3d.append((-rear_size, -rear_size, rear_depth))
+
+ front_size = 105
+ front_depth = 110
+ point_3d.append((-front_size, -front_size, front_depth))
+ point_3d.append((-front_size, front_size, front_depth))
+ point_3d.append((front_size, front_size, front_depth))
+ point_3d.append((front_size, -front_size, front_depth))
+ point_3d.append((-front_size, -front_size, front_depth))
+ point_3d = np.array(point_3d, dtype=np.float64).reshape(-1, 3)
+
+ # Map to 2d image points
+ point_3d_homo = np.hstack((point_3d, np.ones([point_3d.shape[0], 1]))) #n x 4
+ point_2d = point_3d_homo.dot(P.T)[:, :2]
+ point_2d[:, :2] = point_2d[:, :2] - np.mean(point_2d[:4, :2], 0) + np.mean(kpt[:27, :2], 0)
+ point_2d = np.int32(point_2d.reshape(-1, 2))
+
+ # Draw all the lines
+ cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)
+ cv2.line(image, tuple(point_2d[1]), tuple(point_2d[6]), color, line_width, cv2.LINE_AA)
+ cv2.line(image, tuple(point_2d[2]), tuple(point_2d[7]), color, line_width, cv2.LINE_AA)
+ cv2.line(image, tuple(point_2d[3]), tuple(point_2d[8]), color, line_width, cv2.LINE_AA)
+
+ return image
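+
+
+if __name__ == '__main__':
+ # Quick sanity check on synthetic data (no model or real image required).
+ canvas = np.zeros((256, 256, 3), dtype=np.uint8)
+ fake_kpt = np.random.rand(68, 3) * 255  # stand-in for 68 detected landmarks
+ vis = plot_kpt(canvas, fake_kpt)
+ vis = plot_vertices(vis, np.random.rand(1000, 3) * 255)
+ print(vis.shape)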
diff --git a/modules/image/image_processing/prnet/utils/estimate_pose.py b/modules/image/image_processing/prnet/utils/estimate_pose.py
new file mode 100644
index 0000000000000000000000000000000000000000..34be417f48bd995efdd0e942e9e087349e9856e3
--- /dev/null
+++ b/modules/image/image_processing/prnet/utils/estimate_pose.py
@@ -0,0 +1,96 @@
+import numpy as np
+from math import cos, sin, atan2, asin
+
+
+def isRotationMatrix(R):
+ ''' Check whether a matrix is a valid rotation matrix (i.e., approximately orthogonal).
+ '''
+ Rt = np.transpose(R)
+ shouldBeIdentity = np.dot(Rt, R)
+ I = np.identity(3, dtype=R.dtype)
+ n = np.linalg.norm(I - shouldBeIdentity)
+ return n < 1e-6
+
+
+def matrix2angle(R):
+ ''' compute three Euler angles from a Rotation Matrix. Ref: http://www.gregslabaugh.net/publications/euler.pdf
+ Args:
+ R: (3,3). rotation matrix
+ Returns:
+ x: yaw
+ y: pitch
+ z: roll
+ '''
+ # assert(isRotationMatrix(R))
+
+ if R[2, 0] != 1 and R[2, 0] != -1:
+ x = asin(R[2, 0])
+ y = atan2(R[2, 1] / cos(x), R[2, 2] / cos(x))
+ z = atan2(R[1, 0] / cos(x), R[0, 0] / cos(x))
+
+ else: # Gimbal lock
+ z = 0 #can be anything
+ if R[2, 0] == -1:
+ x = np.pi / 2
+ y = z + atan2(R[0, 1], R[0, 2])
+ else:
+ x = -np.pi / 2
+ y = -z + atan2(-R[0, 1], -R[0, 2])
+
+ return x, y, z
+
+
+def P2sRt(P):
+ ''' Decompose the affine camera matrix P into scale, rotation and 2d translation.
+ Args:
+ P: (3, 4). Affine Camera Matrix.
+ Returns:
+ s: scale factor.
+ R: (3, 3). rotation matrix.
+ t2d: (2,). 2d translation.
+ '''
+ t2d = P[:2, 3]
+ R1 = P[0:1, :3]
+ R2 = P[1:2, :3]
+ s = (np.linalg.norm(R1) + np.linalg.norm(R2)) / 2.0
+ r1 = R1 / np.linalg.norm(R1)
+ r2 = R2 / np.linalg.norm(R2)
+ r3 = np.cross(r1, r2)
+
+ R = np.concatenate((r1, r2, r3), 0)
+ return s, R, t2d
+
+
+def compute_similarity_transform(points_static, points_to_transform):
+ #http://nghiaho.com/?page_id=671
+ p0 = np.copy(points_static).T
+ p1 = np.copy(points_to_transform).T
+
+ t0 = -np.mean(p0, axis=1).reshape(3, 1)
+ t1 = -np.mean(p1, axis=1).reshape(3, 1)
+ t_final = t1 - t0
+
+ p0c = p0 + t0
+ p1c = p1 + t1
+
+ covariance_matrix = p0c.dot(p1c.T)
+ U, S, V = np.linalg.svd(covariance_matrix)
+ R = U.dot(V)
+ if np.linalg.det(R) < 0:
+ R[:, 2] *= -1
+
+ rms_d0 = np.sqrt(np.mean(np.linalg.norm(p0c, axis=0)**2))
+ rms_d1 = np.sqrt(np.mean(np.linalg.norm(p1c, axis=0)**2))
+
+ s = (rms_d0 / rms_d1)
+ P = np.c_[s * np.eye(3).dot(R), t_final]
+ return P
+
+
+def estimate_pose(vertices):
+ canonical_vertices = np.load('Data/uv-data/canonical_vertices.npy')
+ P = compute_similarity_transform(vertices, canonical_vertices)
+ _, R, _ = P2sRt(P) # decompose affine matrix to s, R, t
+ pose = matrix2angle(R)
+
+ return P, pose
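+
+
+if __name__ == '__main__':
+ # Synthetic round-trip (estimate_pose itself needs canonical_vertices.npy):
+ # apply a known similarity transform and check the recovered scale.
+ pts = np.random.rand(100, 3)
+ theta = np.pi / 6
+ R_true = np.array([[cos(theta), -sin(theta), 0], [sin(theta), cos(theta), 0], [0, 0, 1]])
+ moved = 2.0 * pts.dot(R_true.T) + np.array([1.0, -2.0, 0.5])
+ P = compute_similarity_transform(moved, pts)
+ s, R, t2d = P2sRt(P)
+ print('recovered scale:', s)  # ~2.0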
diff --git a/modules/image/image_processing/prnet/utils/render.py b/modules/image/image_processing/prnet/utils/render.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed7c11a8cdbec6b83f830cb1d08e3c7a23448dbb
--- /dev/null
+++ b/modules/image/image_processing/prnet/utils/render.py
@@ -0,0 +1,355 @@
+'''
+Author: YadiraF
+Mail: fengyao@sjtu.edu.cn
+'''
+import numpy as np
+
+
+def isPointInTri(point, tri_points):
+ ''' Judge whether the point is in the triangle
+ Method:
+ http://blackpawn.com/texts/pointinpoly/
+ Args:
+ point: [u, v] or [x, y]
+ tri_points: three vertices(2d points) of a triangle. 2 coords x 3 vertices
+ Returns:
+        bool: True if the point lies inside the triangle
+ '''
+ tp = tri_points
+
+ # vectors
+ v0 = tp[:, 2] - tp[:, 0]
+ v1 = tp[:, 1] - tp[:, 0]
+ v2 = point - tp[:, 0]
+
+ # dot products
+ dot00 = np.dot(v0.T, v0)
+ dot01 = np.dot(v0.T, v1)
+ dot02 = np.dot(v0.T, v2)
+ dot11 = np.dot(v1.T, v1)
+ dot12 = np.dot(v1.T, v2)
+
+ # barycentric coordinates
+ if dot00 * dot11 - dot01 * dot01 == 0:
+ inverDeno = 0
+ else:
+ inverDeno = 1 / (dot00 * dot11 - dot01 * dot01)
+
+ u = (dot11 * dot02 - dot01 * dot12) * inverDeno
+ v = (dot00 * dot12 - dot01 * dot02) * inverDeno
+
+ # check if point in triangle
+ return (u >= 0) & (v >= 0) & (u + v < 1)
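+
+
+# Quick sketch (hypothetical helper with a toy triangle, for illustration
+# only): the centroid lies inside the unit right triangle, a far point does not.
+def _demo_isPointInTri():
+    tp = np.array([[0., 1., 0.],   # x coordinates of the 3 vertices
+                   [0., 0., 1.]])  # y coordinates (2 coords x 3 vertices)
+    assert isPointInTri(np.array([1. / 3, 1. / 3]), tp)
+    assert not isPointInTri(np.array([2., 2.]), tp)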
+
+
+def get_point_weight(point, tri_points):
+ ''' Get the weights of the position
+ Methods: https://gamedev.stackexchange.com/questions/23743/whats-the-most-efficient-way-to-find-barycentric-coordinates
+     -m1. compute the areas of the sub-triangles formed by embedding the point P inside the triangle
+     -m2. the method from Christer Ericson's book "Real-Time Collision Detection"; faster, so it is used here.
+ Args:
+ point: [u, v] or [x, y]
+ tri_points: three vertices(2d points) of a triangle. 2 coords x 3 vertices
+ Returns:
+ w0: weight of v0
+ w1: weight of v1
+        w2: weight of v2
+ '''
+ tp = tri_points
+ # vectors
+ v0 = tp[:, 2] - tp[:, 0]
+ v1 = tp[:, 1] - tp[:, 0]
+ v2 = point - tp[:, 0]
+
+ # dot products
+ dot00 = np.dot(v0.T, v0)
+ dot01 = np.dot(v0.T, v1)
+ dot02 = np.dot(v0.T, v2)
+ dot11 = np.dot(v1.T, v1)
+ dot12 = np.dot(v1.T, v2)
+
+ # barycentric coordinates
+ if dot00 * dot11 - dot01 * dot01 == 0:
+ inverDeno = 0
+ else:
+ inverDeno = 1 / (dot00 * dot11 - dot01 * dot01)
+
+ u = (dot11 * dot02 - dot01 * dot12) * inverDeno
+ v = (dot00 * dot12 - dot01 * dot02) * inverDeno
+
+ w0 = 1 - u - v
+ w1 = v
+ w2 = u
+
+ return w0, w1, w2
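+
+
+# Sketch (hypothetical helper, same toy triangle, for illustration only): the
+# barycentric weights sum to one and reconstruct the query point.
+def _demo_get_point_weight():
+    tp = np.array([[0., 1., 0.],
+                   [0., 0., 1.]])  # 2 coords x 3 vertices
+    point = np.array([0.2, 0.3])
+    w0, w1, w2 = get_point_weight(point, tp)
+    recon = w0 * tp[:, 0] + w1 * tp[:, 1] + w2 * tp[:, 2]
+    assert np.allclose(recon, point) and abs(w0 + w1 + w2 - 1.) < 1e-9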
+
+
+def render_texture(vertices, colors, triangles, h, w, c=3):
+ ''' render mesh by z buffer
+ Args:
+ vertices: 3 x nver
+ colors: 3 x nver
+ triangles: 3 x ntri
+ h: height
+        w: width
+        c: number of channels
+    '''
+ # initial
+ image = np.zeros((h, w, c))
+
+ depth_buffer = np.zeros([h, w]) - 999999.
+    # triangle depth: approximate each triangle's depth by the average z of its three vertices (v0, v1, v2), since the vertices are close to each other
+ tri_depth = (vertices[2, triangles[0, :]] + vertices[2, triangles[1, :]] + vertices[2, triangles[2, :]]) / 3.
+ tri_tex = (colors[:, triangles[0, :]] + colors[:, triangles[1, :]] + colors[:, triangles[2, :]]) / 3.
+
+ for i in range(triangles.shape[1]):
+ tri = triangles[:, i] # 3 vertex indices
+
+ # the inner bounding box
+ umin = max(int(np.ceil(np.min(vertices[0, tri]))), 0)
+ umax = min(int(np.floor(np.max(vertices[0, tri]))), w - 1)
+
+ vmin = max(int(np.ceil(np.min(vertices[1, tri]))), 0)
+ vmax = min(int(np.floor(np.max(vertices[1, tri]))), h - 1)
+
+ if umax < umin or vmax < vmin:
+ continue
+
+ for u in range(umin, umax + 1):
+ for v in range(vmin, vmax + 1):
+ if tri_depth[i] > depth_buffer[v, u] and isPointInTri([u, v], vertices[:2, tri]):
+ depth_buffer[v, u] = tri_depth[i]
+ image[v, u, :] = tri_tex[:, i]
+ return image
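+
+
+# Sketch (hypothetical helper with a toy scene, for illustration only):
+# rasterize one red triangle into an 8 x 8 image; covered pixels get its color.
+def _demo_render_texture():
+    vertices = np.array([[1., 6., 1.],   # x
+                         [1., 1., 6.],   # y
+                         [0., 0., 0.]])  # z
+    colors = np.tile([[1.], [0.], [0.]], (1, 3))  # 3 x nver, all red
+    triangles = np.array([[0], [1], [2]])         # 3 x ntri
+    image = render_texture(vertices, colors, triangles, 8, 8)
+    assert image.shape == (8, 8, 3) and image[2, 2, 0] == 1.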
+
+
+def map_texture(src_image,
+ src_vertices,
+ dst_vertices,
+ dst_triangle_buffer,
+ triangles,
+ h,
+ w,
+ c=3,
+ mapping_type='bilinear'):
+ '''
+ Args:
+ triangles: 3 x ntri
+
+ # src
+ src_image: height x width x nchannels
+ src_vertices: 3 x nver
+
+ # dst
+ dst_vertices: 3 x nver
+ dst_triangle_buffer: height x width. the triangle index of each pixel in dst image
+
+ Returns:
+ dst_image: height x width x nchannels
+
+ '''
+ [sh, sw, sc] = src_image.shape
+ dst_image = np.zeros((h, w, c))
+ for y in range(h):
+ for x in range(w):
+ tri_ind = dst_triangle_buffer[y, x]
+ if tri_ind < 0: # no tri in dst image
+ continue
+ #if src_triangles_vis[tri_ind]: # the corresponding triangle in src image is invisible
+ # continue
+
+            # For this triangle index, map the corresponding pixels (inside the triangle) from the src image to the dst image.
+ # Two Methods:
+ # M1. Calculate the corresponding affine matrix from src triangle to dst triangle. Then find the corresponding src position of this dst pixel.
+ # -- ToDo
+ # M2. Calculate the relative position of three vertices in dst triangle, then find the corresponding src position relative to three src vertices.
+ tri = triangles[:, tri_ind]
+            # dst weights: exact barycentric weights of this pixel in the dst triangle
+            w0, w1, w2 = get_point_weight([x, y], dst_vertices[:2, tri])
+            # src: the corresponding (sub-pixel) position in the source image
+            src_texel = w0 * src_vertices[:2, tri[0]] + w1 * src_vertices[:2, tri[1]] + w2 * src_vertices[:2, tri[2]]
+ if src_texel[0] < 0 or src_texel[0] > sw - 1 or src_texel[1] < 0 or src_texel[1] > sh - 1:
+ dst_image[y, x, :] = 0
+ continue
+            # As the mapped coordinates will generally not lie exactly on a texel, the
+            # pixel color must be interpolated from the neighbouring texels; common
+            # methods are area, bilinear and nearest-neighbour interpolation.
+            # nearest neighbour
+ if mapping_type == 'nearest':
+ dst_image[y, x, :] = src_image[int(round(src_texel[1])), int(round(src_texel[0])), :]
+ # bilinear
+ elif mapping_type == 'bilinear':
+ # next 4 pixels
+ ul = src_image[int(np.floor(src_texel[1])), int(np.floor(src_texel[0])), :]
+ ur = src_image[int(np.floor(src_texel[1])), int(np.ceil(src_texel[0])), :]
+ dl = src_image[int(np.ceil(src_texel[1])), int(np.floor(src_texel[0])), :]
+ dr = src_image[int(np.ceil(src_texel[1])), int(np.ceil(src_texel[0])), :]
+
+ yd = src_texel[1] - np.floor(src_texel[1])
+ xd = src_texel[0] - np.floor(src_texel[0])
+ dst_image[y, x, :] = ul * (1 - xd) * (1 - yd) + ur * xd * (1 - yd) + dl * (1 - xd) * yd + dr * xd * yd
+
+ return dst_image
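+
+
+# Sketch (hypothetical helper, toy warp, for illustration only): mapping a
+# uniform source texture onto identical dst geometry reproduces the source
+# colors inside the triangle and leaves the background black.
+def _demo_map_texture():
+    vertices = np.array([[1., 6., 1.], [1., 1., 6.], [0., 0., 0.]])
+    triangles = np.array([[0], [1], [2]])
+    src_image = np.ones((8, 8, 3)) * 0.5
+    tb = get_triangle_buffer(vertices, triangles, 8, 8)  # defined below
+    out = map_texture(src_image, vertices, vertices, tb, triangles, 8, 8)
+    assert np.isclose(out[2, 2, 0], 0.5) and out[7, 7, 0] == 0.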
+
+
+def get_depth_buffer(vertices, triangles, h, w):
+ '''
+ Args:
+ vertices: 3 x nver
+ triangles: 3 x ntri
+ h: height
+ w: width
+ Returns:
+ depth_buffer: height x width
+    ToDo:
+        should x, y be offset by 0.5 to sample at the pixel center?
+        m3 seems to be wrong somewhere.
+    # Each triangle has 3 vertices & each vertex has 3 coordinates x, y, z.
+    # Here, the bigger the z, the closer the point is to the viewer.
+ '''
+    # initialize the depth buffer to the farthest position
+    depth_buffer = np.zeros([h, w]) - 999999.
+
+ ## calculate the depth(z) of each triangle
+    #-m1. z = the center of the sphere through the 3 vertices
+ #center3d = (vertices[:, triangles[0,:]] + vertices[:,triangles[1,:]] + vertices[:, triangles[2,:]])/3.
+ #tri_depth = np.sum(center3d**2, axis = 0)
+ #-m2. z = the center of z(v0, v1, v2)
+ tri_depth = (vertices[2, triangles[0, :]] + vertices[2, triangles[1, :]] + vertices[2, triangles[2, :]]) / 3.
+
+ for i in range(triangles.shape[1]):
+ tri = triangles[:, i] # 3 vertex indices
+
+ # the inner bounding box
+ umin = max(int(np.ceil(np.min(vertices[0, tri]))), 0)
+ umax = min(int(np.floor(np.max(vertices[0, tri]))), w - 1)
+
+ vmin = max(int(np.ceil(np.min(vertices[1, tri]))), 0)
+ vmax = min(int(np.floor(np.max(vertices[1, tri]))), h - 1)
+
+ if umax < umin or vmax < vmin:
+ continue
+
+ for u in range(umin, umax + 1):
+ for v in range(vmin, vmax + 1):
+ #-m3. calculate the accurate depth(z) of each pixel by barycentric weights
+ #w0, w1, w2 = weightsOfpoint([u,v], vertices[:2, tri])
+ #tri_depth = w0*vertices[2,tri[0]] + w1*vertices[2,tri[1]] + w2*vertices[2,tri[2]]
+ if tri_depth[i] > depth_buffer[v, u]: # and is_pointIntri([u,v], vertices[:2, tri]):
+ depth_buffer[v, u] = tri_depth[i]
+
+ return depth_buffer
+
+
+def get_triangle_buffer(vertices, triangles, h, w):
+ '''
+ Args:
+ vertices: 3 x nver
+ triangles: 3 x ntri
+ h: height
+ w: width
+ Returns:
+        triangle_buffer: height x width. the triangle index of each pixel (-1 for no triangle)
+    ToDo:
+        should x, y be offset by 0.5 to sample at the pixel center?
+        m3 seems to be wrong somewhere.
+    # Each triangle has 3 vertices & each vertex has 3 coordinates x, y, z.
+    # Here, the bigger the z, the closer the point is to the viewer.
+ '''
+    # initialize the depth buffer to the farthest position
+    depth_buffer = np.zeros([h, w]) - 999999.
+    triangle_buffer = np.zeros_like(depth_buffer, dtype=np.int32) - 1  # -1 means the pixel has no triangle correspondence
+
+ ## calculate the depth(z) of each triangle
+    #-m1. z = the center of the sphere through the 3 vertices
+ #center3d = (vertices[:, triangles[0,:]] + vertices[:,triangles[1,:]] + vertices[:, triangles[2,:]])/3.
+ #tri_depth = np.sum(center3d**2, axis = 0)
+ #-m2. z = the center of z(v0, v1, v2)
+ tri_depth = (vertices[2, triangles[0, :]] + vertices[2, triangles[1, :]] + vertices[2, triangles[2, :]]) / 3.
+
+ for i in range(triangles.shape[1]):
+ tri = triangles[:, i] # 3 vertex indices
+
+ # the inner bounding box
+ umin = max(int(np.ceil(np.min(vertices[0, tri]))), 0)
+ umax = min(int(np.floor(np.max(vertices[0, tri]))), w - 1)
+
+ vmin = max(int(np.ceil(np.min(vertices[1, tri]))), 0)
+ vmax = min(int(np.floor(np.max(vertices[1, tri]))), h - 1)
+
+ if umax < umin or vmax < vmin:
+ continue
+
+ for u in range(umin, umax + 1):
+ for v in range(vmin, vmax + 1):
+ #-m3. calculate the accurate depth(z) of each pixel by barycentric weights
+ #w0, w1, w2 = weightsOfpoint([u,v], vertices[:2, tri])
+ #tri_depth = w0*vertices[2,tri[0]] + w1*vertices[2,tri[1]] + w2*vertices[2,tri[2]]
+ if tri_depth[i] > depth_buffer[v, u] and isPointInTri([u, v], vertices[:2, tri]):
+ depth_buffer[v, u] = tri_depth[i]
+ triangle_buffer[v, u] = i
+
+ return triangle_buffer
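+
+
+# Sketch (hypothetical helper, same toy triangle as above, for illustration
+# only): covered pixels map to triangle index 0, the rest keep background -1.
+def _demo_get_triangle_buffer():
+    vertices = np.array([[1., 6., 1.], [1., 1., 6.], [0., 0., 0.]])
+    triangles = np.array([[0], [1], [2]])
+    tb = get_triangle_buffer(vertices, triangles, 8, 8)
+    assert tb[2, 2] == 0 and tb[7, 7] == -1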
+
+
+def vis_of_vertices(vertices, triangles, h, w, depth_buffer=None):
+ '''
+ Args:
+ vertices: 3 x nver
+ triangles: 3 x ntri
+ depth_buffer: height x width
+ Returns:
+ vertices_vis: nver. the visibility of each vertex
+ '''
+    if depth_buffer is None:
+ depth_buffer = get_depth_buffer(vertices, triangles, h, w)
+
+ vertices_vis = np.zeros(vertices.shape[1], dtype=bool)
+
+ depth_tmp = np.zeros_like(depth_buffer) - 99999
+ for i in range(vertices.shape[1]):
+ vertex = vertices[:, i]
+
+ if np.floor(vertex[0]) < 0 or np.ceil(vertex[0]) > w - 1 or np.floor(vertex[1]) < 0 or np.ceil(
+ vertex[1]) > h - 1:
+ continue
+
+ # bilinear interp
+ # ul = depth_buffer[int(np.floor(vertex[1])), int(np.floor(vertex[0]))]
+ # ur = depth_buffer[int(np.floor(vertex[1])), int(np.ceil(vertex[0]))]
+ # dl = depth_buffer[int(np.ceil(vertex[1])), int(np.floor(vertex[0]))]
+ # dr = depth_buffer[int(np.ceil(vertex[1])), int(np.ceil(vertex[0]))]
+
+ # yd = vertex[1] - np.floor(vertex[1])
+ # xd = vertex[0] - np.floor(vertex[0])
+
+ # vertex_depth = ul*(1-xd)*(1-yd) + ur*xd*(1-yd) + dl*(1-xd)*yd + dr*xd*yd
+
+ # nearest
+ px = int(np.round(vertex[0]))
+ py = int(np.round(vertex[1]))
+
+ if vertex[2] < depth_tmp[py, px]:
+ continue
+
+ # if vertex[2] > depth_buffer[py, px]:
+ # vertices_vis[i] = True
+ # depth_tmp[py, px] = vertex[2]
+ # elif np.abs(vertex[2] - depth_buffer[py, px]) < 1:
+ # vertices_vis[i] = True
+
+        threshold = 2  # needs tuning.
+ if np.abs(vertex[2] - depth_buffer[py, px]) < threshold:
+ # if np.abs(vertex[2] - vertex_depth) < threshold:
+ vertices_vis[i] = True
+ depth_tmp[py, px] = vertex[2]
+
+ return vertices_vis
diff --git a/modules/image/image_processing/prnet/utils/render_app.py b/modules/image/image_processing/prnet/utils/render_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..a24c959ac1c8e35b64c7fcf0ea55053ff143cc34
--- /dev/null
+++ b/modules/image/image_processing/prnet/utils/render_app.py
@@ -0,0 +1,43 @@
+import numpy as np
+from utils.render import vis_of_vertices, render_texture
+from scipy import ndimage
+
+
+def get_visibility(vertices, triangles, h, w):
+ triangles = triangles.T
+ vertices_vis = vis_of_vertices(vertices.T, triangles, h, w)
+ vertices_vis = vertices_vis.astype(bool)
+ for k in range(2):
+ tri_vis = vertices_vis[triangles[0, :]] | vertices_vis[triangles[1, :]] | vertices_vis[triangles[2, :]]
+ ind = triangles[:, tri_vis]
+ vertices_vis[ind] = True
+ # for k in range(2):
+ # tri_vis = vertices_vis[triangles[0,:]] & vertices_vis[triangles[1,:]] & vertices_vis[triangles[2,:]]
+ # ind = triangles[:, tri_vis]
+ # vertices_vis[ind] = True
+    vertices_vis = vertices_vis.astype(np.float32)  # 1 for visible, 0 for invisible
+ return vertices_vis
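+
+
+# Sketch (hypothetical helper with a toy mesh, for illustration only): a single
+# triangle with nothing in front of it should have all three vertices visible.
+def _demo_get_visibility():
+    vertices = np.array([[1., 1., 0.], [6., 1., 0.], [1., 6., 0.]])  # nver x 3
+    triangles = np.array([[0, 1, 2]])                                # ntri x 3
+    vis = get_visibility(vertices, triangles, 8, 8)
+    assert vis.sum() == 3.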
+
+
+def get_uv_mask(vertices_vis, triangles, uv_coords, h, w, resolution):
+ triangles = triangles.T
+ vertices_vis = vertices_vis.astype(np.float32)
+ uv_mask = render_texture(uv_coords.T, vertices_vis[np.newaxis, :], triangles, resolution, resolution, 1)
+ uv_mask = np.squeeze(uv_mask > 0)
+ uv_mask = ndimage.binary_closing(uv_mask)
+ uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
+ uv_mask = ndimage.binary_closing(uv_mask)
+ uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
+ uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
+ uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
+ uv_mask = uv_mask.astype(np.float32)
+
+ return np.squeeze(uv_mask)
+
+
+def get_depth_image(vertices, triangles, h, w, isShow=False):
+ z = vertices[:, 2:]
+ if isShow:
+        z = z / np.max(z)
+ depth_image = render_texture(vertices.T, z.T, triangles.T, h, w, 1)
+ return np.squeeze(depth_image)
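+
+
+# Sketch (hypothetical helper with a toy mesh, for illustration only): a single
+# triangle at constant depth z = 2 yields an 8 x 8 depth map with value 2
+# inside the triangle.
+def _demo_get_depth_image():
+    vertices = np.array([[1., 1., 2.], [6., 1., 2.], [1., 6., 2.]])  # nver x 3
+    triangles = np.array([[0, 1, 2]])                                # ntri x 3
+    depth = get_depth_image(vertices, triangles, 8, 8)
+    assert depth.shape == (8, 8) and depth[2, 2] == 2.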
diff --git a/modules/image/image_processing/prnet/utils/rotate_vertices.py b/modules/image/image_processing/prnet/utils/rotate_vertices.py
new file mode 100644
index 0000000000000000000000000000000000000000..daf953469b81729edfbc88c71ef4625b75397c4b
--- /dev/null
+++ b/modules/image/image_processing/prnet/utils/rotate_vertices.py
@@ -0,0 +1,12 @@
+import numpy as np
+
+
+def frontalize(vertices):
+ canonical_vertices = np.load('Data/uv-data/canonical_vertices.npy')
+
+ vertices_homo = np.hstack((vertices, np.ones([vertices.shape[0], 1]))) #n x 4
+    P = np.linalg.lstsq(vertices_homo, canonical_vertices, rcond=None)[0].T  # affine matrix, 3 x 4
+ front_vertices = vertices_homo.dot(P.T)
+
+ return front_vertices
diff --git a/modules/image/image_processing/prnet/utils/write.py b/modules/image/image_processing/prnet/utils/write.py
new file mode 100644
index 0000000000000000000000000000000000000000..7502f2def7ddc213c6a7aa75972ed8e756e4d0d2
--- /dev/null
+++ b/modules/image/image_processing/prnet/utils/write.py
@@ -0,0 +1,154 @@
+import numpy as np
+from skimage.io import imsave
+import os
+
+
+def write_asc(path, vertices):
+ '''
+ Args:
+ vertices: shape = (nver, 3)
+ '''
+ if path.split('.')[-1] == 'asc':
+ np.savetxt(path, vertices)
+ else:
+ np.savetxt(path + '.asc', vertices)
+
+
+def write_obj_with_colors(obj_name, vertices, triangles, colors):
+ ''' Save 3D face model with texture represented by colors.
+ Args:
+ obj_name: str
+ vertices: shape = (nver, 3)
+ colors: shape = (nver, 3)
+ triangles: shape = (ntri, 3)
+ '''
+ triangles = triangles.copy()
+    triangles += 1  # meshlab indices start at 1
+
+ if obj_name.split('.')[-1] != 'obj':
+ obj_name = obj_name + '.obj'
+
+ # write obj
+ with open(obj_name, 'w') as f:
+
+ # write vertices & colors
+ for i in range(vertices.shape[0]):
+ # s = 'v {} {} {} \n'.format(vertices[0,i], vertices[1,i], vertices[2,i])
+ s = 'v {} {} {} {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2], colors[i, 0],
+ colors[i, 1], colors[i, 2])
+ f.write(s)
+
+        # write f: vertex indices (winding order reversed)
+        for i in range(triangles.shape[0]):
+            s = 'f {} {} {}\n'.format(triangles[i, 2], triangles[i, 1], triangles[i, 0])
+ f.write(s)
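+
+
+# Usage sketch (hypothetical helper, toy data and file name, for illustration
+# only): write a single gray triangle as a colored .obj file.
+def _demo_write_obj_with_colors():
+    vertices = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])  # nver x 3
+    colors = np.full((3, 3), 0.5)                                    # nver x 3
+    triangles = np.array([[0, 1, 2]])                                # ntri x 3
+    write_obj_with_colors('demo_face', vertices, triangles, colors)  # -> demo_face.obj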
+
+
+def write_obj_with_texture(obj_name, vertices, triangles, texture, uv_coords):
+ ''' Save 3D face model with texture represented by texture map.
+ Ref: https://github.com/patrikhuber/eos/blob/bd00155ebae4b1a13b08bf5a991694d682abbada/include/eos/core/Mesh.hpp
+ Args:
+ obj_name: str
+ vertices: shape = (nver, 3)
+ triangles: shape = (ntri, 3)
+ texture: shape = (256,256,3)
+ uv_coords: shape = (nver, 3) max value<=1
+ '''
+ if obj_name.split('.')[-1] != 'obj':
+ obj_name = obj_name + '.obj'
+ mtl_name = obj_name.replace('.obj', '.mtl')
+ texture_name = obj_name.replace('.obj', '_texture.png')
+
+ triangles = triangles.copy()
+    triangles += 1  # meshlab indices start at 1
+
+ # write obj
+ with open(obj_name, 'w') as f:
+        # first line: reference the material library (mtllib)
+ s = "mtllib {}\n".format(os.path.abspath(mtl_name))
+ f.write(s)
+
+ # write vertices
+ for i in range(vertices.shape[0]):
+ s = 'v {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2])
+ f.write(s)
+
+ # write uv coords
+ for i in range(uv_coords.shape[0]):
+ s = 'vt {} {}\n'.format(uv_coords[i, 0], 1 - uv_coords[i, 1])
+ f.write(s)
+
+ f.write("usemtl FaceTexture\n")
+
+ # write f: ver ind/ uv ind
+ for i in range(triangles.shape[0]):
+ # s = 'f {}/{} {}/{} {}/{}\n'.format(triangles[i,0], triangles[i,0], triangles[i,1], triangles[i,1], triangles[i,2], triangles[i,2])
+ s = 'f {}/{} {}/{} {}/{}\n'.format(triangles[i, 2], triangles[i, 2], triangles[i, 1], triangles[i, 1],
+ triangles[i, 0], triangles[i, 0])
+ f.write(s)
+
+ # write mtl
+ with open(mtl_name, 'w') as f:
+ f.write("newmtl FaceTexture\n")
+ s = 'map_Kd {}\n'.format(os.path.abspath(texture_name)) # map to image
+ f.write(s)
+
+ # write texture as png
+ imsave(texture_name, texture)
+
+
+def write_obj_with_colors_texture(obj_name, vertices, colors, triangles, texture, uv_coords):
+ ''' Save 3D face model with texture.
+ Ref: https://github.com/patrikhuber/eos/blob/bd00155ebae4b1a13b08bf5a991694d682abbada/include/eos/core/Mesh.hpp
+ Args:
+ obj_name: str
+ vertices: shape = (nver, 3)
+ colors: shape = (nver, 3)
+ triangles: shape = (ntri, 3)
+ texture: shape = (256,256,3)
+ uv_coords: shape = (nver, 3) max value<=1
+ '''
+ if obj_name.split('.')[-1] != 'obj':
+ obj_name = obj_name + '.obj'
+ mtl_name = obj_name.replace('.obj', '.mtl')
+ texture_name = obj_name.replace('.obj', '_texture.png')
+
+ triangles = triangles.copy()
+    triangles += 1  # meshlab indices start at 1
+
+ # write obj
+ with open(obj_name, 'w') as f:
+        # first line: reference the material library (mtllib)
+ s = "mtllib {}\n".format(os.path.abspath(mtl_name))
+ f.write(s)
+
+ # write vertices
+ for i in range(vertices.shape[0]):
+ s = 'v {} {} {} {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2], colors[i, 0],
+ colors[i, 1], colors[i, 2])
+ f.write(s)
+
+ # write uv coords
+ for i in range(uv_coords.shape[0]):
+ s = 'vt {} {}\n'.format(uv_coords[i, 0], 1 - uv_coords[i, 1])
+ f.write(s)
+
+ f.write("usemtl FaceTexture\n")
+
+ # write f: ver ind/ uv ind
+ for i in range(triangles.shape[0]):
+ # s = 'f {}/{} {}/{} {}/{}\n'.format(triangles[i,0], triangles[i,0], triangles[i,1], triangles[i,1], triangles[i,2], triangles[i,2])
+ s = 'f {}/{} {}/{} {}/{}\n'.format(triangles[i, 2], triangles[i, 2], triangles[i, 1], triangles[i, 1],
+ triangles[i, 0], triangles[i, 0])
+ f.write(s)
+
+ # write mtl
+ with open(mtl_name, 'w') as f:
+ f.write("newmtl FaceTexture\n")
+ s = 'map_Kd {}\n'.format(os.path.abspath(texture_name)) # map to image
+ f.write(s)
+
+ # write texture as png
+ imsave(texture_name, texture)