Commit 046fc307 authored by: Q qingqing01

Remove fluid to use 2.0 API

Parent b8cb839a
......@@ -20,7 +20,7 @@ import sys
import paddle.fluid as fluid
__all__ = ['check_gpu', 'check_version']
__all__ = ['check_gpu']
def check_gpu(use_gpu):
......@@ -40,19 +40,3 @@ def check_gpu(use_gpu):
sys.exit(1)
except Exception as e:
pass
def check_version():
"""
Log error and exit when the installed version of paddlepaddle is
not satisfied.
"""
err = "PaddlePaddle version 1.6 or higher is required, " \
"or a suitable develop version is satisfied as well. \n" \
"Please make sure the version is good with your code." \
try:
fluid.require_version('1.7.0')
except Exception as e:
print(err)
sys.exit(1)
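
Review note: with the 2.0 port dropping check_version(), the same guard can be rebuilt on the stable API. A minimal sketch, assuming paddle.is_compiled_with_cuda() and paddle.utils.require_version() are available in the installed release:

import sys
import paddle

def check_gpu(use_gpu):
    # Exit early when use_gpu=True but the wheel was compiled without CUDA.
    if use_gpu and not paddle.is_compiled_with_cuda():
        print("Config use_gpu=True, but this paddlepaddle wheel is compiled "
              "without CUDA; please set use_gpu=False.")
        sys.exit(1)

def check_version(min_version='2.0.0'):
    # Hypothetical stand-in for the removed helper.
    paddle.utils.require_version(min_version)  # raises if the install is older
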
......@@ -19,12 +19,13 @@ from __future__ import print_function
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.nn.functional as F
from paddle.nn import Layer
from layers import ConvBN, DeConvBN
class ResnetBlock(fluid.dygraph.Layer):
class ResnetBlock(Layer):
def __init__(self, dim, dropout=False):
super(ResnetBlock, self).__init__()
self.dropout = dropout
......@@ -32,16 +33,16 @@ class ResnetBlock(fluid.dygraph.Layer):
self.conv1 = ConvBN(dim, dim, 3, 1, act=None)
def forward(self, inputs):
out_res = fluid.layers.pad2d(inputs, [1, 1, 1, 1], mode="reflect")
out_res = F.pad2d(inputs, [1, 1, 1, 1], mode="reflect")
out_res = self.conv0(out_res)
if self.dropout:
out_res = fluid.layers.dropout(out_res, dropout_prob=0.5)
out_res = fluid.layers.pad2d(out_res, [1, 1, 1, 1], mode="reflect")
out_res = F.dropout(out_res, p=0.5)
out_res = F.pad2d(out_res, [1, 1, 1, 1], mode="reflect")
out_res = self.conv1(out_res)
return out_res + inputs
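
Note: pad2d and dropout here are transitional 2.0-rc names. In released 2.x the block maps onto F.pad and F.dropout; a sketch under that assumption (ConvBN comes from layers.py, and F.pad takes [left, right, top, bottom] for NCHW input, so the symmetric [1, 1, 1, 1] pad is unchanged):

import paddle.nn.functional as F
from paddle.nn import Layer

class ResnetBlock(Layer):
    def __init__(self, dim, dropout=False):
        super(ResnetBlock, self).__init__()
        self.dropout = dropout
        self.conv0 = ConvBN(dim, dim, 3, 1)            # from layers.py
        self.conv1 = ConvBN(dim, dim, 3, 1, act=None)  # no activation on tail

    def forward(self, inputs):
        out = F.pad(inputs, [1, 1, 1, 1], mode='reflect')
        out = self.conv0(out)
        if self.dropout:
            out = F.dropout(out, p=0.5)  # p is the drop probability
        out = F.pad(out, [1, 1, 1, 1], mode='reflect')
        out = self.conv1(out)
        return out + inputs  # residual shortcut
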
class ResnetGenerator(fluid.dygraph.Layer):
class ResnetGenerator(Layer):
def __init__(self, input_channel, n_blocks=9, dropout=False):
super(ResnetGenerator, self).__init__()
......@@ -65,7 +66,7 @@ class ResnetGenerator(fluid.dygraph.Layer):
32, input_channel, 7, 1, norm=False, act=False, use_bias=True)
def forward(self, inputs):
pad_input = fluid.layers.pad2d(inputs, [3, 3, 3, 3], mode="reflect")
pad_input = F.pad2d(inputs, [3, 3, 3, 3], mode="reflect")
y = self.conv0(pad_input)
y = self.conv1(y)
y = self.conv2(y)
......@@ -73,13 +74,13 @@ class ResnetGenerator(fluid.dygraph.Layer):
y = resnet_block(y)
y = self.deconv0(y)
y = self.deconv1(y)
y = fluid.layers.pad2d(y, [3, 3, 3, 3], mode="reflect")
y = F.pad2d(y, [3, 3, 3, 3], mode="reflect")
y = self.conv3(y)
y = fluid.layers.tanh(y)
y = paddle.tanh(y)
return y
class NLayerDiscriminator(fluid.dygraph.Layer):
class NLayerDiscriminator(Layer):
def __init__(self, input_channel, d_dims=64, d_nlayers=3):
super(NLayerDiscriminator, self).__init__()
self.conv0 = ConvBN(
......@@ -186,10 +187,10 @@ class GLoss(paddle.nn.Layer):
def forward(self, input_A, input_B, fake_A, fake_B, cyc_A, cyc_B, idt_A,
idt_B, valid_A, valid_B):
def mse(a, b):
return fluid.layers.reduce_mean(fluid.layers.square(a - b))
return paddle.reduce_mean(paddle.square(a - b))
def mae(a, b): # L1Loss
return fluid.layers.reduce_mean(fluid.layers.abs(a - b))
return paddle.reduce_mean(paddle.abs(a - b))
g_A_loss = mse(valid_A, 1.)
g_B_loss = mse(valid_B, 1.)
......@@ -225,6 +226,6 @@ class DLoss(paddle.nn.Layer):
super(DLoss, self).__init__()
def forward(self, real, fake):
loss = fluid.layers.square(fake) + fluid.layers.square(real - 1.)
loss = fluid.layers.reduce_mean(loss / 2.0)
loss = paddle.square(fake) + paddle.square(real - 1.)
loss = paddle.reduce_mean(loss / 2.0)
return loss
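
Note: paddle.reduce_mean is likewise transitional; stable 2.x spells it paddle.mean. A sketch of the loss helpers in that form:

import paddle

def mse(a, b):
    # mean squared error (LSGAN adversarial term)
    return paddle.mean(paddle.square(a - b))

def mae(a, b):
    # L1 loss, used for the cycle and identity terms
    return paddle.mean(paddle.abs(a - b))

def d_loss(real, fake):
    # discriminator target: real -> 1, fake -> 0
    return paddle.mean((paddle.square(fake) + paddle.square(real - 1.)) / 2.0)
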
......@@ -25,16 +25,15 @@ from PIL import Image
from scipy.misc import imsave
import paddle
import paddle.fluid as fluid
from paddle.static import InputSpec as Input
from check import check_gpu, check_version
from check import check_gpu
from cyclegan import Generator, GeneratorCombine
def main():
place = paddle.set_device(FLAGS.device)
fluid.enable_dygraph(place) if FLAGS.dynamic else None
paddle.disable_static(place) if FLAGS.dynamic else None
im_shape = [-1, 3, 256, 256]
input_A = Input(im_shape, 'float32', 'input_A')
......@@ -110,5 +109,4 @@ if __name__ == "__main__":
FLAGS = parser.parse_args()
print(FLAGS)
check_gpu(str.lower(FLAGS.device) == 'gpu')
check_version()
main()
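
Note: paddle.disable_static(place) is the 2.0 replacement for fluid.enable_dygraph(place); in released 2.x dygraph is already the default, so the toggle reduces to opting into static mode. A sketch of the entry idiom under that assumption:

import paddle

def setup(device='gpu', dynamic=True):
    place = paddle.set_device(device)  # 'cpu' or 'gpu'
    if not dynamic:
        paddle.enable_static()  # dygraph is the 2.x default
    return place
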
......@@ -13,53 +13,50 @@
# limitations under the License.
from __future__ import division
import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Conv2D, Conv2DTranspose, BatchNorm
# cudnn is not better when batch size is 1.
use_cudnn = False
import numpy as np
class ConvBN(fluid.dygraph.Layer):
"""docstring for Conv2D"""
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
padding=0,
stddev=0.02,
norm=True,
is_test=False,
act='leaky_relu',
relufactor=0.0,
use_bias=False):
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Layer, Conv2d, BatchNorm2d, ConvTranspose2d
class ConvBN(Layer):
"""docstring for Conv2d"""
def __init__(
self,
num_channels,
num_filters,
filter_size,
stride=1,
padding=0,
stddev=0.02,
norm=True,
#is_test=False,
act='leaky_relu',
relufactor=0.0,
use_bias=False):
super(ConvBN, self).__init__()
pattr = fluid.ParamAttr(
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=stddev))
self.conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
pattr = paddle.ParamAttr(initializer=nn.initializer.Normal(
loc=0.0, scale=stddev))
self.conv = Conv2d(
in_channels=num_channels,
out_channels=num_filters,
kernel_size=filter_size,
stride=stride,
padding=padding,
use_cudnn=use_cudnn,
param_attr=pattr,
weight_attr=pattr,
bias_attr=use_bias)
if norm:
self.bn = BatchNorm(
self.bn = BatchNorm2d(
num_filters,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.NormalInitializer(1.0,
0.02)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.0)),
is_test=False,
trainable_statistics=True)
weight_attr=paddle.ParamAttr(
initializer=nn.initializer.Normal(1.0, 0.02)),
bias_attr=paddle.ParamAttr(
initializer=nn.initializer.Constant(0.0)),
#is_test=False,
track_running_stats=True)
self.relufactor = relufactor
self.norm = norm
self.act = act
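
Note: Conv2d, BatchNorm2d and ConvTranspose2d are 2.0-rc spellings; released 2.x uses paddle.nn.Conv2D, paddle.nn.BatchNorm2D and paddle.nn.Conv2DTranspose. A sketch of the same conv+BN setup against those names (channel sizes are illustrative):

import paddle
import paddle.nn as nn

# N(0, 0.02) weight init, matching the DCGAN-style convention above.
w_init = paddle.ParamAttr(initializer=nn.initializer.Normal(0.0, 0.02))

conv = nn.Conv2D(in_channels=3, out_channels=64, kernel_size=7,
                 stride=1, padding=0, weight_attr=w_init, bias_attr=False)
bn = nn.BatchNorm2D(
    64,
    weight_attr=paddle.ParamAttr(initializer=nn.initializer.Normal(1.0, 0.02)),
    bias_attr=paddle.ParamAttr(initializer=nn.initializer.Constant(0.0)))
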
......@@ -70,52 +67,51 @@ class ConvBN(fluid.dygraph.Layer):
conv = self.bn(conv)
if self.act == 'leaky_relu':
conv = fluid.layers.leaky_relu(conv, alpha=self.relufactor)
conv = F.leaky_relu(conv, self.relufactor)
elif self.act == 'relu':
conv = fluid.layers.relu(conv)
conv = F.relu(conv)
else:
conv = conv
return conv
class DeConvBN(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
padding=[0, 0],
outpadding=[0, 0, 0, 0],
stddev=0.02,
act='leaky_relu',
norm=True,
is_test=False,
relufactor=0.0,
use_bias=False):
class DeConvBN(Layer):
def __init__(
self,
num_channels,
num_filters,
filter_size,
stride=1,
padding=[0, 0],
outpadding=[0, 0, 0, 0],
stddev=0.02,
act='leaky_relu',
norm=True,
#is_test=False,
relufactor=0.0,
use_bias=False):
super(DeConvBN, self).__init__()
pattr = fluid.ParamAttr(
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=stddev))
self._deconv = Conv2DTranspose(
pattr = paddle.ParamAttr(initializer=nn.initializer.Normal(
loc=0.0, scale=stddev))
self._deconv = ConvTranspose2d(
num_channels,
num_filters,
filter_size=filter_size,
kernel_size=filter_size,
stride=stride,
padding=padding,
param_attr=pattr,
weight_attr=pattr,
bias_attr=use_bias)
if norm:
self.bn = BatchNorm(
self.bn = BatchNorm2d(
num_filters,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.NormalInitializer(1.0,
0.02)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.0)),
is_test=False,
trainable_statistics=True)
weight_attr=paddle.ParamAttr(
initializer=nn.initializer.Normal(1.0, 0.02)),
bias_attr=paddle.ParamAttr(
initializer=nn.initializer.Constant(0.0)),
#is_test=False,
track_running_stats=True)
self.outpadding = outpadding
self.relufactor = relufactor
self.use_bias = use_bias
......@@ -124,16 +120,16 @@ class DeConvBN(fluid.dygraph.Layer):
def forward(self, inputs):
conv = self._deconv(inputs)
conv = fluid.layers.pad2d(
conv = F.pad2d(
conv, paddings=self.outpadding, mode='constant', pad_value=0.0)
if self.norm:
conv = self.bn(conv)
if self.act == 'leaky_relu':
conv = fluid.layers.leaky_relu(conv, alpha=self.relufactor)
conv = F.leaky_relu(conv, self.relufactor)
elif self.act == 'relu':
conv = fluid.layers.relu(conv)
conv = F.relu(conv)
else:
conv = conv
......
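
Note: the deconv forward follows the same migration; one caveat is pad ordering. fluid.layers.pad2d took paddings as [top, bottom, left, right], while 2.x F.pad takes [left, right, top, bottom] for 4-D NCHW input, so an asymmetric outpadding list may need reordering. A sketch of the forward under F.pad:

import paddle.nn.functional as F

def forward(self, inputs):
    conv = self._deconv(inputs)
    # F.pad order is [left, right, top, bottom]; reorder outpadding if asymmetric.
    conv = F.pad(conv, self.outpadding, mode='constant', value=0.0)
    if self.norm:
        conv = self.bn(conv)
    if self.act == 'leaky_relu':
        conv = F.leaky_relu(conv, negative_slope=self.relufactor)
    elif self.act == 'relu':
        conv = F.relu(conv)
    return conv
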
......@@ -22,17 +22,16 @@ import numpy as np
from scipy.misc import imsave
import paddle
import paddle.fluid as fluid
from paddle.static import InputSpec as Input
from check import check_gpu, check_version
from check import check_gpu
from cyclegan import Generator, GeneratorCombine
import data as data
def main():
place = paddle.set_device(FLAGS.device)
fluid.enable_dygraph(place) if FLAGS.dynamic else None
paddle.disable_static(place) if FLAGS.dynamic else None
im_shape = [-1, 3, 256, 256]
input_A = Input(im_shape, 'float32', 'input_A')
......@@ -104,5 +103,4 @@ if __name__ == "__main__":
FLAGS = parser.parse_args()
print(FLAGS)
check_gpu(str.lower(FLAGS.device) == 'gpu')
check_version()
main()
......@@ -23,10 +23,9 @@ import contextlib
import time
import paddle
import paddle.fluid as fluid
from paddle.static import InputSpec as Input
from check import check_gpu, check_version
from check import check_gpu
from cyclegan import Generator, Discriminator, GeneratorCombine, GLoss, DLoss
import data as data
......@@ -39,17 +38,17 @@ def opt(parameters):
lr = [1., 0.8, 0.6, 0.4, 0.2, 0.1]
bounds = [i * step_per_epoch for i in bounds]
lr = [i * lr_base for i in lr]
optimizer = fluid.optimizer.Adam(
learning_rate=fluid.layers.piecewise_decay(
optimizer = paddle.optimizer.Adam(
learning_rate=paddle.optimizer.lr_scheduler.PiecewiseLR(
boundaries=bounds, values=lr),
parameter_list=parameters,
parameters=parameters,
beta1=0.5)
return optimizer
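
Note: paddle.optimizer.lr_scheduler.PiecewiseLR is the rc-era path; released 2.x names it paddle.optimizer.lr.PiecewiseDecay. A sketch with illustrative boundaries (the real epoch list is cut off above; values must be one entry longer than boundaries):

import paddle

def opt(parameters, lr_base=2e-4, step_per_epoch=1000):
    bounds = [e * step_per_epoch for e in [100, 120, 140, 160, 180]]  # illustrative
    values = [f * lr_base for f in [1., 0.8, 0.6, 0.4, 0.2, 0.1]]
    scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=bounds, values=values)
    return paddle.optimizer.Adam(learning_rate=scheduler,
                                 parameters=parameters, beta1=0.5)
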
def main():
place = paddle.set_device(FLAGS.device)
fluid.enable_dygraph(place) if FLAGS.dynamic else None
paddle.disable_static(place) if FLAGS.dynamic else None
im_shape = [None, 3, 256, 256]
input_A = Input(im_shape, 'float32', 'input_A')
......@@ -158,5 +157,4 @@ if __name__ == "__main__":
FLAGS = parser.parse_args()
print(FLAGS)
check_gpu(str.lower(FLAGS.device) == 'gpu')
check_version()
main()
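
Note: the Input alias above is paddle.static.InputSpec, which declares placeholder shape, dtype and name for the high-level Model API. A hedged usage sketch (GeneratorCombine is the network from cyclegan.py; its constructor arguments are omitted here):

import paddle
from paddle.static import InputSpec

input_A = InputSpec([None, 3, 256, 256], 'float32', 'input_A')  # None = any batch
input_B = InputSpec([None, 3, 256, 256], 'float32', 'input_B')

# paddle.Model wraps a Layer and consumes the specs in static mode.
model = paddle.Model(GeneratorCombine(), inputs=[input_A, input_B])
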