Commit 046fc307 authored by Q qingqing01

Remove fluid to use 2.0 API

Parent b8cb839a
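The change is a mechanical migration from the `paddle.fluid` namespace to the PaddlePaddle 2.0 API: `fluid.dygraph.Layer` becomes `paddle.nn.Layer`, `fluid.layers.*` ops move to `paddle.*` or `paddle.nn.functional.*`, and `param_attr`/`parameter_list` arguments are renamed `weight_attr`/`parameters`. A minimal sketch of the post-migration style, assuming a 2.0 install (class spellings below follow the final 2.0 API, e.g. `Conv2D`, which differ slightly from the release-candidate names such as `Conv2d` used in this diff):

    import paddle
    import paddle.nn.functional as F


    class TinyBlock(paddle.nn.Layer):  # was: fluid.dygraph.Layer
        def __init__(self, dim):
            super(TinyBlock, self).__init__()
            self.conv = paddle.nn.Conv2D(dim, dim, 3, padding=1)

        def forward(self, x):
            y = F.relu(self.conv(x))  # was: fluid.layers.relu(x)
            return paddle.tanh(y)     # was: fluid.layers.tanh(y)


    x = paddle.randn([1, 3, 8, 8])
    print(TinyBlock(3)(x).shape)      # [1, 3, 8, 8]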
@@ -20,7 +20,7 @@ import sys
 import paddle.fluid as fluid

-__all__ = ['check_gpu', 'check_version']
+__all__ = ['check_gpu']

 def check_gpu(use_gpu):
@@ -40,19 +40,3 @@ def check_gpu(use_gpu):
         sys.exit(1)
     except Exception as e:
         pass
-
-def check_version():
-    """
-    Log error and exit when the installed version of paddlepaddle is
-    not satisfied.
-    """
-    err = "PaddlePaddle version 1.6 or higher is required, " \
-          "or a suitable develop version is satisfied as well. \n" \
-          "Please make sure the version is good with your code."
-    try:
-        fluid.require_version('1.7.0')
-    except Exception as e:
-        print(err)
-        sys.exit(1)
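`check_version` is dropped because its 1.x guard (`fluid.require_version('1.7.0')`) no longer applies. If a 2.0 floor is still wanted, a hedged sketch using `paddle.utils.require_version`, which I take to be the 2.x counterpart (version string illustrative):

    import sys
    import paddle

    def check_version():
        # exit early when the installed paddle is older than the 2.0 API
        try:
            paddle.utils.require_version('2.0.0')
        except Exception:
            print("PaddlePaddle 2.0.0 or a suitable develop build is required.")
            sys.exit(1)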
@@ -19,12 +19,13 @@ from __future__ import print_function
 import numpy as np

 import paddle
-import paddle.fluid as fluid
+import paddle.nn.functional as F
+from paddle.nn import Layer

 from layers import ConvBN, DeConvBN


-class ResnetBlock(fluid.dygraph.Layer):
+class ResnetBlock(Layer):
     def __init__(self, dim, dropout=False):
         super(ResnetBlock, self).__init__()
         self.dropout = dropout
@@ -32,16 +33,16 @@ class ResnetBlock(fluid.dygraph.Layer):
         self.conv1 = ConvBN(dim, dim, 3, 1, act=None)

     def forward(self, inputs):
-        out_res = fluid.layers.pad2d(inputs, [1, 1, 1, 1], mode="reflect")
+        out_res = F.pad2d(inputs, [1, 1, 1, 1], mode="reflect")
         out_res = self.conv0(out_res)
         if self.dropout:
-            out_res = fluid.layers.dropout(out_res, dropout_prob=0.5)
-        out_res = fluid.layers.pad2d(out_res, [1, 1, 1, 1], mode="reflect")
+            out_res = F.dropout(out_res, p=0.5)
+        out_res = F.pad2d(out_res, [1, 1, 1, 1], mode="reflect")
         out_res = self.conv1(out_res)
         return out_res + inputs


-class ResnetGenerator(fluid.dygraph.Layer):
+class ResnetGenerator(Layer):
     def __init__(self, input_channel, n_blocks=9, dropout=False):
         super(ResnetGenerator, self).__init__()
@@ -65,7 +66,7 @@ class ResnetGenerator(fluid.dygraph.Layer):
             32, input_channel, 7, 1, norm=False, act=False, use_bias=True)

     def forward(self, inputs):
-        pad_input = fluid.layers.pad2d(inputs, [3, 3, 3, 3], mode="reflect")
+        pad_input = F.pad2d(inputs, [3, 3, 3, 3], mode="reflect")
         y = self.conv0(pad_input)
         y = self.conv1(y)
         y = self.conv2(y)
@@ -73,13 +74,13 @@ class ResnetGenerator(fluid.dygraph.Layer):
             y = resnet_block(y)
         y = self.deconv0(y)
         y = self.deconv1(y)
-        y = fluid.layers.pad2d(y, [3, 3, 3, 3], mode="reflect")
+        y = F.pad2d(y, [3, 3, 3, 3], mode="reflect")
         y = self.conv3(y)
-        y = fluid.layers.tanh(y)
+        y = paddle.tanh(y)
         return y


-class NLayerDiscriminator(fluid.dygraph.Layer):
+class NLayerDiscriminator(Layer):
     def __init__(self, input_channel, d_dims=64, d_nlayers=3):
         super(NLayerDiscriminator, self).__init__()
         self.conv0 = ConvBN(
@@ -186,10 +187,10 @@ class GLoss(paddle.nn.Layer):
     def forward(self, input_A, input_B, fake_A, fake_B, cyc_A, cyc_B, idt_A,
                 idt_B, valid_A, valid_B):
         def mse(a, b):
-            return fluid.layers.reduce_mean(fluid.layers.square(a - b))
+            return paddle.reduce_mean(paddle.square(a - b))

         def mae(a, b):  # L1Loss
-            return fluid.layers.reduce_mean(fluid.layers.abs(a - b))
+            return paddle.reduce_mean(paddle.abs(a - b))

         g_A_loss = mse(valid_A, 1.)
         g_B_loss = mse(valid_B, 1.)
@@ -225,6 +226,6 @@ class DLoss(paddle.nn.Layer):
         super(DLoss, self).__init__()

     def forward(self, real, fake):
-        loss = fluid.layers.square(fake) + fluid.layers.square(real - 1.)
-        loss = fluid.layers.reduce_mean(loss / 2.0)
+        loss = paddle.square(fake) + paddle.square(real - 1.)
+        loss = paddle.reduce_mean(loss / 2.0)
         return loss
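`paddle.reduce_mean` here is a transitional alias; the final 2.0 API spells it `paddle.mean`, and the hand-rolled `mse`/`mae` helpers correspond to built-in losses. A sketch under that assumption:

    import paddle
    import paddle.nn.functional as F

    a = paddle.rand([4, 1])
    b = paddle.ones_like(a)
    # equivalent to paddle.mean(paddle.square(a - b)) and paddle.mean(paddle.abs(a - b))
    mse = F.mse_loss(a, b)
    mae = F.l1_loss(a, b)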
@@ -25,16 +25,15 @@ from PIL import Image
 from scipy.misc import imsave

 import paddle
-import paddle.fluid as fluid
 from paddle.static import InputSpec as Input

-from check import check_gpu, check_version
+from check import check_gpu
 from cyclegan import Generator, GeneratorCombine


 def main():
     place = paddle.set_device(FLAGS.device)
-    fluid.enable_dygraph(place) if FLAGS.dynamic else None
+    paddle.disable_static(place) if FLAGS.dynamic else None

     im_shape = [-1, 3, 256, 256]
     input_A = Input(im_shape, 'float32', 'input_A')
@@ -110,5 +109,4 @@ if __name__ == "__main__":
     FLAGS = parser.parse_args()
     print(FLAGS)
     check_gpu(str.lower(FLAGS.device) == 'gpu')
-    check_version()
     main()
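`paddle.disable_static(place)` is the 2.0 replacement for `fluid.enable_dygraph(place)`; 2.0 starts in dynamic mode by default, so the call only has an effect after an earlier `paddle.enable_static()`. A standalone sketch:

    import paddle

    place = paddle.set_device('cpu')  # or 'gpu' when available
    paddle.disable_static(place)      # was: fluid.enable_dygraph(place)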
@@ -13,53 +13,50 @@
 # limitations under the License.
 from __future__ import division

-import paddle.fluid as fluid
-from paddle.fluid.dygraph.nn import Conv2D, Conv2DTranspose, BatchNorm
-
-# cudnn is not better when batch size is 1.
-use_cudnn = False
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn import Layer, Conv2d, BatchNorm2d, ConvTranspose2d
 import numpy as np


-class ConvBN(fluid.dygraph.Layer):
-    """docstring for Conv2D"""
+class ConvBN(Layer):
+    """docstring for Conv2d"""

-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 stride=1,
-                 padding=0,
-                 stddev=0.02,
-                 norm=True,
-                 is_test=False,
-                 act='leaky_relu',
-                 relufactor=0.0,
-                 use_bias=False):
+    def __init__(
+            self,
+            num_channels,
+            num_filters,
+            filter_size,
+            stride=1,
+            padding=0,
+            stddev=0.02,
+            norm=True,
+            #is_test=False,
+            act='leaky_relu',
+            relufactor=0.0,
+            use_bias=False):
         super(ConvBN, self).__init__()

-        pattr = fluid.ParamAttr(
-            initializer=fluid.initializer.NormalInitializer(
-                loc=0.0, scale=stddev))
-        self.conv = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        pattr = paddle.ParamAttr(initializer=nn.initializer.Normal(
+            loc=0.0, scale=stddev))
+        self.conv = Conv2d(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=padding,
-            use_cudnn=use_cudnn,
-            param_attr=pattr,
+            weight_attr=pattr,
             bias_attr=use_bias)
         if norm:
-            self.bn = BatchNorm(
+            self.bn = BatchNorm2d(
                 num_filters,
-                param_attr=fluid.ParamAttr(
-                    initializer=fluid.initializer.NormalInitializer(1.0,
-                                                                    0.02)),
-                bias_attr=fluid.ParamAttr(
-                    initializer=fluid.initializer.Constant(0.0)),
-                is_test=False,
-                trainable_statistics=True)
+                weight_attr=paddle.ParamAttr(
+                    initializer=nn.initializer.Normal(1.0, 0.02)),
+                bias_attr=paddle.ParamAttr(
+                    initializer=nn.initializer.Constant(0.0)),
+                #is_test=False,
+                track_running_stats=True)
         self.relufactor = relufactor
         self.norm = norm
         self.act = act
@@ -70,52 +67,51 @@ class ConvBN(fluid.dygraph.Layer):
             conv = self.bn(conv)
         if self.act == 'leaky_relu':
-            conv = fluid.layers.leaky_relu(conv, alpha=self.relufactor)
+            conv = F.leaky_relu(conv, self.relufactor)
         elif self.act == 'relu':
-            conv = fluid.layers.relu(conv)
+            conv = F.relu(conv)
         else:
             conv = conv
         return conv


-class DeConvBN(fluid.dygraph.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 stride=1,
-                 padding=[0, 0],
-                 outpadding=[0, 0, 0, 0],
-                 stddev=0.02,
-                 act='leaky_relu',
-                 norm=True,
-                 is_test=False,
-                 relufactor=0.0,
-                 use_bias=False):
+class DeConvBN(Layer):
+    def __init__(
+            self,
+            num_channels,
+            num_filters,
+            filter_size,
+            stride=1,
+            padding=[0, 0],
+            outpadding=[0, 0, 0, 0],
+            stddev=0.02,
+            act='leaky_relu',
+            norm=True,
+            #is_test=False,
+            relufactor=0.0,
+            use_bias=False):
         super(DeConvBN, self).__init__()

-        pattr = fluid.ParamAttr(
-            initializer=fluid.initializer.NormalInitializer(
-                loc=0.0, scale=stddev))
-        self._deconv = Conv2DTranspose(
+        pattr = paddle.ParamAttr(initializer=nn.initializer.Normal(
+            loc=0.0, scale=stddev))
+        self._deconv = ConvTranspose2d(
             num_channels,
             num_filters,
-            filter_size=filter_size,
+            kernel_size=filter_size,
             stride=stride,
             padding=padding,
-            param_attr=pattr,
+            weight_attr=pattr,
             bias_attr=use_bias)
         if norm:
-            self.bn = BatchNorm(
+            self.bn = BatchNorm2d(
                 num_filters,
-                param_attr=fluid.ParamAttr(
-                    initializer=fluid.initializer.NormalInitializer(1.0,
-                                                                    0.02)),
-                bias_attr=fluid.ParamAttr(
-                    initializer=fluid.initializer.Constant(0.0)),
-                is_test=False,
-                trainable_statistics=True)
+                weight_attr=paddle.ParamAttr(
+                    initializer=nn.initializer.Normal(1.0, 0.02)),
+                bias_attr=paddle.ParamAttr(
+                    initializer=nn.initializer.Constant(0.0)),
+                #is_test=False,
+                track_running_stats=True)
         self.outpadding = outpadding
         self.relufactor = relufactor
         self.use_bias = use_bias
@@ -124,16 +120,16 @@ class DeConvBN(fluid.dygraph.Layer):
     def forward(self, inputs):
         conv = self._deconv(inputs)
-        conv = fluid.layers.pad2d(
+        conv = F.pad2d(
             conv, paddings=self.outpadding, mode='constant', pad_value=0.0)
         if self.norm:
             conv = self.bn(conv)
         if self.act == 'leaky_relu':
-            conv = fluid.layers.leaky_relu(conv, alpha=self.relufactor)
+            conv = F.leaky_relu(conv, self.relufactor)
         elif self.act == 'relu':
-            conv = fluid.layers.relu(conv)
+            conv = F.relu(conv)
         else:
             conv = conv
...
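`F.pad2d` is likewise a transitional name; later 2.x releases expose the same reflect/constant padding as `F.pad` with an explicit `data_format`. A sketch under that assumption:

    import paddle
    import paddle.nn.functional as F

    x = paddle.randn([1, 3, 8, 8])
    # [left, right, top, bottom] padding on an NCHW tensor
    y = F.pad(x, [1, 1, 1, 1], mode='reflect', data_format='NCHW')
    print(y.shape)  # [1, 3, 10, 10]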
@@ -22,17 +22,16 @@ import numpy as np
 from scipy.misc import imsave

 import paddle
-import paddle.fluid as fluid
 from paddle.static import InputSpec as Input

-from check import check_gpu, check_version
+from check import check_gpu
 from cyclegan import Generator, GeneratorCombine
 import data as data


 def main():
     place = paddle.set_device(FLAGS.device)
-    fluid.enable_dygraph(place) if FLAGS.dynamic else None
+    paddle.disable_static(place) if FLAGS.dynamic else None

     im_shape = [-1, 3, 256, 256]
     input_A = Input(im_shape, 'float32', 'input_A')
@@ -104,5 +103,4 @@ if __name__ == "__main__":
     FLAGS = parser.parse_args()
     print(FLAGS)
     check_gpu(str.lower(FLAGS.device) == 'gpu')
-    check_version()
     main()
@@ -23,10 +23,9 @@ import contextlib
 import time

 import paddle
-import paddle.fluid as fluid
 from paddle.static import InputSpec as Input

-from check import check_gpu, check_version
+from check import check_gpu
 from cyclegan import Generator, Discriminator, GeneratorCombine, GLoss, DLoss
 import data as data
@@ -39,17 +38,17 @@ def opt(parameters):
     lr = [1., 0.8, 0.6, 0.4, 0.2, 0.1]
     bounds = [i * step_per_epoch for i in bounds]
     lr = [i * lr_base for i in lr]
-    optimizer = fluid.optimizer.Adam(
-        learning_rate=fluid.layers.piecewise_decay(
+    optimizer = paddle.optimizer.Adam(
+        learning_rate=paddle.optimizer.lr_scheduler.PiecewiseLR(
             boundaries=bounds, values=lr),
-        parameter_list=parameters,
+        parameters=parameters,
         beta1=0.5)
     return optimizer


 def main():
     place = paddle.set_device(FLAGS.device)
-    fluid.enable_dygraph(place) if FLAGS.dynamic else None
+    paddle.disable_static(place) if FLAGS.dynamic else None

     im_shape = [None, 3, 256, 256]
     input_A = Input(im_shape, 'float32', 'input_A')
@@ -158,5 +157,4 @@ if __name__ == "__main__":
     FLAGS = parser.parse_args()
     print(FLAGS)
     check_gpu(str.lower(FLAGS.device) == 'gpu')
-    check_version()
     main()
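The optimizer rewrite swaps `fluid.layers.piecewise_decay` for a scheduler object and `parameter_list` for `parameters`. `paddle.optimizer.lr_scheduler.PiecewiseLR` is the release-candidate name; the final 2.0 API renames it `paddle.optimizer.lr.PiecewiseDecay`. A standalone sketch of the wiring under that assumption (boundaries/values illustrative):

    import paddle

    model = paddle.nn.Linear(4, 4)
    scheduler = paddle.optimizer.lr.PiecewiseDecay(
        boundaries=[100, 200], values=[2e-4, 1.6e-4, 1.2e-4])
    optimizer = paddle.optimizer.Adam(
        learning_rate=scheduler, parameters=model.parameters(), beta1=0.5)
    # after each optimizer.step() / optimizer.clear_grad(), advance the schedule
    scheduler.step()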