Unverified commit 5b31853d, authored by LielinJiang, committed by GitHub

Merge pull request #11 from LielinJiang/adapt-2.0-api

Adapt 2.0 api
@@ -28,7 +28,7 @@ dataset:
   train:
     name: UnpairedDataset
     dataroot: data/cityscapes
-    num_workers: 4
+    num_workers: 0
     phase: train
     max_dataset_size: inf
     direction: AtoB
......
@@ -25,7 +25,7 @@ dataset:
   train:
     name: PairedDataset
     dataroot: data/cityscapes
-    num_workers: 4
+    num_workers: 0
    phase: train
     max_dataset_size: inf
     direction: BtoA
......
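[Note] Both configs set num_workers to 0, which keeps sample loading in the main process instead of worker subprocesses. A minimal sketch of the effect with paddle.io (the ToyPairs dataset below is illustrative, not part of this PR):

    import numpy as np
    from paddle.io import Dataset, DataLoader

    class ToyPairs(Dataset):  # illustrative stand-in for UnpairedDataset
        def __len__(self):
            return 4

        def __getitem__(self, idx):
            return np.zeros([3, 8, 8], dtype='float32')

    # num_workers=0: load in the main process; >0 prefetches in parallel.
    loader = DataLoader(ToyPairs(), batch_size=1, num_workers=0)
    for batch in loader:  # each batch holds one [1, 3, 8, 8] tensor
        pass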
@@ -3,7 +3,7 @@ import paddle
 import numbers
 import numpy as np
 from multiprocessing import Manager
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv
 from paddle.incubate.hapi.distributed import DistributedBatchSampler
 from ..utils.registry import Registry
......
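[Note] ParallelEnv moves from paddle.imperative to the package root; the same rename recurs throughout this PR. A quick sketch of the new import, with the attribute names the trainer and sampler consume (as exposed by the 2.0 API level this PR targets):

    import paddle
    from paddle import ParallelEnv  # was: from paddle.imperative import ParallelEnv

    env = ParallelEnv()
    print(env.nranks, env.local_rank, env.dev_id)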
@@ -4,7 +4,7 @@ import time
 import logging
 import paddle
-from paddle.imperative import ParallelEnv, DataParallel
+from paddle import ParallelEnv, DataParallel
 from ..datasets.builder import build_dataloader
 from ..models.builder import build_model
@@ -46,7 +46,7 @@ class Trainer:
         self.time_count = {}

     def distributed_data_parallel(self):
-        strategy = paddle.imperative.prepare_context()
+        strategy = paddle.prepare_context()
         for name in self.model.model_names:
             if isinstance(name, str):
                 net = getattr(self.model, 'net' + name)
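[Note] A self-contained sketch of the pattern this method follows; the setattr step is an assumption about what the elided lines do with each wrapped sub-network:

    import paddle
    from paddle import DataParallel

    def distributed_data_parallel(model):
        # prepare_context() moved from paddle.imperative to the package root.
        strategy = paddle.prepare_context()
        for name in model.model_names:
            if isinstance(name, str):
                net = getattr(model, 'net' + name)
                # Assumption: the elided lines re-attach the wrapped network.
                setattr(model, 'net' + name, DataParallel(net, strategy))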
@@ -127,7 +127,7 @@ class Trainer:
     @property
     def current_learning_rate(self):
-        return self.model.optimizers[0].current_step_lr()
+        return self.model.optimizers[0].get_lr()

     def visual(self, results_dir, visual_results=None):
         self.model.compute_visuals()
......
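[Note] current_step_lr() is renamed get_lr() on 2.0 optimizers. A minimal check:

    import paddle

    net = paddle.nn.Linear(4, 4)
    opt = paddle.optimizer.Adam(learning_rate=0.0002,
                                parameters=net.parameters())
    print(opt.get_lr())  # 0.0002; was current_step_lr() in 1.x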
 import paddle
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv
 from .base_model import BaseModel
 from .builder import MODELS
@@ -93,14 +93,14 @@ class CycleGANModel(BaseModel):
         if AtoB:
             if 'A' in input:
-                self.real_A = paddle.imperative.to_variable(input['A'])
+                self.real_A = paddle.to_tensor(input['A'])
             if 'B' in input:
-                self.real_B = paddle.imperative.to_variable(input['B'])
+                self.real_B = paddle.to_tensor(input['B'])
         else:
             if 'B' in input:
-                self.real_A = paddle.imperative.to_variable(input['B'])
+                self.real_A = paddle.to_tensor(input['B'])
             if 'A' in input:
-                self.real_B = paddle.imperative.to_variable(input['A'])
+                self.real_B = paddle.to_tensor(input['A'])

         if 'A_paths' in input:
             self.image_paths = input['A_paths']
......
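[Note] paddle.imperative.to_variable(...) becomes paddle.to_tensor(...). A minimal sketch:

    import numpy as np
    import paddle

    batch = np.random.rand(1, 3, 8, 8).astype('float32')
    real_A = paddle.to_tensor(batch)  # was paddle.imperative.to_variable(batch)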
@@ -3,7 +3,7 @@ import functools
 import numpy as np
 import paddle.nn as nn
-from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout, BCEWithLogitsLoss, Conv2DTranspose, Conv2D, Pad2D, MSELoss
+from ...modules.nn import ReflectionPad2d, LeakyReLU, Dropout, BCEWithLogitsLoss, Pad2D, MSELoss
 from ...modules.norm import build_norm_layer
 from .builder import DISCRIMINATORS
@@ -31,14 +31,14 @@ class NLayerDiscriminator(paddle.fluid.dygraph.Layer):
         kw = 4
         padw = 1
-        sequence = [Conv2D(input_nc, ndf, filter_size=kw, stride=2, padding=padw), LeakyReLU(0.2, True)]
+        sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), LeakyReLU(0.2, True)]
         nf_mult = 1
         nf_mult_prev = 1
         for n in range(1, n_layers):
             nf_mult_prev = nf_mult
             nf_mult = min(2 ** n, 8)
             sequence += [
-                Conv2D(ndf * nf_mult_prev, ndf * nf_mult, filter_size=kw, stride=2, padding=padw, bias_attr=use_bias),
+                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias_attr=use_bias),
                 norm_layer(ndf * nf_mult),
                 LeakyReLU(0.2, True)
             ]
@@ -46,12 +46,12 @@ class NLayerDiscriminator(paddle.fluid.dygraph.Layer):
         nf_mult_prev = nf_mult
         nf_mult = min(2 ** n_layers, 8)
         sequence += [
-            Conv2D(ndf * nf_mult_prev, ndf * nf_mult, filter_size=kw, stride=1, padding=padw, bias_attr=use_bias),
+            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias_attr=use_bias),
             norm_layer(ndf * nf_mult),
             LeakyReLU(0.2, True)
         ]
-        sequence += [Conv2D(ndf * nf_mult, 1, filter_size=kw, stride=1, padding=padw)]
+        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
         self.model = nn.Sequential(*sequence)

     def forward(self, input):
......
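[Note] The project's custom Conv2D wrapper (filter_size=...) gives way to the builtin conv layer (kernel_size=...). The lower-case nn.Conv2d spelling matches the pre-release 2.0 API this PR targets; later stable releases spell it nn.Conv2D. A shape check:

    import paddle
    import paddle.nn as nn

    conv = nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1)
    out = conv(paddle.ones([1, 3, 16, 16], dtype='float32'))
    print(out.shape)  # [1, 64, 8, 8]: (16 + 2*1 - 4)//2 + 1 = 8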
@@ -2,7 +2,7 @@ import paddle
 import paddle.nn as nn
 import functools
-from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout, BCEWithLogitsLoss, Conv2DTranspose, Conv2D, Pad2D, MSELoss
+from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout, BCEWithLogitsLoss, Pad2D, MSELoss
 from ...modules.norm import build_norm_layer
 from .builder import GENERATORS
@@ -37,7 +37,7 @@ class ResnetGenerator(paddle.fluid.dygraph.Layer):
         use_bias = norm_layer == nn.InstanceNorm
         model = [ReflectionPad2d(3),
-                 nn.Conv2D(input_nc, ngf, filter_size=7, padding=0, bias_attr=use_bias),
+                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias_attr=use_bias),
                  norm_layer(ngf),
                  nn.ReLU()]
@@ -45,7 +45,7 @@ class ResnetGenerator(paddle.fluid.dygraph.Layer):
         for i in range(n_downsampling):  # add downsampling layers
             mult = 2 ** i
             model += [
-                nn.Conv2D(ngf * mult, ngf * mult * 2, filter_size=3, stride=2, padding=1, bias_attr=use_bias),
+                nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias_attr=use_bias),
                 norm_layer(ngf * mult * 2),
                 nn.ReLU()]
@@ -57,16 +57,16 @@ class ResnetGenerator(paddle.fluid.dygraph.Layer):
         for i in range(n_downsampling):  # add upsampling layers
             mult = 2 ** (n_downsampling - i)
             model += [
-                nn.Conv2DTranspose(ngf * mult, int(ngf * mult / 2),
-                                   filter_size=3, stride=2,
-                                   padding=1,
-                                   bias_attr=use_bias),
-                Pad2D(paddings=[0, 1, 0, 1], mode='constant', pad_value=0.0),
+                nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
+                                   kernel_size=3, stride=2,
+                                   padding=1,
+                                   output_padding=1,
+                                   bias_attr=use_bias),
                 norm_layer(int(ngf * mult / 2)),
                 nn.ReLU()]

         model += [ReflectionPad2d(3)]
-        model += [nn.Conv2D(ngf, output_nc, filter_size=7, padding=0)]
-        model += [Tanh()]
+        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
+        model += [nn.Tanh()]
         self.model = nn.Sequential(*model)
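[Note] The old code appended Pad2D([0, 1, 0, 1]) after each transpose conv to recover an exact 2x upsample; output_padding=1 now does this inside the layer. With kernel_size=3, stride=2, padding=1: H_out = (H_in - 1)*2 - 2 + 3 + output_padding, so output_padding=1 gives H_out = 2*H_in. A quick check:

    import paddle
    import paddle.nn as nn

    deconv = nn.ConvTranspose2d(8, 4, kernel_size=3, stride=2,
                                padding=1, output_padding=1)
    out = deconv(paddle.ones([1, 8, 16, 16], dtype='float32'))
    print(out.shape)  # [1, 4, 32, 32]: (16 - 1)*2 - 2 + 3 + 1 = 32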
@@ -112,7 +112,7 @@ class ResnetBlock(paddle.fluid.dygraph.Layer):
         else:
             raise NotImplementedError('padding [%s] is not implemented' % padding_type)
-        conv_block += [nn.Conv2D(dim, dim, filter_size=3, padding=p, bias_attr=use_bias), norm_layer(dim), nn.ReLU()]
+        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias), norm_layer(dim), nn.ReLU()]
         if use_dropout:
             conv_block += [Dropout(0.5)]
@@ -125,7 +125,7 @@ class ResnetBlock(paddle.fluid.dygraph.Layer):
             p = 1
         else:
             raise NotImplementedError('padding [%s] is not implemented' % padding_type)
-        conv_block += [nn.Conv2D(dim, dim, filter_size=3, padding=p, bias_attr=use_bias), norm_layer(dim)]
+        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias), norm_layer(dim)]

         return nn.Sequential(*conv_block)
......
@@ -2,7 +2,7 @@ import paddle
 import paddle.nn as nn
 import functools
-from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout, Conv2DTranspose, Conv2D
+from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout
 from ...modules.norm import build_norm_layer
 from .builder import GENERATORS
@@ -77,7 +77,7 @@ class UnetSkipConnectionBlock(paddle.fluid.dygraph.Layer):
         use_bias = norm_layer == nn.InstanceNorm
         if input_nc is None:
             input_nc = outer_nc
-        downconv = Conv2D(input_nc, inner_nc, filter_size=4,
+        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                           stride=2, padding=1, bias_attr=use_bias)
         downrelu = LeakyReLU(0.2, True)
         downnorm = norm_layer(inner_nc)
@@ -85,22 +85,22 @@ class UnetSkipConnectionBlock(paddle.fluid.dygraph.Layer):
         upnorm = norm_layer(outer_nc)

         if outermost:
-            upconv = Conv2DTranspose(inner_nc * 2, outer_nc,
-                                     filter_size=4, stride=2,
+            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
+                                        kernel_size=4, stride=2,
                                         padding=1)
             down = [downconv]
             up = [uprelu, upconv, Tanh()]
             model = down + [submodule] + up
         elif innermost:
-            upconv = Conv2DTranspose(inner_nc, outer_nc,
-                                     filter_size=4, stride=2,
+            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
+                                        kernel_size=4, stride=2,
                                         padding=1, bias_attr=use_bias)
             down = [downrelu, downconv]
             up = [uprelu, upconv, upnorm]
             model = down + up
         else:
-            upconv = Conv2DTranspose(inner_nc * 2, outer_nc,
-                                     filter_size=4, stride=2,
+            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
+                                        kernel_size=4, stride=2,
                                         padding=1, bias_attr=use_bias)
             down = [downrelu, downconv, downnorm]
             up = [uprelu, upconv, upnorm]
......
 import paddle
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv
 from .base_model import BaseModel
 from .builder import MODELS
@@ -72,8 +72,8 @@ class Pix2PixModel(BaseModel):
         """
         AtoB = self.opt.dataset.train.direction == 'AtoB'
-        self.real_A = paddle.imperative.to_variable(input['A' if AtoB else 'B'])
-        self.real_B = paddle.imperative.to_variable(input['B' if AtoB else 'A'])
+        self.real_A = paddle.to_tensor(input['A' if AtoB else 'B'])
+        self.real_B = paddle.to_tensor(input['B' if AtoB else 'A'])
         self.image_paths = input['A_paths' if AtoB else 'B_paths']
......
@@ -129,88 +129,7 @@ def initial_type(
     else:
         bias_attr = False
     return param_attr, bias_attr

-class Conv2D(paddle.nn.Conv2D):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 padding=0,
-                 stride=1,
-                 dilation=1,
-                 groups=1,
-                 param_attr=None,
-                 bias_attr=None,
-                 use_cudnn=True,
-                 act=None,
-                 data_format="NCHW",
-                 dtype='float32',
-                 init_type='normal'):
-        param_attr, bias_attr = initial_type(
-            input=input,
-            op_type='conv',
-            fan_out=num_filters,
-            init=init_type,
-            use_bias=True if bias_attr != False else False,
-            filter_size=filter_size)
-
-        super(Conv2D, self).__init__(num_channels,
-                                     num_filters,
-                                     filter_size,
-                                     padding,
-                                     stride,
-                                     dilation,
-                                     groups,
-                                     param_attr,
-                                     bias_attr,
-                                     use_cudnn,
-                                     act,
-                                     data_format,
-                                     dtype)
-
-class Conv2DTranspose(paddle.nn.Conv2DTranspose):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 output_size=None,
-                 padding=0,
-                 stride=1,
-                 dilation=1,
-                 groups=1,
-                 param_attr=None,
-                 bias_attr=None,
-                 use_cudnn=True,
-                 act=None,
-                 data_format="NCHW",
-                 dtype='float32',
-                 init_type='normal'):
-        param_attr, bias_attr = initial_type(
-            input=input,
-            op_type='deconv',
-            fan_out=num_filters,
-            init=init_type,
-            use_bias=True if bias_attr != False else False,
-            filter_size=filter_size)
-
-        super(Conv2DTranspose, self).__init__(
-            num_channels,
-            num_filters,
-            filter_size,
-            output_size,
-            padding,
-            stride,
-            dilation,
-            groups,
-            param_attr,
-            bias_attr,
-            use_cudnn,
-            act,
-            data_format,
-            dtype)
-
 class Pad2D(fluid.dygraph.Layer):
     def __init__(self, paddings, mode, pad_value=0.0):
......
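[Note] The deleted wrapper classes only existed to route initial_type's initializer into the layer parameters. A hypothetical equivalent with the builtin layer, assuming init_type='normal' meant the usual N(0.0, 0.02) GAN initialization:

    import paddle
    import paddle.nn as nn

    # Hypothetical replacement for the deleted wrapper: pass the initializer
    # through weight_attr instead of subclassing the conv layer.
    w_attr = paddle.ParamAttr(
        initializer=nn.initializer.Normal(mean=0.0, std=0.02))
    conv = nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1,
                     weight_attr=w_attr)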
@@ -13,4 +13,4 @@ def build_optimizer(cfg, parameter_list=None):
     opt_name = cfg_copy.pop('name')
-    return getattr(paddle.optimizer, opt_name)(lr_scheduler, parameter_list=parameter_list, **cfg_copy)
+    return getattr(paddle.optimizer, opt_name)(lr_scheduler, parameters=parameter_list, **cfg_copy)
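[Note] paddle.optimizer classes in 2.0 take parameters= instead of the 1.x parameter_list=. An example call equivalent to what build_optimizer produces for Adam:

    import paddle

    net = paddle.nn.Linear(4, 4)
    # parameter_list= (1.x) becomes parameters= in paddle.optimizer:
    opt = paddle.optimizer.Adam(learning_rate=0.0002, beta1=0.5,
                                parameters=net.parameters())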
@@ -2,7 +2,7 @@ import logging
 import os
 import sys
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv

 def setup_logger(output=None, name="ppgan"):
......
@@ -2,7 +2,7 @@ import os
 import time
 import paddle
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv
 from .logger import setup_logger
@@ -20,4 +20,4 @@ def setup(args, cfg):
     place = paddle.fluid.CUDAPlace(ParallelEnv().dev_id) \
             if ParallelEnv().nranks > 1 else paddle.fluid.CUDAPlace(0)
-    paddle.enable_imperative(place)
+    paddle.disable_static(place)
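[Note] enable_imperative(place) becomes disable_static(place): dygraph is the default mode in 2.0, so the call now opts out of static graph rather than opting into imperative mode. A standalone sketch mirroring setup() above:

    import paddle
    from paddle import ParallelEnv

    place = paddle.fluid.CUDAPlace(ParallelEnv().dev_id) \
        if ParallelEnv().nranks > 1 else paddle.fluid.CUDAPlace(0)
    paddle.disable_static(place)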