Unverified · Commit 5b31853d authored by LielinJiang, committed by GitHub

Merge pull request #11 from LielinJiang/adapt-2.0-api

Adapt 2.0 api
......
@@ -28,7 +28,7 @@ dataset:
   train:
     name: UnpairedDataset
     dataroot: data/cityscapes
-    num_workers: 4
+    num_workers: 0
     phase: train
     max_dataset_size: inf
     direction: AtoB
......
......
@@ -25,7 +25,7 @@ dataset:
   train:
     name: PairedDataset
     dataroot: data/cityscapes
-    num_workers: 4
+    num_workers: 0
     phase: train
     max_dataset_size: inf
     direction: BtoA
......
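Note: both the CycleGAN and Pix2Pix configs change `num_workers` from 4 to 0, so the DataLoader reads samples in the main process instead of forking worker subprocesses. The commit does not state the motivation; presumably this sidesteps multiprocessing issues with the Manager-based dataset cache (see the `from multiprocessing import Manager` import below) during the API migration.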
......
@@ -3,7 +3,7 @@ import paddle
 import numbers
 import numpy as np
 from multiprocessing import Manager
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv
 from paddle.incubate.hapi.distributed import DistributedBatchSampler
 from ..utils.registry import Registry
......
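Note: `ParallelEnv` moves from `paddle.imperative` to the top-level `paddle` namespace; the same one-line change repeats in the trainer, models, logger, and setup code below. A minimal sketch of how this code queries it (attribute names as used elsewhere in the diff, assuming the transitional 2.0 API this commit targets):

```python
# Sketch of the distributed-environment query under the 2.0-style import.
from paddle import ParallelEnv

env = ParallelEnv()
print(env.nranks)   # number of trainer processes (1 when not distributed)
print(env.dev_id)   # GPU device id assigned to this process
```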
......
@@ -4,7 +4,7 @@ import time
 import logging
 import paddle
-from paddle.imperative import ParallelEnv, DataParallel
+from paddle import ParallelEnv, DataParallel
 from ..datasets.builder import build_dataloader
 from ..models.builder import build_model
......
@@ -46,7 +46,7 @@ class Trainer:
         self.time_count = {}

     def distributed_data_parallel(self):
-        strategy = paddle.imperative.prepare_context()
+        strategy = paddle.prepare_context()
         for name in self.model.model_names:
             if isinstance(name, str):
                 net = getattr(self.model, 'net' + name)
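Note: `paddle.imperative.prepare_context()` becomes `paddle.prepare_context()`. For context, a minimal sketch of the wrapping pattern `distributed_data_parallel` applies to each sub-network, using the transitional names imported above (`nn.Linear` is a stand-in for a real sub-network):

```python
import paddle
import paddle.nn as nn
from paddle import DataParallel

# Sketch of the data-parallel wrapping done per sub-network above.
strategy = paddle.prepare_context()   # was: paddle.imperative.prepare_context()
net = nn.Linear(8, 8)                 # stand-in for getattr(self.model, 'net' + name)
net = DataParallel(net, strategy)     # gradients sync across trainers on backward
```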
......
@@ -127,7 +127,7 @@ class Trainer:
     @property
     def current_learning_rate(self):
-        return self.model.optimizers[0].current_step_lr()
+        return self.model.optimizers[0].get_lr()

     def visual(self, results_dir, visual_results=None):
         self.model.compute_visuals()
......
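Note: the 1.x dygraph optimizer's `current_step_lr()` is renamed `get_lr()` in 2.0. A hedged sketch (the Adam call mirrors the `build_optimizer` change later in this commit):

```python
import paddle

# Sketch: reading the current learning rate under the 2.0 optimizer API.
linear = paddle.nn.Linear(8, 8)
opt = paddle.optimizer.Adam(learning_rate=0.0002, parameters=linear.parameters())
print(opt.get_lr())   # was: opt.current_step_lr()
```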
 import paddle
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv
 from .base_model import BaseModel
 from .builder import MODELS
......
@@ -93,14 +93,14 @@ class CycleGANModel(BaseModel):
         if AtoB:
             if 'A' in input:
-                self.real_A = paddle.imperative.to_variable(input['A'])
+                self.real_A = paddle.to_tensor(input['A'])
             if 'B' in input:
-                self.real_B = paddle.imperative.to_variable(input['B'])
+                self.real_B = paddle.to_tensor(input['B'])
         else:
             if 'B' in input:
-                self.real_A = paddle.imperative.to_variable(input['B'])
+                self.real_A = paddle.to_tensor(input['B'])
             if 'A' in input:
-                self.real_B = paddle.imperative.to_variable(input['A'])
+                self.real_B = paddle.to_tensor(input['A'])

         if 'A_paths' in input:
             self.image_paths = input['A_paths']
......
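Note: `paddle.imperative.to_variable` becomes `paddle.to_tensor`; both convert a numpy array into a tensor usable in dygraph mode. A minimal sketch of the conversion `set_input` performs (the batch shape is a hypothetical example):

```python
import numpy as np
import paddle

# Sketch: numpy batch -> tensor, as in set_input above.
batch = np.random.rand(1, 3, 256, 256).astype('float32')  # hypothetical input['A']
real_A = paddle.to_tensor(batch)   # was: paddle.imperative.to_variable(batch)
print(real_A.shape)                # [1, 3, 256, 256]
```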
......
@@ -3,7 +3,7 @@ import functools
 import numpy as np
 import paddle.nn as nn
-from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout, BCEWithLogitsLoss, Conv2DTranspose, Conv2D, Pad2D, MSELoss
+from ...modules.nn import ReflectionPad2d, LeakyReLU, Dropout, BCEWithLogitsLoss, Pad2D, MSELoss
 from ...modules.norm import build_norm_layer
 from .builder import DISCRIMINATORS
......
@@ -31,14 +31,14 @@ class NLayerDiscriminator(paddle.fluid.dygraph.Layer):
         kw = 4
         padw = 1
-        sequence = [Conv2D(input_nc, ndf, filter_size=kw, stride=2, padding=padw), LeakyReLU(0.2, True)]
+        sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), LeakyReLU(0.2, True)]
         nf_mult = 1
         nf_mult_prev = 1
         for n in range(1, n_layers):
             nf_mult_prev = nf_mult
             nf_mult = min(2 ** n, 8)
             sequence += [
-                Conv2D(ndf * nf_mult_prev, ndf * nf_mult, filter_size=kw, stride=2, padding=padw, bias_attr=use_bias),
+                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias_attr=use_bias),
                 norm_layer(ndf * nf_mult),
                 LeakyReLU(0.2, True)
             ]
......
@@ -46,12 +46,12 @@ class NLayerDiscriminator(paddle.fluid.dygraph.Layer):
         nf_mult_prev = nf_mult
         nf_mult = min(2 ** n_layers, 8)
         sequence += [
-            Conv2D(ndf * nf_mult_prev, ndf * nf_mult, filter_size=kw, stride=1, padding=padw, bias_attr=use_bias),
+            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias_attr=use_bias),
             norm_layer(ndf * nf_mult),
             LeakyReLU(0.2, True)
         ]
-        sequence += [Conv2D(ndf * nf_mult, 1, filter_size=kw, stride=1, padding=padw)]
+        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
         self.model = nn.Sequential(*sequence)

     def forward(self, input):
......
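Note: the custom `Conv2D` wrapper from `modules/nn.py` (deleted later in this diff) is replaced by `paddle.nn.Conv2d`, which takes `kernel_size` instead of `filter_size` and in/out channel counts positionally. A before/after sketch of this hunk's first layer, with example channel counts:

```python
import paddle.nn as nn

input_nc, ndf = 3, 64   # example channel counts
# was: Conv2D(input_nc, ndf, filter_size=4, stride=2, padding=1)
conv = nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=1)
```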
......
@@ -2,7 +2,7 @@ import paddle
 import paddle.nn as nn
 import functools
-from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout, BCEWithLogitsLoss, Conv2DTranspose, Conv2D, Pad2D, MSELoss
+from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout, BCEWithLogitsLoss, Pad2D, MSELoss
 from ...modules.norm import build_norm_layer
 from .builder import GENERATORS
......
@@ -37,7 +37,7 @@ class ResnetGenerator(paddle.fluid.dygraph.Layer):
         use_bias = norm_layer == nn.InstanceNorm
         model = [ReflectionPad2d(3),
-                 nn.Conv2D(input_nc, ngf, filter_size=7, padding=0, bias_attr=use_bias),
+                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias_attr=use_bias),
                  norm_layer(ngf),
                  nn.ReLU()]
......
@@ -45,7 +45,7 @@ class ResnetGenerator(paddle.fluid.dygraph.Layer):
         for i in range(n_downsampling):  # add downsampling layers
             mult = 2 ** i
             model += [
-                nn.Conv2D(ngf * mult, ngf * mult * 2, filter_size=3, stride=2, padding=1, bias_attr=use_bias),
+                nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias_attr=use_bias),
                 norm_layer(ngf * mult * 2),
                 nn.ReLU()]
......
@@ -57,16 +57,16 @@ class ResnetGenerator(paddle.fluid.dygraph.Layer):
         for i in range(n_downsampling):  # add upsampling layers
             mult = 2 ** (n_downsampling - i)
             model += [
-                nn.Conv2DTranspose(ngf * mult, int(ngf * mult / 2),
-                                   filter_size=3, stride=2,
-                                   padding=1,
+                nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
+                                   kernel_size=3, stride=2,
+                                   padding=1,
                                    output_padding=1,
                                    bias_attr=use_bias),
                 Pad2D(paddings=[0, 1, 0, 1], mode='constant', pad_value=0.0),
                 norm_layer(int(ngf * mult / 2)),
                 nn.ReLU()]
         model += [ReflectionPad2d(3)]
-        model += [nn.Conv2D(ngf, output_nc, filter_size=7, padding=0)]
-        model += [Tanh()]
+        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
+        model += [nn.Tanh()]
         self.model = nn.Sequential(*model)
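Note: transposed convolutions follow the same renaming (`nn.Conv2DTranspose`/`filter_size` to `nn.ConvTranspose2d`/`kernel_size`), and the custom `Tanh` module gives way to the built-in `nn.Tanh`. A sketch of one upsampling step with example sizes, under the names this diff uses:

```python
import paddle.nn as nn

ngf, mult = 64, 2   # example values for one upsampling step
up = nn.Sequential(
    nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                       kernel_size=3, stride=2, padding=1, output_padding=1),
    nn.Tanh(),      # built-in activation replaces the custom Tanh module
)
```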
......
@@ -112,7 +112,7 @@ class ResnetBlock(paddle.fluid.dygraph.Layer):
         else:
             raise NotImplementedError('padding [%s] is not implemented' % padding_type)
-        conv_block += [nn.Conv2D(dim, dim, filter_size=3, padding=p, bias_attr=use_bias), norm_layer(dim), nn.ReLU()]
+        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias), norm_layer(dim), nn.ReLU()]
         if use_dropout:
             conv_block += [Dropout(0.5)]
......
@@ -125,7 +125,7 @@ class ResnetBlock(paddle.fluid.dygraph.Layer):
             p = 1
         else:
             raise NotImplementedError('padding [%s] is not implemented' % padding_type)
-        conv_block += [nn.Conv2D(dim, dim, filter_size=3, padding=p, bias_attr=use_bias), norm_layer(dim)]
+        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias), norm_layer(dim)]

         return nn.Sequential(*conv_block)
......
......
@@ -2,7 +2,7 @@ import paddle
 import paddle.nn as nn
 import functools
-from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout, Conv2DTranspose, Conv2D
+from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout
 from ...modules.norm import build_norm_layer
 from .builder import GENERATORS
......
@@ -77,7 +77,7 @@ class UnetSkipConnectionBlock(paddle.fluid.dygraph.Layer):
         use_bias = norm_layer == nn.InstanceNorm
         if input_nc is None:
             input_nc = outer_nc
-        downconv = Conv2D(input_nc, inner_nc, filter_size=4,
+        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                           stride=2, padding=1, bias_attr=use_bias)
         downrelu = LeakyReLU(0.2, True)
         downnorm = norm_layer(inner_nc)
......
@@ -85,22 +85,22 @@ class UnetSkipConnectionBlock(paddle.fluid.dygraph.Layer):
         upnorm = norm_layer(outer_nc)

         if outermost:
-            upconv = Conv2DTranspose(inner_nc * 2, outer_nc,
-                                     filter_size=4, stride=2,
+            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
+                                        kernel_size=4, stride=2,
                                         padding=1)
             down = [downconv]
             up = [uprelu, upconv, Tanh()]
             model = down + [submodule] + up
         elif innermost:
-            upconv = Conv2DTranspose(inner_nc, outer_nc,
-                                     filter_size=4, stride=2,
+            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
+                                        kernel_size=4, stride=2,
                                         padding=1, bias_attr=use_bias)
             down = [downrelu, downconv]
             up = [uprelu, upconv, upnorm]
             model = down + up
         else:
-            upconv = Conv2DTranspose(inner_nc * 2, outer_nc,
-                                     filter_size=4, stride=2,
+            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
+                                        kernel_size=4, stride=2,
                                         padding=1, bias_attr=use_bias)
             down = [downrelu, downconv, downnorm]
             up = [uprelu, upconv, upnorm]
......
 import paddle
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv
 from .base_model import BaseModel
 from .builder import MODELS
......
@@ -72,8 +72,8 @@ class Pix2PixModel(BaseModel):
         """
         AtoB = self.opt.dataset.train.direction == 'AtoB'
-        self.real_A = paddle.imperative.to_variable(input['A' if AtoB else 'B'])
-        self.real_B = paddle.imperative.to_variable(input['B' if AtoB else 'A'])
+        self.real_A = paddle.to_tensor(input['A' if AtoB else 'B'])
+        self.real_B = paddle.to_tensor(input['B' if AtoB else 'A'])
         self.image_paths = input['A_paths' if AtoB else 'B_paths']
......
......
@@ -129,88 +129,7 @@ def initial_type(
         else:
             bias_attr = False
     return param_attr, bias_attr

-class Conv2D(paddle.nn.Conv2D):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 padding=0,
-                 stride=1,
-                 dilation=1,
-                 groups=1,
-                 param_attr=None,
-                 bias_attr=None,
-                 use_cudnn=True,
-                 act=None,
-                 data_format="NCHW",
-                 dtype='float32',
-                 init_type='normal'):
-        param_attr, bias_attr = initial_type(
-            input=input,
-            op_type='conv',
-            fan_out=num_filters,
-            init=init_type,
-            use_bias=True if bias_attr != False else False,
-            filter_size=filter_size)
-
-        super(Conv2D, self).__init__(num_channels,
-                                     num_filters,
-                                     filter_size,
-                                     padding,
-                                     stride,
-                                     dilation,
-                                     groups,
-                                     param_attr,
-                                     bias_attr,
-                                     use_cudnn,
-                                     act,
-                                     data_format,
-                                     dtype)
-
-class Conv2DTranspose(paddle.nn.Conv2DTranspose):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 output_size=None,
-                 padding=0,
-                 stride=1,
-                 dilation=1,
-                 groups=1,
-                 param_attr=None,
-                 bias_attr=None,
-                 use_cudnn=True,
-                 act=None,
-                 data_format="NCHW",
-                 dtype='float32',
-                 init_type='normal'):
-        param_attr, bias_attr = initial_type(
-            input=input,
-            op_type='deconv',
-            fan_out=num_filters,
-            init=init_type,
-            use_bias=True if bias_attr != False else False,
-            filter_size=filter_size)
-
-        super(Conv2DTranspose, self).__init__(
-            num_channels,
-            num_filters,
-            filter_size,
-            output_size,
-            padding,
-            stride,
-            dilation,
-            groups,
-            param_attr,
-            bias_attr,
-            use_cudnn,
-            act,
-            data_format,
-            dtype)
-
 class Pad2D(fluid.dygraph.Layer):
     def __init__(self, paddings, mode, pad_value=0.0):
......
......
@@ -13,4 +13,4 @@ def build_optimizer(cfg, parameter_list=None):
     opt_name = cfg_copy.pop('name')

-    return getattr(paddle.optimizer, opt_name)(lr_scheduler, parameter_list=parameter_list, **cfg_copy)
+    return getattr(paddle.optimizer, opt_name)(lr_scheduler, parameters=parameter_list, **cfg_copy)
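Note: 2.0 optimizers take `parameters` instead of the 1.x `parameter_list`. A minimal sketch of the call `build_optimizer` resolves to, for a hypothetical config `{name: Adam, beta1: 0.5}`:

```python
import paddle

# Sketch: what build_optimizer ends up calling under the 2.0 API.
model = paddle.nn.Linear(8, 8)               # stand-in for the real networks
opt = paddle.optimizer.Adam(0.0002,          # lr_scheduler or a float
                            parameters=model.parameters(),  # was: parameter_list=
                            beta1=0.5)
```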
......
@@ -2,7 +2,7 @@ import logging
 import os
 import sys
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv

 def setup_logger(output=None, name="ppgan"):
......
......
@@ -2,7 +2,7 @@ import os
 import time
 import paddle
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv
 from .logger import setup_logger
......
@@ -20,4 +20,4 @@ def setup(args, cfg):
     place = paddle.fluid.CUDAPlace(ParallelEnv().dev_id) \
         if ParallelEnv().nranks > 1 else paddle.fluid.CUDAPlace(0)

-    paddle.enable_imperative(place)
+    paddle.disable_static(place)
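Note: `paddle.enable_imperative(place)` is renamed `paddle.disable_static(place)`; both put the process into dygraph (imperative) mode on the given device. A sketch matching the `setup` code above:

```python
import paddle

# Sketch: entering dygraph mode on GPU 0 under the 2.0 API.
place = paddle.fluid.CUDAPlace(0)   # as in setup(); use CPUPlace() without a GPU
paddle.disable_static(place)        # was: paddle.enable_imperative(place)
```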