PaddlePaddle / PaddleGAN

Commit 5b31853d (unverified)
Authored by LielinJiang on Aug 28, 2020; committed via GitHub on Aug 28, 2020

Merge pull request #11 from LielinJiang/adapt-2.0-api

Adapt 2.0 api

Parents: c35db907, 9d6a5f0e
Showing 13 changed files with 43 additions and 124 deletions (+43, -124)
configs/cyclegan_cityscapes.yaml          +1   -1
configs/pix2pix_cityscapes.yaml           +1   -1
ppgan/datasets/builder.py                 +1   -1
ppgan/engine/trainer.py                   +3   -3
ppgan/models/cycle_gan_model.py           +5   -5
ppgan/models/discriminators/nlayers.py    +5   -5
ppgan/models/generators/resnet.py         +11  -11
ppgan/models/generators/unet.py           +8   -8
ppgan/models/pix2pix_model.py             +3   -3
ppgan/modules/nn.py                       +1   -82
ppgan/solver/optimizer.py                 +1   -1
ppgan/utils/logger.py                     +1   -1
ppgan/utils/setup.py                      +2   -2
configs/cyclegan_cityscapes.yaml

@@ -28,7 +28,7 @@ dataset:
   train:
     name: UnpairedDataset
     dataroot: data/cityscapes
-    num_workers: 4
+    num_workers: 0
     phase: train
     max_dataset_size: inf
     direction: AtoB
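Both config edits only turn off dataloader worker subprocesses (num_workers: 4 -> 0). A minimal, hypothetical sketch of how such a field typically reaches a paddle.io.DataLoader; the dataset class and tensor shapes are invented for illustration:

import paddle
from paddle.io import Dataset, DataLoader

class DummyPairs(Dataset):                       # hypothetical stand-in dataset
    def __len__(self):
        return 8
    def __getitem__(self, idx):
        return paddle.ones([3, 4, 4]), paddle.ones([3, 4, 4])

# num_workers=0 keeps loading in the main process, sidestepping multiprocessing issues
loader = DataLoader(DummyPairs(), batch_size=2, num_workers=0)
for a, b in loader:
    print(a.shape, b.shape)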
configs/pix2pix_cityscapes.yaml

@@ -25,7 +25,7 @@ dataset:
   train:
     name: PairedDataset
     dataroot: data/cityscapes
-    num_workers: 4
+    num_workers: 0
    phase: train
     max_dataset_size: inf
     direction: BtoA
ppgan/datasets/builder.py

@@ -3,7 +3,7 @@ import paddle
 import numbers
 import numpy as np
 from multiprocessing import Manager
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv
 from paddle.incubate.hapi.distributed import DistributedBatchSampler
 
 from ..utils.registry import Registry
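The recurring import move across these files: ParallelEnv is no longer under paddle.imperative. An illustration using the import exactly as this commit writes it; `from paddle import ParallelEnv` matches the 2.0-beta layout targeted here (stable releases also expose it as paddle.distributed.ParallelEnv), and the attributes below are the documented ones:

from paddle import ParallelEnv   # new location used by this commit

env = ParallelEnv()
print(env.nranks)      # total number of trainer processes
print(env.local_rank)  # rank of this process within the job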
ppgan/engine/trainer.py

@@ -4,7 +4,7 @@ import time
 import logging
 
 import paddle
-from paddle.imperative import ParallelEnv, DataParallel
+from paddle import ParallelEnv, DataParallel
 
 from ..datasets.builder import build_dataloader
 from ..models.builder import build_model
@@ -46,7 +46,7 @@ class Trainer:
         self.time_count = {}
 
     def distributed_data_parallel(self):
-        strategy = paddle.imperative.prepare_context()
+        strategy = paddle.prepare_context()
         for name in self.model.model_names:
             if isinstance(name, str):
                 net = getattr(self.model, 'net' + name)
@@ -127,7 +127,7 @@ class Trainer:
 
     @property
     def current_learning_rate(self):
-        return self.model.optimizers[0].current_step_lr()
+        return self.model.optimizers[0].get_lr()
 
     def visual(self, results_dir, visual_results=None):
         self.model.compute_visuals()
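Two renames here: paddle.imperative.prepare_context becomes paddle.prepare_context (a 2.0-beta spelling), and the optimizer's current_step_lr() accessor becomes get_lr(). A small, repo-independent sketch of the latter, with a throwaway Linear layer only so the optimizer has parameters to manage:

import paddle

layer = paddle.nn.Linear(4, 4)   # stand-in parameters, not from PaddleGAN
opt = paddle.optimizer.Adam(learning_rate=0.001, parameters=layer.parameters())
print(opt.get_lr())              # 2.0 accessor used by current_learning_rate above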
ppgan/models/cycle_gan_model.py

 import paddle
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv
 
 from .base_model import BaseModel
 from .builder import MODELS
@@ -93,14 +93,14 @@ class CycleGANModel(BaseModel):
         if AtoB:
             if 'A' in input:
-                self.real_A = paddle.imperative.to_variable(input['A'])
+                self.real_A = paddle.to_tensor(input['A'])
             if 'B' in input:
-                self.real_B = paddle.imperative.to_variable(input['B'])
+                self.real_B = paddle.to_tensor(input['B'])
         else:
             if 'B' in input:
-                self.real_A = paddle.imperative.to_variable(input['B'])
+                self.real_A = paddle.to_tensor(input['B'])
             if 'A' in input:
-                self.real_B = paddle.imperative.to_variable(input['A'])
+                self.real_B = paddle.to_tensor(input['A'])
 
         if 'A_paths' in input:
             self.image_paths = input['A_paths']
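The input feed path switches from paddle.imperative.to_variable to paddle.to_tensor, the 2.0 entry point for wrapping numpy batches as tensors. A minimal illustration with a fabricated batch dict (shape invented):

import numpy as np
import paddle

batch = {'A': np.zeros((1, 3, 8, 8), dtype='float32')}   # hypothetical dataloader output
real_A = paddle.to_tensor(batch['A'])                     # replaces paddle.imperative.to_variable
print(real_A.shape, real_A.dtype)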
ppgan/models/discriminators/nlayers.py

@@ -3,7 +3,7 @@ import functools
 import numpy as np
 
 import paddle.nn as nn
-from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout, BCEWithLogitsLoss, Conv2DTranspose, Conv2D, Pad2D, MSELoss
+from ...modules.nn import ReflectionPad2d, LeakyReLU, Dropout, BCEWithLogitsLoss, Pad2D, MSELoss
 from ...modules.norm import build_norm_layer
 
 from .builder import DISCRIMINATORS
@@ -31,14 +31,14 @@ class NLayerDiscriminator(paddle.fluid.dygraph.Layer):
         kw = 4
         padw = 1
-        sequence = [Conv2D(input_nc, ndf, filter_size=kw, stride=2, padding=padw), LeakyReLU(0.2, True)]
+        sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), LeakyReLU(0.2, True)]
         nf_mult = 1
         nf_mult_prev = 1
         for n in range(1, n_layers):
             nf_mult_prev = nf_mult
             nf_mult = min(2 ** n, 8)
             sequence += [
-                Conv2D(ndf * nf_mult_prev, ndf * nf_mult, filter_size=kw, stride=2, padding=padw, bias_attr=use_bias),
+                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias_attr=use_bias),
                 norm_layer(ndf * nf_mult),
                 LeakyReLU(0.2, True)
             ]
@@ -46,12 +46,12 @@ class NLayerDiscriminator(paddle.fluid.dygraph.Layer):
         nf_mult_prev = nf_mult
         nf_mult = min(2 ** n_layers, 8)
         sequence += [
-            Conv2D(ndf * nf_mult_prev, ndf * nf_mult, filter_size=kw, stride=1, padding=padw, bias_attr=use_bias),
+            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias_attr=use_bias),
             norm_layer(ndf * nf_mult),
             LeakyReLU(0.2, True)
         ]
 
-        sequence += [Conv2D(ndf * nf_mult, 1, filter_size=kw, stride=1, padding=padw)]
+        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
         self.model = nn.Sequential(*sequence)
 
     def forward(self, input):
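The discriminator drops the project's Conv2D wrapper for the stock layer and renames filter_size to kernel_size. A quick shape check of one such 4x4, stride-2 block, written with the stable paddle.nn.Conv2D spelling (the commit itself uses the 2.0-beta name nn.Conv2d):

import paddle
import paddle.nn as nn

conv = nn.Conv2D(3, 64, kernel_size=4, stride=2, padding=1, bias_attr=False)
x = paddle.ones([1, 3, 16, 16])
print(conv(x).shape)   # [1, 64, 8, 8]: stride 2 halves each spatial dim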
ppgan/models/generators/resnet.py

@@ -2,7 +2,7 @@ import paddle
 import paddle.nn as nn
 import functools
-from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout, BCEWithLogitsLoss, Conv2DTranspose, Conv2D, Pad2D, MSELoss
+from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout, BCEWithLogitsLoss, Pad2D, MSELoss
 from ...modules.norm import build_norm_layer
 
 from .builder import GENERATORS
@@ -37,7 +37,7 @@ class ResnetGenerator(paddle.fluid.dygraph.Layer):
         use_bias = norm_layer == nn.InstanceNorm
 
         model = [ReflectionPad2d(3),
-                 nn.Conv2D(input_nc, ngf, filter_size=7, padding=0, bias_attr=use_bias),
+                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias_attr=use_bias),
                  norm_layer(ngf),
                  nn.ReLU()]
@@ -45,7 +45,7 @@ class ResnetGenerator(paddle.fluid.dygraph.Layer):
         for i in range(n_downsampling):  # add downsampling layers
             mult = 2 ** i
             model += [
-                nn.Conv2D(ngf * mult, ngf * mult * 2, filter_size=3, stride=2, padding=1, bias_attr=use_bias),
+                nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias_attr=use_bias),
                 norm_layer(ngf * mult * 2),
                 nn.ReLU()]
@@ -57,16 +57,16 @@ class ResnetGenerator(paddle.fluid.dygraph.Layer):
         for i in range(n_downsampling):  # add upsampling layers
             mult = 2 ** (n_downsampling - i)
             model += [
-                nn.Conv2DTranspose(ngf * mult, int(ngf * mult / 2),
-                                   filter_size=3, stride=2,
+                nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
+                                   kernel_size=3, stride=2,
                                    padding=1,
+                                   output_padding=1,
                                    bias_attr=use_bias),
-                Pad2D(paddings=[0, 1, 0, 1], mode='constant', pad_value=0.0),
                 norm_layer(int(ngf * mult / 2)),
                 nn.ReLU()]
 
         model += [ReflectionPad2d(3)]
-        model += [nn.Conv2D(ngf, output_nc, filter_size=7, padding=0)]
-        model += [Tanh()]
+        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
+        model += [nn.Tanh()]
         self.model = nn.Sequential(*model)
@@ -112,7 +112,7 @@ class ResnetBlock(paddle.fluid.dygraph.Layer):
         else:
             raise NotImplementedError('padding [%s] is not implemented' % padding_type)
 
-        conv_block += [nn.Conv2D(dim, dim, filter_size=3, padding=p, bias_attr=use_bias), norm_layer(dim), nn.ReLU()]
+        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias), norm_layer(dim), nn.ReLU()]
 
         if use_dropout:
             conv_block += [Dropout(0.5)]
@@ -125,7 +125,7 @@ class ResnetBlock(paddle.fluid.dygraph.Layer):
             p = 1
         else:
             raise NotImplementedError('padding [%s] is not implemented' % padding_type)
 
-        conv_block += [nn.Conv2D(dim, dim, filter_size=3, padding=p, bias_attr=use_bias), norm_layer(dim)]
+        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias), norm_layer(dim)]
 
         return nn.Sequential(*conv_block)
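Besides the kernel_size rename, the upsampling path now asks the transposed convolution for output_padding=1 instead of appending a Pad2D([0, 1, 0, 1]) layer afterwards. A standalone shape check of that equivalence, using the stable spelling nn.Conv2DTranspose (the commit uses the beta name nn.ConvTranspose2d):

import paddle
import paddle.nn as nn

x = paddle.ones([1, 8, 16, 16])
up = nn.Conv2DTranspose(8, 4, kernel_size=3, stride=2, padding=1, output_padding=1)
print(up(x).shape)   # [1, 4, 32, 32]: exactly doubled, no extra padding layer needed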
ppgan/models/generators/unet.py

@@ -2,7 +2,7 @@ import paddle
 import paddle.nn as nn
 import functools
-from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout, Conv2DTranspose, Conv2D
+from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout
 from ...modules.norm import build_norm_layer
 
 from .builder import GENERATORS
@@ -77,7 +77,7 @@ class UnetSkipConnectionBlock(paddle.fluid.dygraph.Layer):
         use_bias = norm_layer == nn.InstanceNorm
         if input_nc is None:
             input_nc = outer_nc
-        downconv = Conv2D(input_nc, inner_nc, filter_size=4,
+        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                           stride=2, padding=1, bias_attr=use_bias)
         downrelu = LeakyReLU(0.2, True)
         downnorm = norm_layer(inner_nc)
@@ -85,22 +85,22 @@ class UnetSkipConnectionBlock(paddle.fluid.dygraph.Layer):
         upnorm = norm_layer(outer_nc)
 
         if outermost:
-            upconv = Conv2DTranspose(inner_nc * 2, outer_nc,
-                                     filter_size=4, stride=2,
+            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
+                                     kernel_size=4, stride=2,
                                      padding=1)
             down = [downconv]
             up = [uprelu, upconv, Tanh()]
             model = down + [submodule] + up
         elif innermost:
-            upconv = Conv2DTranspose(inner_nc, outer_nc,
-                                     filter_size=4, stride=2,
+            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
+                                     kernel_size=4, stride=2,
                                      padding=1, bias_attr=use_bias)
             down = [downrelu, downconv]
             up = [uprelu, upconv, upnorm]
             model = down + up
         else:
-            upconv = Conv2DTranspose(inner_nc * 2, outer_nc,
-                                     filter_size=4, stride=2,
+            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
+                                     kernel_size=4, stride=2,
                                      padding=1, bias_attr=use_bias)
             down = [downrelu, downconv, downnorm]
             up = [uprelu, upconv, upnorm]
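The U-Net block keeps the usual 4x4, stride-2 pairing: the down conv halves H and W, and the transposed conv restores them, which is what lets the skip connections concatenate cleanly. A small, repo-independent check using the stable layer spellings:

import paddle
import paddle.nn as nn

x = paddle.ones([1, 3, 16, 16])
down = nn.Conv2D(3, 8, kernel_size=4, stride=2, padding=1)
up = nn.Conv2DTranspose(8, 3, kernel_size=4, stride=2, padding=1)
print(down(x).shape)       # [1, 8, 8, 8]
print(up(down(x)).shape)   # [1, 3, 16, 16]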
ppgan/models/pix2pix_model.py

 import paddle
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv
 
 from .base_model import BaseModel
 from .builder import MODELS
@@ -72,8 +72,8 @@ class Pix2PixModel(BaseModel):
         """
         AtoB = self.opt.dataset.train.direction == 'AtoB'
 
-        self.real_A = paddle.imperative.to_variable(input['A' if AtoB else 'B'])
-        self.real_B = paddle.imperative.to_variable(input['B' if AtoB else 'A'])
+        self.real_A = paddle.to_tensor(input['A' if AtoB else 'B'])
+        self.real_B = paddle.to_tensor(input['B' if AtoB else 'A'])
 
         self.image_paths = input['A_paths' if AtoB else 'B_paths']
ppgan/modules/nn.py

@@ -129,88 +129,7 @@ def initial_type(
     else:
         bias_attr = False
     return param_attr, bias_attr
 
-
-class Conv2D(paddle.nn.Conv2D):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 padding=0,
-                 stride=1,
-                 dilation=1,
-                 groups=1,
-                 param_attr=None,
-                 bias_attr=None,
-                 use_cudnn=True,
-                 act=None,
-                 data_format="NCHW",
-                 dtype='float32',
-                 init_type='normal'):
-        param_attr, bias_attr = initial_type(
-            input=input,
-            op_type='conv',
-            fan_out=num_filters,
-            init=init_type,
-            use_bias=True if bias_attr != False else False,
-            filter_size=filter_size)
-
-        super(Conv2D, self).__init__(num_channels, num_filters, filter_size,
-                                     padding, stride, dilation, groups,
-                                     param_attr, bias_attr, use_cudnn, act,
-                                     data_format, dtype)
-
-
-class Conv2DTranspose(paddle.nn.Conv2DTranspose):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 output_size=None,
-                 padding=0,
-                 stride=1,
-                 dilation=1,
-                 groups=1,
-                 param_attr=None,
-                 bias_attr=None,
-                 use_cudnn=True,
-                 act=None,
-                 data_format="NCHW",
-                 dtype='float32',
-                 init_type='normal'):
-        param_attr, bias_attr = initial_type(
-            input=input,
-            op_type='deconv',
-            fan_out=num_filters,
-            init=init_type,
-            use_bias=True if bias_attr != False else False,
-            filter_size=filter_size)
-
-        super(Conv2DTranspose, self).__init__(num_channels, num_filters,
-                                              filter_size, output_size,
-                                              padding, stride, dilation,
-                                              groups, param_attr, bias_attr,
-                                              use_cudnn, act, data_format,
-                                              dtype)
-
-
 class Pad2D(fluid.dygraph.Layer):
     def __init__(self, paddings, mode, pad_value=0.0):
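The deleted Conv2D / Conv2DTranspose wrappers existed mainly to inject weight initialization through initial_type. A hedged sketch of one way to get similar behavior with a stock layer via ParamAttr; the 0.02 normal init follows the common GAN convention and is not a copy of this repo's exact logic:

import paddle
import paddle.nn as nn

weight_attr = paddle.ParamAttr(initializer=nn.initializer.Normal(mean=0.0, std=0.02))
conv = nn.Conv2D(3, 64, kernel_size=3, padding=1, weight_attr=weight_attr, bias_attr=False)
print(conv.weight.shape)   # [64, 3, 3, 3]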
ppgan/solver/optimizer.py

@@ -13,4 +13,4 @@ def build_optimizer(cfg, parameter_list=None):
     opt_name = cfg_copy.pop('name')
 
-    return getattr(paddle.optimizer, opt_name)(lr_scheduler, parameter_list=parameter_list, **cfg_copy)
+    return getattr(paddle.optimizer, opt_name)(lr_scheduler, parameters=parameter_list, **cfg_copy)
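Only the keyword changes: paddle 2.0 optimizers take the trainable variables as parameters= rather than parameter_list=. A repo-independent sketch of the same getattr-based construction with a made-up cfg:

import paddle

cfg = {'name': 'Adam', 'beta1': 0.5}   # hypothetical config, mirrors the yaml style
layer = paddle.nn.Linear(2, 2)         # stand-in for a model's parameters

opt_name = cfg.pop('name')
opt = getattr(paddle.optimizer, opt_name)(0.0002, parameters=layer.parameters(), **cfg)
print(type(opt).__name__)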
ppgan/utils/logger.py

@@ -2,7 +2,7 @@ import logging
 import os
 import sys
 
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv
 
 
 def setup_logger(output=None, name="ppgan"):
ppgan/utils/setup.py

@@ -2,7 +2,7 @@ import os
 import time
 
 import paddle
-from paddle.imperative import ParallelEnv
+from paddle import ParallelEnv
 
 from .logger import setup_logger
@@ -20,4 +20,4 @@ def setup(args, cfg):
     place = paddle.fluid.CUDAPlace(ParallelEnv().dev_id) \
                 if ParallelEnv().nranks > 1 else paddle.fluid.CUDAPlace(0)
 
-    paddle.enable_imperative(place)
+    paddle.disable_static(place)
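paddle.disable_static is the 2.0 name for switching into dynamic (imperative) mode, replacing enable_imperative. A minimal sketch; CPUPlace is used here instead of the CUDAPlace above so it runs without a GPU:

import paddle

place = paddle.fluid.CPUPlace()
paddle.disable_static(place)        # dynamic-graph mode, 2.0 spelling
x = paddle.to_tensor([1.0, 2.0])
print((x * 2).numpy())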