BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit 3ebf6aaf
Authored Nov 07, 2016 by wangyang59

fixed a gpu bug in trainer API to train gan using GPU

Parent: c159e4dd

Showing 5 changed files with 92 additions and 101 deletions

demo/gan/.gitignore             +1   -1
demo/gan/gan_conf_image.py      +59  -73
demo/gan/gan_trainer.py         +9   -9
demo/gan/gan_trainer_image.py   +22  -16
paddle/api/Paddle.swig          +1   -2

demo/gan/.gitignore
@@ -2,5 +2,5 @@ output/
 *.png
 .pydevproject
 .project
-trainLog.txt
+train.log
 data/raw_data/

demo/gan/gan_conf_image.py
@@ -41,39 +41,9 @@ settings(
     learning_method=AdamOptimizer())
 )
 
-def convTrans_bn(input, channels, output_x, num_filters, imgSize, stride, name,
-                 param_attr, bias_attr, param_attr_bn):
-    tmp = imgSize - (output_x - 1) * stride
-    if tmp <= 1 or tmp > 5:
-        raise ValueError("convTrans input-output dimension does not fit")
-    elif tmp <= 3:
-        filter_size = tmp + 2
-        padding = 1
-    else:
-        filter_size = tmp
-        padding = 0
-
-    convTrans = img_conv_layer(input, filter_size=filter_size,
-                         num_filters=num_filters, name=name + "_convt", num_channels=channels,
-                         act=LinearActivation(), groups=1, stride=stride,
-                         padding=padding, bias_attr=bias_attr,
-                         param_attr=param_attr, shared_biases=True, layer_attr=None,
-                         filter_size_y=None, stride_y=None, padding_y=None, trans=True)
-
-    convTrans_bn = batch_norm_layer(convTrans,
-                         act=ReluActivation(),
-                         name=name + "_convt_bn",
-                         bias_attr=bias_attr,
-                         param_attr=param_attr_bn,
-                         use_global_stats=False)
-
-    return convTrans_bn
-
 def conv_bn(input, channels, imgSize, num_filters, output_x, stride, name,
-            param_attr, bias_attr, param_attr_bn, bn):
+            param_attr, bias_attr, param_attr_bn, bn, trans=False,
+            act=ReluActivation()):
     tmp = imgSize - (output_x - 1) * stride
     if tmp <= 1 or tmp > 5:
         raise ValueError("conv input-output dimension does not fit")
@@ -85,19 +55,25 @@ def conv_bn(input, channels, imgSize, num_filters, output_x, stride, name,
         padding = 0
 
     print (imgSize, output_x, stride, filter_size, padding)
 
+    if trans:
+        nameApx = "_conv"
+    else:
+        nameApx = "_convt"
+
     if bn:
         conv = img_conv_layer(input, filter_size=filter_size,
-                         num_filters=num_filters, name=name + "_conv", num_channels=channels,
+                         num_filters=num_filters, name=name + nameApx, num_channels=channels,
                          act=LinearActivation(), groups=1, stride=stride,
                          padding=padding, bias_attr=bias_attr,
                          param_attr=param_attr, shared_biases=True, layer_attr=None,
-                         filter_size_y=None, stride_y=None, padding_y=None)
+                         filter_size_y=None, stride_y=None, padding_y=None, trans=trans)
 
         conv_bn = batch_norm_layer(conv,
-                         act=ReluActivation(),
-                         name=name + "_conv_bn",
+                         act=act,
+                         name=name + nameApx + "_bn",
                          bias_attr=bias_attr,
                          param_attr=param_attr_bn,
                          use_global_stats=False)
@@ -106,11 +82,12 @@ def conv_bn(input, channels, imgSize, num_filters, output_x, stride, name,
     else:
         conv = img_conv_layer(input, filter_size=filter_size,
-                         num_filters=num_filters, name=name + "_conv", num_channels=channels,
-                         act=ReluActivation(), groups=1, stride=stride,
+                         num_filters=num_filters, name=name + nameApx, num_channels=channels,
+                         act=act, groups=1, stride=stride,
                          padding=padding, bias_attr=bias_attr,
                          param_attr=param_attr, shared_biases=True, layer_attr=None,
-                         filter_size_y=None, stride_y=None, padding_y=None)
+                         filter_size_y=None, stride_y=None, padding_y=None, trans=trans)
         return conv
 
 def generator(noise):
@@ -143,39 +120,46 @@ def generator(noise):
                          param_attr=param_attr_bn,
                          use_global_stats=False)
 
-    h2_bn = convTrans_bn(h1_bn,
+    h2_bn = conv_bn(h1_bn,
                          channels=gf_dim*4,
                          output_x=s8,
                          num_filters=gf_dim*2,
                          imgSize=s4,
                          stride=2,
                          name="gen_layer_h2",
                          param_attr=param_attr,
                          bias_attr=bias_attr,
-                         param_attr_bn=param_attr_bn)
+                         param_attr_bn=param_attr_bn,
+                         bn=True,
+                         trans=True)
 
-    h3_bn = convTrans_bn(h2_bn,
+    h3_bn = conv_bn(h2_bn,
                          channels=gf_dim*2,
                          output_x=s4,
                          num_filters=gf_dim,
                          imgSize=s2,
                          stride=2,
                          name="gen_layer_h3",
                          param_attr=param_attr,
                          bias_attr=bias_attr,
-                         param_attr_bn=param_attr_bn)
+                         param_attr_bn=param_attr_bn,
+                         bn=True,
+                         trans=True)
 
-    return convTrans_bn(h3_bn,
+    return conv_bn(h3_bn,
                          channels=gf_dim,
                          output_x=s2,
                          num_filters=c_dim,
                          imgSize=sample_dim,
                          stride=2,
                          name="gen_layer_h4",
                          param_attr=param_attr,
                          bias_attr=bias_attr,
-                         param_attr_bn=param_attr_bn)
+                         param_attr_bn=param_attr_bn,
+                         bn=False,
+                         trans=True,
+                         act=TanhActivation())
 
 def discriminator(sample):
@@ -186,10 +170,12 @@ def discriminator(sample):
     of the sample is from generator and dimension 1 is the probabblity
     of the sample is from real data.
     """
-    param_attr = ParamAttr(is_static=is_generator_training)
-    bias_attr = ParamAttr(is_static=is_generator_training,
-                          initial_mean=1.0,
-                          initial_std=0)
+    param_attr = ParamAttr(is_static=is_generator_training,
+                           initial_mean=0.0,
+                           initial_std=0.02)
+    bias_attr = ParamAttr(is_static=is_generator_training,
+                          initial_mean=0.0,
+                          initial_std=0.0)
 
     param_attr_bn=ParamAttr(is_static=is_generator_training,
                             initial_mean=1.0,
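
Note on the conv_bn refactor above: the separate convTrans_bn helper is removed and conv_bn gains trans and act parameters, so the generator builds its deconvolution layers through the same code path as the discriminator's convolutions. The filter_size/padding rule that conv_bn keeps can be checked on its own; the sketch below is a standalone illustration in plain Python (check_conv_fit is a made-up name here, the arithmetic is copied from the diff).

# Standalone sketch of the size check kept by conv_bn; the helper name is
# illustrative, the arithmetic is taken verbatim from the diff above.
def check_conv_fit(imgSize, output_x, stride):
    tmp = imgSize - (output_x - 1) * stride
    if tmp <= 1 or tmp > 5:
        raise ValueError("conv input-output dimension does not fit")
    elif tmp <= 3:
        filter_size, padding = tmp + 2, 1
    else:
        filter_size, padding = tmp, 0
    return filter_size, padding

# Example: a 14x14 feature map deconvolved to 28x28 with stride 2 gives
# tmp = 28 - (14 - 1) * 2 = 2, so a 4x4 filter with padding 1.
print(check_conv_fit(28, 14, 2))   # (4, 1)

With a 4x4 filter and padding 1, a stride-2 deconvolution maps 14x14 to (14 - 1) * 2 - 2 * 1 + 4 = 28, which is why the check accepts that combination.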

demo/gan/gan_trainer.py
@@ -97,32 +97,32 @@ def prepare_discriminator_data_batch(
                            (numpy.zeros(batch_size / 2, dtype='int32'),
                             numpy.ones(batch_size / 2, dtype='int32')), 0)
     inputs = api.Arguments.createArguments(2)
-    inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(all_samples))
-    inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(all_labels))
+    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(all_samples))
+    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(all_labels))
     return inputs
 
 def prepare_discriminator_data_batch_pos(batch_size, noise_dim, sample_dim):
     real_samples = get_real_samples(batch_size, sample_dim)
     labels = numpy.ones(batch_size, dtype='int32')
     inputs = api.Arguments.createArguments(2)
-    inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(real_samples))
-    inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(labels))
+    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(real_samples))
+    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(labels))
     return inputs
 
 def prepare_discriminator_data_batch_neg(generator_machine, batch_size, noise_dim, sample_dim):
     fake_samples = get_fake_samples(generator_machine, batch_size, noise_dim, sample_dim)
     labels = numpy.zeros(batch_size, dtype='int32')
     inputs = api.Arguments.createArguments(2)
-    inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(fake_samples))
-    inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(labels))
+    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(fake_samples))
+    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(labels))
     return inputs
 
 def prepare_generator_data_batch(batch_size, dim):
     noise = numpy.random.normal(size=(batch_size, dim)).astype('float32')
     label = numpy.ones(batch_size, dtype='int32')
     inputs = api.Arguments.createArguments(2)
-    inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(noise))
-    inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(label))
+    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(noise))
+    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(label))
     return inputs
@@ -140,7 +140,7 @@ def get_layer_size(model_conf, layer_name):
 def main():
-    api.initPaddle('--use_gpu=0', '--dot_period=100', '--log_period=10000')
+    api.initPaddle('--use_gpu=1', '--dot_period=100', '--log_period=10000')
     gen_conf = parse_config("gan_conf.py", "mode=generator_training")
     dis_conf = parse_config("gan_conf.py", "mode=discriminator_training")
     generator_conf = parse_config("gan_conf.py", "mode=generator")
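
Note on gan_trainer.py: each prepare_*_data_batch helper changes in the same way, copying the numpy batch into GPU-side containers (createGpuDenseFromNumpy, createGpuVectorFromNumpy) instead of the CPU ones, matching the switch to --use_gpu=1 in main(). Below is a minimal sketch of the same pattern with the device choice passed in explicitly, assuming the demo's py_paddle.swig_paddle module is importable; prepare_data_batch and use_gpu are illustrative names, not part of this commit.

from py_paddle import swig_paddle as api   # assumed: the api module the demo scripts use

def prepare_data_batch(samples, labels, use_gpu):
    # Pack one (samples, labels) batch into Paddle Arguments, picking the
    # CPU or GPU constructors that this commit switches between.
    inputs = api.Arguments.createArguments(2)
    if use_gpu:
        inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(samples))
        inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(labels))
    else:
        inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(samples))
        inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(labels))
    return inputs

# e.g. the generator batch from prepare_generator_data_batch:
# noise = numpy.random.normal(size=(batch_size, dim)).astype('float32')
# label = numpy.ones(batch_size, dtype='int32')
# inputs = prepare_data_batch(noise, label, use_gpu=True)

Hard-coding the GPU constructors, as this commit does, keeps the demo simple but ties these scripts to a GPU build; the sketch is one way the earlier CPU path could have been kept selectable.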

demo/gan/gan_trainer_image.py
@@ -16,7 +16,7 @@ import argparse
 import itertools
 import random
 import numpy
-import sys,os
+import sys,os,gc
 from PIL import Image
 from paddle.trainer.config_parser import parse_config
@@ -94,10 +94,19 @@ def load_mnist_data(imageFile):
     f.close()
     return data
 
+def merge(images, size):
+    h, w = 28, 28
+    img = numpy.zeros((h * size[0], w * size[1]))
+    for idx in xrange(size[0] * size[1]):
+        i = idx % size[1]
+        j = idx // size[1]
+        img[j*h:j*h+h, i*w:i*w+w] = (images[idx, :].reshape((h, w)) + 1.0) / 2.0 * 255.0
+    return img
+
 def saveImages(images, path):
-    for i in xrange(10):
-        im = Image.fromarray(images[i, :].reshape((28, 28)) * 255.0).convert('RGB')
-        im.save(path + "/image_" + str(i) + ".png")
+    merged_img = merge(images, [8, 8])
+    im = Image.fromarray(merged_img).convert('RGB')
+    im.save(path)
 
 def get_real_samples(batch_size, data_np):
     return data_np[numpy.random.choice(data_np.shape[0], batch_size,
@@ -124,8 +133,8 @@ def prepare_discriminator_data_batch_pos(batch_size, data_np):
     real_samples = get_real_samples(batch_size, data_np)
     labels = numpy.ones(batch_size, dtype='int32')
     inputs = api.Arguments.createArguments(2)
-    inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(real_samples))
-    inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(labels))
+    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(real_samples))
+    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(labels))
     return inputs
 
 def prepare_discriminator_data_batch_neg(generator_machine, batch_size, noise):
@@ -133,16 +142,16 @@ def prepare_discriminator_data_batch_neg(generator_machine, batch_size, noise):
     #print fake_samples.shape
     labels = numpy.zeros(batch_size, dtype='int32')
     inputs = api.Arguments.createArguments(2)
-    inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(fake_samples))
-    inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(labels))
+    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(fake_samples))
+    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(labels))
     return inputs
 
 def prepare_generator_data_batch(batch_size, noise):
     label = numpy.ones(batch_size, dtype='int32')
     #label = numpy.zeros(batch_size, dtype='int32')
     inputs = api.Arguments.createArguments(2)
-    inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(noise))
-    inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(label))
+    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(noise))
+    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(label))
     return inputs
@@ -160,7 +169,7 @@ def get_layer_size(model_conf, layer_name):
 def main():
-    api.initPaddle('--use_gpu=0', '--dot_period=10', '--log_period=100')
+    api.initPaddle('--use_gpu=1', '--dot_period=10', '--log_period=100')
     gen_conf = parse_config("gan_conf_image.py", "mode=generator_training")
     dis_conf = parse_config("gan_conf_image.py", "mode=discriminator_training")
     generator_conf = parse_config("gan_conf_image.py", "mode=generator")
@@ -169,7 +178,7 @@ def main():
     sample_dim = get_layer_size(dis_conf.model_config, "sample")
     data_np = load_mnist_data("./data/raw_data/train-images-idx3-ubyte")
 
     # this create a gradient machine for discriminator
     dis_training_machine = api.GradientMachine.createFromConfigProto(
         dis_conf.model_config)
@@ -252,10 +261,7 @@ def main():
                 fake_samples = get_fake_samples(generator_machine, batch_size, noise)
-                save_dir = "./pass_" + str(train_pass)
-                if not os.path.exists(save_dir):
-                    os.makedirs(save_dir)
-                saveImages(fake_samples, save_dir)
+                saveImages(fake_samples, "train_pass%s.png" % train_pass)
     dis_trainer.finishTrain()
     gen_trainer.finishTrain()
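
Note on the image-saving change in gan_trainer_image.py: the new merge() helper tiles a batch of flattened 28x28 samples into a size[0] by size[1] grid and rescales the generator's tanh output from [-1, 1] to [0, 255], so saveImages now writes one merged PNG per pass instead of ten separate files. A small self-contained check of the tiling, NumPy only, with a made-up random batch standing in for generator output:

import numpy

def merge(images, size):
    # Copied from the diff above: tile flattened 28x28 samples into a grid
    # and map values from [-1, 1] to [0, 255].
    h, w = 28, 28
    img = numpy.zeros((h * size[0], w * size[1]))
    for idx in range(size[0] * size[1]):   # xrange in the Python 2 original
        i = idx % size[1]
        j = idx // size[1]
        img[j*h:j*h+h, i*w:i*w+w] = (images[idx, :].reshape((h, w)) + 1.0) / 2.0 * 255.0
    return img

batch = numpy.random.uniform(-1.0, 1.0, size=(64, 784)).astype('float32')
grid = merge(batch, [8, 8])
print(grid.shape)                              # (224, 224): an 8x8 grid of 28x28 tiles
print(grid.min() >= 0.0, grid.max() <= 255.0)  # True True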

paddle/api/Paddle.swig
@@ -193,5 +193,4 @@ namespace std {
 %ignore OptimizationConfigPrivate;
 %ignore ParameterTraverseCallbackPrivate;
 %include "utils/GlobalConstants.h"
-%include "api/PaddleAPI.h"
-
+%include "api/PaddleAPI.h"
\ No newline at end of file