# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DCGAN training script on MNIST using PaddlePaddle fluid."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys
import os
import argparse
import functools
import matplotlib
import six
import numpy as np
import paddle
import time
import paddle.fluid as fluid
from utility import get_parent_function_name, plot, check, add_arguments, print_arguments
from network import G, D
# Select the non-interactive Agg backend BEFORE importing pyplot, so the
# script can render/save figures on headless machines.
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

# Dimensionality of the generator's latent noise vector.
NOISE_SIZE = 100
# Adam learning rate shared by the discriminator and generator optimizers.
LEARNING_RATE = 2e-4

# Command-line interface: flags are registered through the shared
# `add_arguments` helper so `print_arguments` can echo them uniformly.
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size',        int,   128,          "Minibatch size.")
add_arg('epoch',             int,   20,        "The number of epochs to be trained.")
add_arg('output',            str,   "./output_dcgan", "The directory the model and the test result to be saved to.")
add_arg('use_gpu',           bool,  True,       "Whether to use GPU to train.")
add_arg('run_ce',            bool,  False,       "Whether to run for model ce.")
# yapf: enable


def loss(x, label):
    """Return the mean sigmoid cross-entropy between logits `x` and `label`.

    Args:
        x: Logit tensor produced by the discriminator (un-normalized scores).
        label: Float tensor of the same batch size; 1.0 for real, 0.0 for fake.

    Returns:
        A scalar tensor: the batch-averaged sigmoid cross-entropy loss.
    """
    return fluid.layers.mean(
        fluid.layers.sigmoid_cross_entropy_with_logits(
            x=x, label=label))


def train(args):
    """Train a DCGAN on MNIST, alternating discriminator/generator updates.

    Builds two fluid programs — one for the discriminator (D) and one for
    the combined generator+discriminator (D(G(z))) — then for each batch:
      1. runs G to produce fake images,
      2. updates D on the fake batch and on the real batch,
      3. updates G twice (NUM_TRAIN_TIMES_OF_DG) through the dg program.
    Every 10 batches (unless running CE) a sample grid is rendered with
    matplotlib and saved under ``args.output``.

    Args:
        args: Parsed CLI namespace with ``batch_size``, ``epoch``, ``output``,
            ``use_gpu`` and ``run_ce`` attributes.
    """
    if args.run_ce:
        # Fix seeds for continuous-evaluation (CE) reproducibility.
        np.random.seed(10)
        fluid.default_startup_program().random_seed = 90

    d_program = fluid.Program()
    dg_program = fluid.Program()

    # Discriminator program: classify 28x28 (flattened to 784) images
    # against a real(1.0)/fake(0.0) label.
    with fluid.program_guard(d_program):
        img = fluid.layers.data(name='img', shape=[784], dtype='float32')
        label = fluid.layers.data(name='label', shape=[1], dtype='float32')
        d_logit = D(img)
        d_loss = loss(d_logit, label)

    # Generator(+discriminator) program: G maps noise to an image; the
    # generator is trained to make D output "real" (label 1.0).
    with fluid.program_guard(dg_program):
        noise = fluid.layers.data(
            name='noise', shape=[NOISE_SIZE], dtype='float32')
        g_img = G(x=noise)

        # Clones taken BEFORE D is attached: g_program runs only the
        # generator; g_program_test is its inference-mode variant.
        g_program = dg_program.clone()
        g_program_test = dg_program.clone(for_test=True)

        dg_logit = D(g_img)
        dg_loss = loss(
            dg_logit,
            fluid.layers.fill_constant_batch_size_like(
                input=noise, dtype='float32', shape=[-1, 1], value=1.0))

    opt = fluid.optimizer.Adam(learning_rate=LEARNING_RATE)

    opt.minimize(loss=d_loss)
    # Restrict the generator update to G's own parameters so the
    # dg_loss step does not also modify the discriminator.
    parameters = [p.name for p in g_program.global_block().all_parameters()]

    opt.minimize(loss=dg_loss, parameter_list=parameters)

    exe = fluid.Executor(fluid.CPUPlace())
    if args.use_gpu:
        exe = fluid.Executor(fluid.CUDAPlace(0))
    exe.run(fluid.default_startup_program())

    if args.run_ce:
        # Deterministic (unshuffled) reader for CE runs.
        train_reader = paddle.batch(
            paddle.dataset.mnist.train(), batch_size=args.batch_size)
    else:
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.mnist.train(), buf_size=60000),
            batch_size=args.batch_size)

    NUM_TRAIN_TIMES_OF_DG = 2
    # Fixed noise vector reused for the periodic sample grids, so image
    # quality can be compared across iterations.
    const_n = np.random.uniform(
        low=-1.0, high=1.0,
        size=[args.batch_size, NOISE_SIZE]).astype('float32')

    t_time = 0
    losses = [[], []]  # losses[0]: discriminator, losses[1]: generator
    for pass_id in range(args.epoch):
        for batch_id, data in enumerate(train_reader()):
            # Skip the final partial batch: the programs assume a full batch.
            if len(data) != args.batch_size:
                continue
            noise_data = np.random.uniform(
                low=-1.0, high=1.0,
                size=[args.batch_size, NOISE_SIZE]).astype('float32')
            real_image = np.array(list(map(lambda x: x[0], data))).reshape(
                -1, 784).astype('float32')
            real_labels = np.ones(
                shape=[real_image.shape[0], 1], dtype='float32')
            fake_labels = np.zeros(
                shape=[real_image.shape[0], 1], dtype='float32')
            total_label = np.concatenate([real_labels, fake_labels])
            s_time = time.time()
            # 1) Generate a batch of fake images from fresh noise.
            generated_image = exe.run(g_program,
                                      feed={'noise': noise_data},
                                      fetch_list=[g_img])[0]

            total_images = np.concatenate([real_image, generated_image])

            # 2) Discriminator step on fakes (label 0) and reals (label 1).
            d_loss_1 = exe.run(d_program,
                               feed={
                                   'img': generated_image,
                                   'label': fake_labels,
                               },
                               fetch_list=[d_loss])[0][0]

            d_loss_2 = exe.run(d_program,
                               feed={
                                   'img': real_image,
                                   'label': real_labels,
                               },
                               fetch_list=[d_loss])[0][0]

            d_loss_n = d_loss_1 + d_loss_2
            losses[0].append(d_loss_n)

            # 3) Generator steps: train G more often than D to keep the
            # two networks balanced.
            for _ in six.moves.xrange(NUM_TRAIN_TIMES_OF_DG):
                noise_data = np.random.uniform(
                    low=-1.0, high=1.0,
                    size=[args.batch_size, NOISE_SIZE]).astype('float32')
                dg_loss_n = exe.run(dg_program,
                                    feed={'noise': noise_data},
                                    fetch_list=[dg_loss])[0][0]
                losses[1].append(dg_loss_n)
            t_time += (time.time() - s_time)
            if batch_id % 10 == 0 and not args.run_ce:
                if not os.path.exists(args.output):
                    os.makedirs(args.output)
                # generate image each batch
                generated_images = exe.run(g_program_test,
                                           feed={'noise': const_n},
                                           fetch_list=[g_img])[0]
                total_images = np.concatenate([real_image, generated_images])
                fig = plot(total_images)
                msg = "Epoch ID={0} Batch ID={1} D-Loss={2} DG-Loss={3}\n gen={4}".format(
                    pass_id, batch_id, d_loss_n, dg_loss_n,
                    check(generated_images))
                print(msg)
                plt.title(msg)
                plt.savefig(
                    '{}/{:04d}_{:04d}.png'.format(args.output, pass_id,
                                                  batch_id),
                    bbox_inches='tight')
                plt.close(fig)
    if args.run_ce:
        # Emit CE key-performance-indicator lines parsed by the CE harness.
        print("kpis,dcgan_d_train_cost,{}".format(np.mean(losses[0])))
        print("kpis,dcgan_g_train_cost,{}".format(np.mean(losses[1])))
        print("kpis,dcgan_duration,{}".format(t_time / args.epoch))

if __name__ == "__main__":
    # Parse CLI flags, echo them for the log, then launch training.
    cli_args = parser.parse_args()
    print_arguments(cli_args)
    train(cli_args)