# googlenet_multi_gpu.py
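"""Multi-GPU GoogLeNet (Inception v1) training benchmark on synthetic data.

Builds one model replica ("tower") per GPU, averages the tower gradients on
the CPU, and reports per-batch timing statistics.
"""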
from six.moves import xrange  # pylint: disable=redefined-builtin
from datetime import datetime
import math
import re
import time

import numpy as np
import tensorflow.python.platform
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_integer('batch_size', 64, """Batch size.""")
tf.app.flags.DEFINE_integer('num_batches', 100, """Number of batches to run.""")
tf.app.flags.DEFINE_string('data_format', 'NCHW',
                           """The data format for Convnet operations.
                           Can be either NHWC or NCHW.
                           """)

tf.app.flags.DEFINE_string('train_dir', '/train_model',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('num_gpus', 4, """How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")

NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EPOCHS_PER_DECAY = 50
INITIAL_LEARNING_RATE = 0.1
LEARNING_RATE_DECAY_FACTOR = 0.1
TOWER_NAME = 'tower'


def _conv(name, inpOp, nIn, nOut, kH, kW, dH, dW, padType, wd=0.005):
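    """Convolution + bias + ReLU helper.

    Argument naming, inferred from how the helper is used below: nIn/nOut are
    the input/output channel counts, kH/kW the kernel height/width, dH/dW the
    strides, padType the padding mode ('SAME' or 'VALID'), and wd an optional
    L2 weight-decay coefficient whose term is added to the 'losses' collection.
    """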
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(
            name + '_w', [kH, kW, nIn, nOut],
            initializer=tf.truncated_normal_initializer(
                stddev=0.01, dtype=tf.float32),
            dtype=tf.float32)

        if wd is not None:
            weight_decay = tf.mul(tf.nn.l2_loss(kernel), wd, name='weight_loss')
            tf.add_to_collection('losses', weight_decay)

        if FLAGS.data_format == 'NCHW':
            strides = [1, 1, dH, dW]
        else:
            strides = [1, dH, dW, 1]
        conv = tf.nn.conv2d(
            inpOp,
            kernel,
            strides,
            padding=padType,
            data_format=FLAGS.data_format)

        biases = tf.get_variable(
            name=name + '_b',
            shape=[nOut],
            initializer=tf.constant_initializer(
                value=0.0, dtype=tf.float32),
            dtype=tf.float32)

        bias = tf.reshape(
            tf.nn.bias_add(
                conv, biases, data_format=FLAGS.data_format),
            conv.get_shape())

        conv1 = tf.nn.relu(bias, name=scope)
        return conv1


def _affine(name, inpOp, nIn, nOut, wd=0.005, act=True):
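    """Fully connected layer: matmul(inpOp, kernel) + biases, with a ReLU when act=True."""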
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(
            name + '_w', [nIn, nOut],
            initializer=tf.truncated_normal_initializer(
                stddev=0.01, dtype=tf.float32),
            dtype=tf.float32)

        if wd is not None:
            weight_decay = tf.mul(tf.nn.l2_loss(kernel), wd, name='weight_loss')
            tf.add_to_collection('losses', weight_decay)

        biases = tf.get_variable(
            name + '_b', [nOut],
            initializer=tf.constant_initializer(
                value=0.0, dtype=tf.float32),
            dtype=tf.float32,
            trainable=True)

        affine1 = tf.nn.relu_layer(inpOp, kernel, biases, name=name) if act else \
                  tf.matmul(inpOp, kernel) + biases

        return affine1


def _mpool(name, inpOp, kH, kW, dH, dW, padding):
    if FLAGS.data_format == 'NCHW':
        ksize = [1, 1, kH, kW]
        strides = [1, 1, dH, dW]
    else:
        ksize = [1, kH, kW, 1]
        strides = [1, dH, dW, 1]
    return tf.nn.max_pool(
        inpOp,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=FLAGS.data_format,
        name=name)


def _apool(name, inpOp, kH, kW, dH, dW, padding):
    if FLAGS.data_format == 'NCHW':
        ksize = [1, 1, kH, kW]
        strides = [1, 1, dH, dW]
    else:
        ksize = [1, kH, kW, 1]
        strides = [1, dH, dW, 1]
    return tf.nn.avg_pool(
        inpOp,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=FLAGS.data_format,
        name=name)


def loss(logits, labels):
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits, labels, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')


def get_incoming_shape(incoming):
    """ Returns the incoming data shape """
    if isinstance(incoming, tf.Tensor):
        return incoming.get_shape().as_list()
    elif isinstance(incoming, (np.ndarray, list, tuple)):
        return np.shape(incoming)
    else:
        raise Exception("Invalid incoming layer.")


def _inception(name, inp, inSize, o1s, o2s1, o2s2, o3s1, o3s2, o4s1, o4s2):
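    """Inception module: four parallel branches concatenated along the channel axis.

    As built below: a 1x1 conv (o1s filters), a 1x1 reduction followed by a
    3x3 conv (o2s1 -> o2s2), a 1x1 reduction followed by a 5x5 conv
    (o3s1 -> o3s2), and an o4s1 x o4s1 max pool followed by a 1x1 projection
    (o4s2 filters). inSize is the number of input channels.
    """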
    conv1 = _conv(name + '_1', inp, inSize, o1s, 1, 1, 1, 1, 'VALID')

    conv3_ = _conv(name + '_3r', inp, inSize, o2s1, 1, 1, 1, 1, 'VALID')
    conv3 = _conv(name + '_3', conv3_, o2s1, o2s2, 3, 3, 1, 1, 'SAME')

    conv5_ = _conv(name + '_5r', inp, inSize, o3s1, 1, 1, 1, 1, 'VALID')
    conv5 = _conv(name + '_5', conv5_, o3s1, o3s2, 5, 5, 1, 1, 'SAME')

    pool_ = _mpool(name + '_pool', inp, o4s1, o4s1, 1, 1, 'SAME')
    pool = _conv(name + '_proj', pool_, inSize, o4s2, 1, 1, 1, 1, 'VALID')

    if FLAGS.data_format == 'NCHW':
        channel_dim = 1
    else:
        channel_dim = 3
    incept = tf.concat(channel_dim, [conv1, conv3, conv5, pool])
    return incept


def inference(images):
    # stage 1
    conv1 = _conv('conv1', images, 3, 64, 7, 7, 2, 2, 'SAME')
    pool1 = _mpool('pool1', conv1, 3, 3, 2, 2, 'SAME')

    # stage 2
    conv2 = _conv('conv2', pool1, 64, 64, 1, 1, 1, 1, 'VALID')
    conv3 = _conv('conv3', conv2, 64, 192, 3, 3, 1, 1, 'SAME')
    pool3 = _mpool('pool3', conv3, 3, 3, 2, 2, 'SAME')

    # stage 3
    incept3a = _inception('ince3a', pool3, 192, 64, 96, 128, 16, 32, 3, 32)
    incept3b = _inception('ince3b', incept3a, 256, 128, 128, 192, 32, 96, 3, 64)
    pool4 = _mpool('pool4', incept3b, 3, 3, 2, 2, 'SAME')

    # stage 4
    incept4a = _inception('ince4a', pool4, 480, 192, 96, 208, 16, 48, 3, 64)
    incept4b = _inception('ince4b', incept4a, 512, 160, 112, 224, 24, 64, 3, 64)
    incept4c = _inception('ince4c', incept4b, 512, 128, 128, 256, 24, 64, 3, 64)
    incept4d = _inception('ince4d', incept4c, 512, 112, 144, 288, 32, 64, 3, 64)
    incept4e = _inception('ince4e', incept4d, 528, 256, 160, 320, 32, 128, 3,
                          128)
    pool5 = _mpool('pool5', incept4e, 3, 3, 2, 2, 'SAME')

    # stage 5
    incept5a = _inception('ince5a', pool5, 832, 256, 160, 320, 32, 128, 3, 128)
    incept5b = _inception('ince5b', incept5a, 832, 384, 192, 384, 48, 128, 3,
                          128)
    pool6 = _apool('pool6', incept5b, 7, 7, 1, 1, 'VALID')

    # output 1
    resh1 = tf.reshape(pool6, [-1, 1024])
    drop = tf.nn.dropout(resh1, 0.4)
    affn1 = _affine('fc_out', drop, 1024, 1000, act=False)

    return affn1


def tower_loss(scope):
    """Calculate the total loss on a single tower running the model.
    Args:
        scope: unique prefix string identifying the tower, e.g. 'tower_0'
    Returns:
        Tensor of shape [] containing the total loss for a batch of data
    """
    image_size = 224
    if FLAGS.data_format == 'NCHW':
        image_shape = [FLAGS.batch_size, 3, image_size, image_size]
    else:
        image_shape = [FLAGS.batch_size, image_size, image_size, 3]
    images = tf.get_variable(
        'image',
        image_shape,
        initializer=tf.truncated_normal_initializer(
            stddev=0.1, dtype=tf.float32),
        dtype=tf.float32,
        trainable=False)

    labels = tf.get_variable(
        'label', [FLAGS.batch_size],
        initializer=tf.constant_initializer(1),
        dtype=tf.int32,
        trainable=False)

    # Build a Graph that computes the logits predictions from the
    # inference model.
    last_layer = inference(images)

    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    _ = loss(last_layer, labels)

    # Assemble all of the losses for the current tower only.
    losses = tf.get_collection('losses', scope)

    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')

    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
        # session. This helps the clarity of presentation on tensorboard.
        loss_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', l.op.name)
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        tf.scalar_summary(loss_name + ' (raw)', l)
        tf.scalar_summary(loss_name, loss_averages.average(l))

    with tf.control_dependencies([loss_averages_op]):
        total_loss = tf.identity(total_loss)
    return total_loss


def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.
    Note that this function provides a synchronization point across all towers.
    Args:
        tower_grads: List of lists of (gradient, variable) tuples. The outer list
            is over individual gradients. The inner list is over the gradient
            calculation for each tower.
    Returns:
        List of pairs of (gradient, variable) where the gradient has been averaged
        across all towers.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)

            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)

        # Average over the 'tower' dimension.
        grad = tf.concat(0, grads)
        grad = tf.reduce_mean(grad, 0)

        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So .. we will just return the first tower's pointer to
        # the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads


def time_tensorflow_run(session, target):
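    """Run `target` repeatedly and report per-batch timing statistics.

    The first num_steps_burn_in iterations are excluded from the reported
    mean and standard deviation to avoid start-up overhead.
    """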
    num_steps_burn_in = 50
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in xrange(FLAGS.num_batches + num_steps_burn_in):
        start_time = time.time()
        _, loss_value = session.run(target)
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = duration

                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch batch_size = %d)')
                print(format_str %
                      (datetime.now(), i - num_steps_burn_in, loss_value,
                       examples_per_sec, sec_per_batch, num_examples_per_step))

            total_duration += duration
            total_duration_squared += duration * duration

    mn = total_duration / FLAGS.num_batches
    vr = total_duration_squared / FLAGS.num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: FwdBwd across %d steps, %.3f +/- %.3f sec / batch' %
          (datetime.now(), FLAGS.num_batches, mn, sd))


def run_benchmark():
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        # Create a variable to count the number of train() calls. This equals the
        # number of batches processed * FLAGS.num_gpus.
        global_step = tf.get_variable(
            'global_step', [],
            initializer=tf.constant_initializer(0),
            trainable=False)

        # Calculate the learning rate schedule.
        num_batches_per_epoch = (NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
                                 FLAGS.batch_size)
        decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

        # Decay the learning rate exponentially based on the number of steps.
        lr = tf.train.exponential_decay(
            INITIAL_LEARNING_RATE,
            global_step,
            decay_steps,
            LEARNING_RATE_DECAY_FACTOR,
            staircase=True)

        # Create an optimizer that performs gradient descent.
        opt = tf.train.MomentumOptimizer(lr, 0.9)

        # Calculate the gradients for each model tower.
        tower_grads = []
        for i in xrange(FLAGS.num_gpus):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('%s_%d' % (TOWER_NAME, i)) as scope:
                    # Calculate the loss for one tower of the model. This function
                    # constructs the entire model but shares the variables across
                    # all towers.
                    loss = tower_loss(scope)

                    # Reuse variables for the next tower.
                    tf.get_variable_scope().reuse_variables()

                    # Retain the summaries from the final tower.
                    summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)

                    # Calculate the gradients for the batch of data on this tower.
                    grads = opt.compute_gradients(loss)

                    # Keep track of the gradients across all towers.
                    tower_grads.append(grads)

        # We must calculate the mean of each gradient. Note that this is the
        # synchronization point across all towers.
        grads = average_gradients(tower_grads)

        # Apply the gradients to adjust the shared variables.
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

        # Group all updates into a single train op.
        train_op = tf.group(apply_gradient_op)

        # Build an initialization operation.
        init = tf.initialize_all_variables()

        # Start running operations on the Graph. allow_soft_placement must be set to
        # True to build towers on GPU, as some of the ops do not have GPU
        # implementations.
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement))
        sess.run(init)
        time_tensorflow_run(sess, [train_op, loss])


def main(_):
    run_benchmark()


if __name__ == '__main__':
    tf.app.run()
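# Example invocation (flag values are illustrative; all flags are defined above):
#   python googlenet_multi_gpu.py --num_gpus=4 --batch_size=64 \
#       --data_format=NCHW --num_batches=100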