From 11b6473c2693e1b965bf4abcdf16a576ef65523b Mon Sep 17 00:00:00 2001 From: daminglu Date: Fri, 18 May 2018 12:02:57 -0700 Subject: [PATCH] Image classification & word2vec (#10738) --- .../tests/book/high-level-api/CMakeLists.txt | 1 + .../image_classification/CMakeLists.txt | 7 ++ .../cifar10_small_test_set.py | 82 +++++++++++++++++++ ...py => test_image_classification_resnet.py} | 51 +++++++----- ...gg.py => test_image_classification_vgg.py} | 53 +++++++----- 5 files changed, 152 insertions(+), 42 deletions(-) create mode 100644 python/paddle/fluid/tests/book/high-level-api/image_classification/CMakeLists.txt create mode 100644 python/paddle/fluid/tests/book/high-level-api/image_classification/cifar10_small_test_set.py rename python/paddle/fluid/tests/book/high-level-api/image_classification/{notest_image_classification_resnet.py => test_image_classification_resnet.py} (77%) rename python/paddle/fluid/tests/book/high-level-api/image_classification/{notest_image_classification_vgg.py => test_image_classification_vgg.py} (72%) diff --git a/python/paddle/fluid/tests/book/high-level-api/CMakeLists.txt b/python/paddle/fluid/tests/book/high-level-api/CMakeLists.txt index c2a15bdb3b1..da76747f82d 100644 --- a/python/paddle/fluid/tests/book/high-level-api/CMakeLists.txt +++ b/python/paddle/fluid/tests/book/high-level-api/CMakeLists.txt @@ -8,3 +8,4 @@ endforeach() add_subdirectory(fit_a_line) add_subdirectory(recognize_digits) +add_subdirectory(image_classification) diff --git a/python/paddle/fluid/tests/book/high-level-api/image_classification/CMakeLists.txt b/python/paddle/fluid/tests/book/high-level-api/image_classification/CMakeLists.txt new file mode 100644 index 00000000000..673c965b662 --- /dev/null +++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/CMakeLists.txt @@ -0,0 +1,7 @@ +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") + +# default test +foreach(src ${TEST_OPS}) + py_test(${src} SRCS ${src}.py) +endforeach() diff --git a/python/paddle/fluid/tests/book/high-level-api/image_classification/cifar10_small_test_set.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/cifar10_small_test_set.py new file mode 100644 index 00000000000..7fed6d914f7 --- /dev/null +++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/cifar10_small_test_set.py @@ -0,0 +1,82 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +CIFAR dataset. + +This module will download dataset from +https://www.cs.toronto.edu/~kriz/cifar.html and parse train/test set into +paddle reader creators. + +The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, +with 6000 images per class. There are 50000 training images and 10000 test +images. + +The CIFAR-100 dataset is just like the CIFAR-10, except it has 100 classes +containing 600 images each. There are 500 training images and 100 testing +images per class. 
+ +""" + +import cPickle +import itertools +import numpy +import paddle.v2.dataset.common +import tarfile + +__all__ = ['train10'] + +URL_PREFIX = 'https://www.cs.toronto.edu/~kriz/' +CIFAR10_URL = URL_PREFIX + 'cifar-10-python.tar.gz' +CIFAR10_MD5 = 'c58f30108f718f92721af3b95e74349a' + + +def reader_creator(filename, sub_name, batch_size=None): + def read_batch(batch): + data = batch['data'] + labels = batch.get('labels', batch.get('fine_labels', None)) + assert labels is not None + for sample, label in itertools.izip(data, labels): + yield (sample / 255.0).astype(numpy.float32), int(label) + + def reader(): + with tarfile.open(filename, mode='r') as f: + names = (each_item.name for each_item in f + if sub_name in each_item.name) + + batch_count = 0 + for name in names: + batch = cPickle.load(f.extractfile(name)) + for item in read_batch(batch): + if isinstance(batch_size, int) and batch_count > batch_size: + break + batch_count += 1 + yield item + + return reader + + +def train10(batch_size=None): + """ + CIFAR-10 training set creator. + + It returns a reader creator, each sample in the reader is image pixels in + [0, 1] and label in [0, 9]. + + :return: Training reader creator + :rtype: callable + """ + return reader_creator( + paddle.v2.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5), + 'data_batch', + batch_size=batch_size) diff --git a/python/paddle/fluid/tests/book/high-level-api/image_classification/notest_image_classification_resnet.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py similarity index 77% rename from python/paddle/fluid/tests/book/high-level-api/image_classification/notest_image_classification_resnet.py rename to python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py index 17db38797cf..1160e500dbd 100644 --- a/python/paddle/fluid/tests/book/high-level-api/image_classification/notest_image_classification_resnet.py +++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py @@ -17,6 +17,7 @@ from __future__ import print_function import paddle import paddle.fluid as fluid import numpy +import cifar10_small_test_set def resnet_cifar10(input, depth=32): @@ -81,46 +82,50 @@ def train_network(): cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=predict, label=label) - return avg_cost, accuracy + return [avg_cost, accuracy] -def train(use_cuda, save_path): +def train(use_cuda, train_program, save_dirname): BATCH_SIZE = 128 EPOCH_NUM = 1 train_reader = paddle.batch( paddle.reader.shuffle( - paddle.dataset.cifar.train10(), buf_size=128 * 10), + cifar10_small_test_set.train10(batch_size=10), buf_size=128 * 10), batch_size=BATCH_SIZE) test_reader = paddle.batch( paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE) def event_handler(event): - if isinstance(event, fluid.EndIteration): - if (event.batch_id % 10) == 0: - avg_cost, accuracy = trainer.test(reader=test_reader) + if isinstance(event, fluid.EndStepEvent): + avg_cost, accuracy = trainer.test( + reader=test_reader, feed_order=['pixel', 'label']) - print('BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'.format( - event.batch_id + 1, avg_cost, accuracy)) + print('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy)) - if accuracy > 0.01: # Low threshold for speeding up CI - trainer.params.save(save_path) - return + if accuracy > 0.01: # Low threshold for speeding up CI + if 
save_dirname is not None: + trainer.save_params(save_dirname) + return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() trainer = fluid.Trainer( - train_network, + train_func=train_program, optimizer=fluid.optimizer.Adam(learning_rate=0.001), - place=place, - event_handler=event_handler) - trainer.train(train_reader, EPOCH_NUM, event_handler=event_handler) + place=place) + trainer.train( + reader=train_reader, + num_epochs=EPOCH_NUM, + event_handler=event_handler, + feed_order=['pixel', 'label']) -def infer(use_cuda, save_path): - params = fluid.Params(save_path) + +def infer(use_cuda, inference_program, save_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - inferencer = fluid.Inferencer(inference_network, params, place=place) + inferencer = fluid.Inferencer( + infer_func=inference_program, param_path=save_dirname, place=place) # The input's dimension of conv should be 4-D or 5-D. # Use normilized image pixels as input data, which should be in the range @@ -135,8 +140,14 @@ def main(use_cuda): if use_cuda and not fluid.core.is_compiled_with_cuda(): return save_path = "image_classification_resnet.inference.model" - train(use_cuda, save_path) - infer(use_cuda, save_path) + + train( + use_cuda=use_cuda, train_program=train_network, save_dirname=save_path) + + infer( + use_cuda=use_cuda, + inference_program=inference_network, + save_dirname=save_path) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/book/high-level-api/image_classification/notest_image_classification_vgg.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py similarity index 72% rename from python/paddle/fluid/tests/book/high-level-api/image_classification/notest_image_classification_vgg.py rename to python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py index e83afeed2f7..1e3e955ba02 100644 --- a/python/paddle/fluid/tests/book/high-level-api/image_classification/notest_image_classification_vgg.py +++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py @@ -17,6 +17,7 @@ from __future__ import print_function import paddle import paddle.fluid as fluid import numpy +import cifar10_small_test_set def vgg16_bn_drop(input): @@ -60,46 +61,48 @@ def train_network(): cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=predict, label=label) - return avg_cost, accuracy + return [avg_cost, accuracy] -def train(use_cuda, save_path): +def train(use_cuda, train_program, save_dirname): BATCH_SIZE = 128 - EPOCH_NUM = 1 - train_reader = paddle.batch( paddle.reader.shuffle( - paddle.dataset.cifar.train10(), buf_size=128 * 10), + cifar10_small_test_set.train10(batch_size=10), buf_size=128 * 10), batch_size=BATCH_SIZE) test_reader = paddle.batch( paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE) def event_handler(event): - if isinstance(event, fluid.EndIteration): - if (event.batch_id % 10) == 0: - avg_cost, accuracy = trainer.test(reader=test_reader) + if isinstance(event, fluid.EndStepEvent): + avg_cost, accuracy = trainer.test( + reader=test_reader, feed_order=['pixel', 'label']) - print('BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'.format( - event.batch_id + 1, avg_cost, accuracy)) + print('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy)) - if accuracy > 0.01: # Low threshold for speeding up CI - trainer.params.save(save_path) - return + if accuracy > 
0.01: # Low threshold for speeding up CI + if save_dirname is not None: + trainer.save_params(save_dirname) + return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() trainer = fluid.Trainer( - train_network, - optimizer=fluid.optimizer.Adam(learning_rate=0.001), + train_func=train_program, place=place, - event_handler=event_handler) - trainer.train(train_reader, EPOCH_NUM, event_handler=event_handler) + optimizer=fluid.optimizer.Adam(learning_rate=0.001)) + + trainer.train( + reader=train_reader, + num_epochs=1, + event_handler=event_handler, + feed_order=['pixel', 'label']) -def infer(use_cuda, save_path): - params = fluid.Params(save_path) +def infer(use_cuda, inference_program, save_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - inferencer = fluid.Inferencer(inference_network, params, place=place) + inferencer = fluid.Inferencer( + infer_func=inference_program, param_path=save_dirname, place=place) # The input's dimension of conv should be 4-D or 5-D. # Use normilized image pixels as input data, which should be in the range @@ -114,8 +117,14 @@ def main(use_cuda): if use_cuda and not fluid.core.is_compiled_with_cuda(): return save_path = "image_classification_vgg.inference.model" - train(use_cuda, save_path) - infer(use_cuda, save_path) + + train( + use_cuda=use_cuda, train_program=train_network, save_dirname=save_path) + + infer( + use_cuda=use_cuda, + inference_program=inference_network, + save_dirname=save_path) if __name__ == '__main__': -- GitLab
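
For context, a minimal usage sketch of the small CIFAR-10 reader introduced above (cifar10_small_test_set.train10), mirroring how the renamed tests consume it. This is not part of the patch: it assumes the Python 2 PaddlePaddle environment these tests target (the new module relies on cPickle and itertools.izip) and that the CIFAR-10 archive can be downloaded by paddle.v2.dataset.common; the final loop is illustrative only.

from __future__ import print_function

import paddle
import cifar10_small_test_set

# train10(batch_size=10) caps how many samples the reader yields, which is what
# keeps the rewritten CI tests fast; train10() with no argument streams the
# full 50000-image CIFAR-10 training set.
train_reader = paddle.batch(
    paddle.reader.shuffle(
        cifar10_small_test_set.train10(batch_size=10), buf_size=128 * 10),
    batch_size=128)

# Each sample is (pixels, label): pixels are float32 values scaled to [0, 1]
# and label is an int in [0, 9], as described in the module docstring.
for mini_batch in train_reader():
    first_pixels, first_label = mini_batch[0]
    print(len(mini_batch), first_label)
    break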