Unverified commit 20d1e9bf, authored by H hong, committed by GitHub

update new reader for resnet, mobilenet; test=develop (#4685)

Parent 856c428f
......@@ -2,6 +2,10 @@
Image classification is an important area of computer vision; its goal is to assign an image to one of a set of predefined labels. CNN models have achieved breakthrough results in image classification, while model complexity keeps growing. MobileNet is a compact yet efficient CNN model, and this document shows how to classify images with MobileNet using PaddlePaddle's dynamic graph mode.
**Version requirements**
On the develop branch, this model requires the develop build of PaddlePaddle or the paddle 2.0 alpha release to run.
To run it on paddle 1.8, switch to the release/1.8 branch.
**Code structure**
├── run_mul_v1.sh # launch script for multi-GPU training of v1
......@@ -55,9 +59,9 @@
**Model performance**
| Model | Top-1 (1 GPU / 4 GPUs) | Top-5 (1 GPU / 4 GPUs) | Time to converge (1 GPU / 4 GPUs) |
| --- | --- | --- | --- |
| MobileNetV1 | 0.707 / 0.711 | 0.897 / 0.899 | 116 h / 30.9 h |
| MobileNetV2 | 0.708 / 0.724 | 0.899 / 0.906 | 227.8 h / 60.8 h |
**Reference papers**
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import math
import random
import numpy as np
from paddle.incubate.hapi.datasets import DatasetFolder
from paddle.incubate.hapi.vision.transforms import transforms
from paddle import fluid
class ImageNetDataset(DatasetFolder):
def __init__(self,
path,
mode='train',
image_size=224,
resize_short_size=256):
super(ImageNetDataset, self).__init__(path)
self.mode = mode
normalize = transforms.Normalize(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.120, 57.375])
if self.mode == 'train':
self.transform = transforms.Compose([
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.Permute(mode='CHW'), normalize
])
else:
self.transform = transforms.Compose([
transforms.Resize(resize_short_size),
transforms.CenterCrop(image_size),
transforms.Permute(mode='CHW'), normalize
])
def __getitem__(self, idx):
img_path, label = self.samples[idx]
img = cv2.imread(img_path).astype(np.float32)
label = np.array([label]).astype(np.int64)
return self.transform(img), label
def __len__(self):
return len(self.samples)
......@@ -31,6 +31,9 @@ from utils import *
from mobilenet_v1 import *
from mobilenet_v2 import *
from imagenet_dataset import ImageNetDataset
from paddle.io import DataLoader
args = parse_args()
if int(os.getenv("PADDLE_TRAINER_ID", 0)) == 0:
print_arguments(args)
......@@ -116,14 +119,31 @@ def train_mobilenet():
optimizer.set_dict(opti_dict)
# 3. reader
train_data_loader = utility.create_data_loader(is_train=True, args=args)
test_data_loader = utility.create_data_loader(is_train=False, args=args)
num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
imagenet_reader = reader.ImageNetReader(seed=0, place_num=place_num)
train_reader = imagenet_reader.train(settings=args)
test_reader = imagenet_reader.val(settings=args)
train_data_loader.set_sample_list_generator(train_reader, place)
test_data_loader.set_sample_list_generator(test_reader, place)
train_dataset = ImageNetDataset(
os.path.join(args.data_dir, "train"), mode='train')
train_data_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
places=place,
shuffle=True,
drop_last=True,
num_workers=10)
test_dataset = ImageNetDataset(
os.path.join(args.data_dir, "val"), mode='val')
test_data_loader = DataLoader(
test_dataset,
batch_size=args.batch_size,
places=place,
shuffle=False,  # evaluation should see every sample exactly once
drop_last=False,
num_workers=1)
# 4. train loop
total_batch_num = 0 #this is for benchmark
......@@ -184,7 +204,7 @@ def train_mobilenet():
total_sample += 1
batch_id += 1
t_last = time.time()
# NOTE: used for benchmark
total_batch_num = total_batch_num + 1
......
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import sys
import os
import math
import random
import functools
import numpy as np
import cv2
import paddle
from paddle import fluid
from PIL import Image
policy = None
random.seed(0)
np.random.seed(0)
def random_crop(img, size, interpolation=Image.BILINEAR):
"""random crop image
Args:
img: image data
size: crop size
Returns:
randomly cropped image data
"""
lower_scale = 0.08
lower_ratio = 3. / 4.
upper_ratio = 4. / 3.
scale = [lower_scale, 1.0]
ratio = [lower_ratio, upper_ratio]
aspect_ratio = math.sqrt(np.random.uniform(*ratio))
w = 1. * aspect_ratio
h = 1. / aspect_ratio
bound = min((float(img.shape[0]) / img.shape[1]) / (h**2),
(float(img.shape[1]) / img.shape[0]) / (w**2))
scale_max = min(scale[1], bound)
scale_min = min(scale[0], bound)
target_area = img.shape[0] * img.shape[1] * np.random.uniform(scale_min,
scale_max)
target_size = math.sqrt(target_area)
w = int(target_size * w)
h = int(target_size * h)
i = np.random.randint(0, img.shape[0] - h + 1)
j = np.random.randint(0, img.shape[1] - w + 1)
img = img[i:i + h, j:j + w, :]
if interpolation:
resized = cv2.resize(img, (size, size), interpolation=interpolation)
else:
resized = cv2.resize(img, (size, size))
return resized
def resize_short(img, target_size, interpolation=None):
"""resize image
Args:
img: image data
target_size: resize short target size
interpolation: interpolation mode
Returns:
resized image data
"""
percent = float(target_size) / min(img.shape[0], img.shape[1])
resized_width = int(round(img.shape[1] * percent))
resized_height = int(round(img.shape[0] * percent))
if interpolation:
resized = cv2.resize(
img, (resized_width, resized_height), interpolation=interpolation)
else:
resized = cv2.resize(img, (resized_width, resized_height))
return resized
def crop_image(img, target_size, center):
"""crop image
Args:
img: images data
target_size: crop target size
center: crop mode
Returns:
img: cropped image data
"""
height, width = img.shape[:2]
size = target_size
if center:
w_start = (width - size) // 2
h_start = (height - size) // 2
else:
w_start = np.random.randint(0, width - size + 1)
h_start = np.random.randint(0, height - size + 1)
w_end = w_start + size
h_end = h_start + size
img = img[h_start:h_end, w_start:w_end, :]
return img
def process_image(sample, settings, mode):
""" process_image """
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
crop_size = 224
img_path = sample[0]
img = cv2.imread(img_path)
if mode == 'train':
img = random_crop(img, crop_size)
if np.random.randint(0, 2) == 1:
img = img[:, ::-1, :]
else:
if crop_size > 0:
target_size = settings.resize_short_size
img = resize_short(
img, target_size, interpolation=settings.interpolation)
img = crop_image(img, target_size=crop_size, center=True)
img = img[:, :, ::-1]
img = img.astype('float32').transpose((2, 0, 1)) / 255
img_mean = np.array(mean).reshape((3, 1, 1))
img_std = np.array(std).reshape((3, 1, 1))
img -= img_mean
img /= img_std
if mode == 'train' or mode == 'val':
return (img, np.array([sample[1]]))
elif mode == 'test':
return (img, )
def process_batch_data(input_data, settings, mode):
batch_data = []
for sample in input_data:
if os.path.isfile(sample[0]):
batch_data.append(process_image(sample, settings, mode))
else:
print("File not exist : %s" % sample[0])
return batch_data
class ImageNetReader:
def __init__(self, seed=None):
self.shuffle_seed = seed
def set_shuffle_seed(self, seed):
assert isinstance(seed, int), "shuffle seed must be int"
self.shuffle_seed = seed
def _reader_creator(self,
settings,
file_list,
mode,
shuffle=False,
data_dir=None):
if mode == 'test':
batch_size = 1
else:
batch_size = settings.batch_size
def reader():
def read_file_list():
with open(file_list) as flist:
full_lines = [line.strip() for line in flist]
if mode != "test" and len(full_lines) < settings.batch_size:
print(
"Warning: the dataset has only {} samples, fewer than the batch_size ({}); since drop_last is enabled, nothing would be fed to the program. Terminating now. Please use a smaller batch_size or provide more data!"
.format(len(full_lines), settings.batch_size))
os._exit(1)
if shuffle:
assert self.shuffle_seed is not None, "the shuffle seed must be set for multi-process training!"
np.random.RandomState(self.shuffle_seed).shuffle(
full_lines)
batch_data = []
for line in full_lines:
img_path, label = line.split()
img_path = os.path.join(data_dir, img_path)
batch_data.append([img_path, int(label)])
if len(batch_data) == batch_size:
if mode in ('train', 'val', 'test'):
yield batch_data
batch_data = []
return read_file_list
data_reader = reader()
mapper = functools.partial(
process_batch_data, settings=settings, mode=mode)
ret = fluid.io.xmap_readers(
mapper,
data_reader,
settings.reader_thread,
settings.reader_buf_size,
order=False)
return ret
def train(self, settings):
"""Create a reader for trainning
Args:
settings: arguments
Returns:
train reader
"""
file_list = os.path.join(settings.data_dir, 'train_list.txt')
assert os.path.isfile(
file_list), "{} doesn't exist, please check data list path".format(
file_list)
reader = self._reader_creator(
settings,
file_list,
'train',
shuffle=True,
data_dir=settings.data_dir)
return reader
def val(self, settings):
"""Create a reader for eval
Args:
settings: arguments
Returns:
eval reader
"""
file_list = os.path.join(settings.data_dir, 'val_list.txt')
assert os.path.isfile(
file_list), "{} doesn't exist, please check data list path".format(
file_list)
return self._reader_creator(
settings,
file_list,
'val',
shuffle=False,
data_dir=settings.data_dir)
def test(self, settings):
"""Create a reader for testing
Args:
settings: arguments
Returns:
test reader
"""
file_list = os.path.join(settings.data_dir, 'val_list.txt')
assert os.path.isfile(
file_list), "{} doesn't exist, please check data list path".format(
file_list)
return self._reader_creator(
settings,
file_list,
'test',
shuffle=False,
data_dir=settings.data_dir)
......@@ -26,6 +26,7 @@ from paddle.fluid import framework
import math
import sys
import time
import reader
IMAGENET1000 = 1281167
base_lr = 0.1
......@@ -46,9 +47,90 @@ def parse_args():
parser.add_argument(
"-b", "--batch_size", default=32, type=int, help="batch size")
parser.add_argument("--ce", action="store_true", help="run ce")
# NOTE: used in benchmark
parser.add_argument(
"--max_iter",
default=0,
type=int,
help="the max iters to train, used in benchmark")
parser.add_argument(
"--class_dim",
default=102,
type=int,
help="the class number of flowers dataset")
parser.add_argument(
"--use_imagenet_data",
action="store_true",
help="Use imagenet dataset instead of the flowers dataset(small dataset)"
)
parser.add_argument(
'--data_dir',
default="./data/ILSVRC2012",
type=str,
help="The ImageNet dataset root directory.")
parser.add_argument(
'--lower_scale',
default=0.08,
type=float,
help="The value of lower_scale in ramdom_crop")
parser.add_argument(
'--lower_ratio',
default=3. / 4.,
type=float,
help="The value of lower_ratio in ramdom_crop")
parser.add_argument(
'--upper_ratio',
default=4. / 3.,
type=float,
help="The value of upper_ratio in ramdom_crop")
parser.add_argument(
'--resize_short_size',
default=256,
type=int,
help="The value of resize_short_size")
parser.add_argument(
'--crop_size', default=224, type=int, help="The value of crop size")
parser.add_argument(
'--use_mixup', action='store_true', help="Whether to use mixup")
parser.add_argument(
'--mixup_alpha',
default=0.2,
type=float,
help="The value of mixup_alpha")
parser.add_argument(
'--reader_thread',
default=8,
type=int,
help="The number of multi thread reader")
parser.add_argument(
'--reader_buf_size',
default=16,
type=int,
help="The buf size of multi thread reader")
parser.add_argument(
'--interpolation',
default=None,
type=int,
help="The interpolation mode")
parser.add_argument(
'--use_aa',
action='store_true',
help="Whether to use auto augment")
parser.add_argument(
'--image_mean',
nargs='+',
type=float,
default=[0.485, 0.456, 0.406],
help="The mean of input image data")
parser.add_argument(
'--image_std',
nargs='+',
type=float,
default=[0.229, 0.224, 0.225],
help="The std of input image data")
args = parser.parse_args()
return args
......@@ -288,30 +370,47 @@ def train_resnet():
if args.use_data_parallel:
strategy = fluid.dygraph.parallel.prepare_context()
resnet = ResNet()
resnet = ResNet(class_dim=args.class_dim)
optimizer = optimizer_setting(parameter_list=resnet.parameters())
if args.use_data_parallel:
resnet = fluid.dygraph.parallel.DataParallel(resnet, strategy)
train_reader = paddle.batch(
reader_decorator(paddle.dataset.flowers.train(use_xmap=True)),
batch_size=batch_size,
drop_last=True)
if args.use_imagenet_data:
imagenet_reader = reader.ImageNetReader(0)
train_reader = imagenet_reader.train(settings=args)
else:
train_reader = paddle.batch(
reader_decorator(paddle.dataset.flowers.train(use_xmap=True)),
batch_size=batch_size,
drop_last=True)
if args.use_data_parallel:
train_reader = fluid.contrib.reader.distributed_batch_reader(
train_reader)
test_reader = paddle.batch(
reader_decorator(paddle.dataset.flowers.test(use_xmap=True)),
batch_size=batch_size,
drop_last=True)
train_loader = fluid.io.DataLoader.from_generator(capacity=10)
if args.use_imagenet_data:
test_reader = imagenet_reader.val(settings=args)
else:
test_reader = paddle.batch(
reader_decorator(paddle.dataset.flowers.test(use_xmap=True)),
batch_size=batch_size,
drop_last=True)
train_loader = fluid.io.DataLoader.from_generator(
capacity=32,
use_double_buffer=True,
iterable=True,
return_list=True,
use_multiprocess=True)
train_loader.set_sample_list_generator(train_reader, places=place)
test_loader = fluid.io.DataLoader.from_generator(capacity=10)
test_loader = fluid.io.DataLoader.from_generator(
capacity=64,
use_double_buffer=True,
iterable=True,
return_list=True,
use_multiprocess=True)
test_loader.set_sample_list_generator(test_reader, places=place)
#file_name = './model/epoch_0.npz'
......@@ -368,7 +467,7 @@ def train_resnet():
total_acc1 += acc_top1.numpy()
total_acc5 += acc_top5.numpy()
total_sample += 1
total_batch_num = total_batch_num + 1  #this is for benchmark
#print("epoch id: %d, batch step: %d, loss: %f" % (eop, batch_id, dy_out))
if batch_id % 10 == 0:
print( "epoch %d | batch step %d, loss %0.3f acc1 %0.3f acc5 %0.3f, batch cost: %.5f" % \
......