# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Create train or eval dataset.
"""
import os
import mindspore.common.dtype as mstype
import mindspore.dataset.engine as de
import mindspore.dataset.transforms.vision.c_transforms as C
import mindspore.dataset.transforms.c_transforms as C2

def create_dataset(dataset_path, do_train, config, device_target, repeat_num=1, batch_size=32):
    """
    Create a train or eval dataset.

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        config: config object providing image_height and image_width.
        device_target(string): the target device, one of "Ascend" or "GPU".
        repeat_num(int): the repeat times of dataset. Default: 1.
        batch_size(int): the batch size of dataset. Default: 32.

    Returns:
        dataset
    """
    if device_target == "Ascend":
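        # the distributed launcher exports RANK_SIZE/RANK_ID; default to a
        # single unsharded dataset when running standalone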
        rank_size = int(os.getenv("RANK_SIZE", '1'))
        rank_id = int(os.getenv("RANK_ID", '0'))
        if rank_size == 1:
            ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True)
        else:
            ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True,
                                         num_shards=rank_size, shard_id=rank_id)
    elif device_target == "GPU":
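        # shard by the communication group only for training; eval reads
        # the full dataset on each device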
        if do_train:
            from mindspore.communication.management import get_rank, get_group_size
            ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True,
                                         num_shards=get_group_size(), shard_id=get_rank())
        else:
            ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True)
    else:
        raise ValueError("Unsupported device_target.")

    resize_height = config.image_height
    resize_width = config.image_width
    buffer_size = 1000

    # define map operations
    decode_op = C.Decode()
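    # RandomCropDecodeResize fuses decode, random crop and resize in a single
    # op, so the training pipeline below needs no separate decode_op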
    resize_crop_op = C.RandomCropDecodeResize(resize_height, scale=(0.08, 1.0), ratio=(0.75, 1.333))
    horizontal_flip_op = C.RandomHorizontalFlip(prob=0.5)

    resize_op = C.Resize((256, 256))
    center_crop = C.CenterCrop(resize_width)
    color_adjust_op = C.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
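    # standard ImageNet channel statistics, scaled from [0, 1] to the [0, 255] pixel range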
    normalize_op = C.Normalize(mean=[0.485*255, 0.456*255, 0.406*255], std=[0.229*255, 0.224*255, 0.225*255])
    change_swap_op = C.HWC2CHW()

    if do_train:
        trans = [resize_crop_op, horizontal_flip_op, color_adjust_op, normalize_op, change_swap_op]
    else:
        trans = [decode_op, resize_op, center_crop, normalize_op, change_swap_op]

    type_cast_op = C2.TypeCast(mstype.int32)

    ds = ds.map(input_columns="image", operations=trans, num_parallel_workers=8)
    ds = ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=8)

    # apply shuffle operations
    ds = ds.shuffle(buffer_size=buffer_size)

    # apply batch operations
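    # drop_remainder=True keeps every batch at the same static shape, which
    # graph-mode execution expects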
    ds = ds.batch(batch_size, drop_remainder=True)

    # apply dataset repeat operation
    ds = ds.repeat(repeat_num)

    return ds
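

# usage sketch (illustrative): assumes an ImageNet-style layout with one
# sub-directory per class and a config carrying image_height/image_width;
# the path and values below are hypothetical stand-ins
if __name__ == "__main__":
    from types import SimpleNamespace

    cfg = SimpleNamespace(image_height=224, image_width=224)  # assumed config
    train_ds = create_dataset("/path/to/imagenet/train", do_train=True,
                              config=cfg, device_target="Ascend", batch_size=32)
    print("batches per epoch:", train_ds.get_dataset_size())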