Commit 27c66cfa authored by LielinJiang

Merge branch 'master' of https://github.com/PaddlePaddle/hapi into style-transfer

@@ -2,3 +2,6 @@
*.json
output*
*checkpoint*
build
dist
hapi.egg-info
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import copy
from progressbar import ProgressBar
from paddle.fluid.dygraph.parallel import ParallelEnv
def config_callbacks(callbacks=None,
model=None,
batch_size=None,
epochs=None,
steps=None,
log_freq=2,
verbose=2,
save_freq=1,
save_dir=None,
metrics=None,
mode='train'):
cbks = callbacks or []
cbks = cbks if isinstance(cbks, (list, tuple)) else [cbks]
if not any(isinstance(k, ProgBarLogger) for k in cbks) and verbose:
cbks = cbks + [ProgBarLogger(log_freq, verbose=verbose)]
if not any(isinstance(k, ModelCheckpoint) for k in cbks):
cbks = cbks + [ModelCheckpoint(save_freq, save_dir)]
cbk_list = CallbackList(cbks)
cbk_list.set_model(model)
metrics = metrics or [] if mode != 'test' else []
params = {
'batch_size': batch_size,
'epochs': epochs,
'steps': steps,
'verbose': verbose,
'metrics': metrics,
}
cbk_list.set_params(params)
return cbk_list
class CallbackList(object):
def __init__(self, callbacks=None):
# copy
self.callbacks = [c for c in callbacks]
self.params = {}
self.model = None
def append(self, callback):
self.callbacks.append(callback)
def __iter__(self):
return iter(self.callbacks)
def set_params(self, params):
for c in self.callbacks:
c.set_params(params)
def set_model(self, model):
for c in self.callbacks:
c.set_model(model)
def _call(self, name, *args):
for c in self.callbacks:
func = getattr(c, name)
func(*args)
def _check_mode(self, mode):
assert mode in ['train', 'eval', 'test'], \
'mode should be train, eval or test'
def on_begin(self, mode, logs=None):
self._check_mode(mode)
name = 'on_{}_begin'.format(mode)
self._call(name, logs)
def on_end(self, mode, logs=None):
self._check_mode(mode)
name = 'on_{}_end'.format(mode)
self._call(name, logs)
def on_epoch_begin(self, epoch=None, logs=None):
self._call('on_epoch_begin', epoch, logs)
def on_epoch_end(self, epoch=None, logs=None):
self._call('on_epoch_end', epoch, logs)
def on_batch_begin(self, mode, step=None, logs=None):
self._check_mode(mode)
name = 'on_{}_batch_begin'.format(mode)
self._call(name, step, logs)
def on_batch_end(self, mode, step=None, logs=None):
self._check_mode(mode)
name = 'on_{}_batch_end'.format(mode)
self._call(name, step, logs)
class Callback(object):
def __init__(self):
self.model = None
self.params = {}
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
def on_train_begin(self, logs=None):
"""
"""
def on_train_end(self, logs=None):
"""
"""
def on_eval_begin(self, logs=None):
"""
"""
def on_eval_end(self, logs=None):
"""
"""
def on_test_begin(self, logs=None):
"""
"""
def on_test_end(self, logs=None):
"""
"""
def on_epoch_begin(self, epoch, logs=None):
"""
"""
def on_epoch_end(self, epoch, logs=None):
"""
"""
def on_train_batch_begin(self, step, logs=None):
"""
"""
def on_train_batch_end(self, step, logs=None):
"""
"""
def on_eval_batch_begin(self, step, logs=None):
"""
"""
def on_eval_batch_end(self, step, logs=None):
"""
"""
def on_test_batch_begin(self, step, logs=None):
"""
"""
def on_test_batch_end(self, step, logs=None):
"""
"""
class ProgBarLogger(Callback):
def __init__(self, log_freq=1, verbose=2):
self.epochs = None
self.steps = None
self.progbar = None
self.verbose = verbose
self.log_freq = log_freq
def on_train_begin(self, logs=None):
self.epochs = self.params['epochs']
assert self.epochs
self.train_metrics = self.params['metrics']
assert self.train_metrics
def on_epoch_begin(self, epoch=None, logs=None):
self.steps = self.params['steps']
self.epoch = epoch
self.train_step = 0
if self.verbose and self.epochs and ParallelEnv().local_rank == 0:
print('Epoch %d/%d' % (epoch + 1, self.epochs))
self.train_progbar = ProgressBar(num=self.steps, verbose=self.verbose)
def _updates(self, logs, mode):
values = []
metrics = getattr(self, '%s_metrics' % (mode))
progbar = getattr(self, '%s_progbar' % (mode))
steps = getattr(self, '%s_step' % (mode))
for k in metrics:
if k in logs:
values.append((k, logs[k]))
progbar.update(steps, values)
def on_train_batch_end(self, step, logs=None):
logs = logs or {}
self.train_step += 1
if self.train_step % self.log_freq == 0 and self.verbose and ParallelEnv(
).local_rank == 0:
# if steps is not None, last step will update in on_epoch_end
if self.steps and self.train_step < self.steps:
self._updates(logs, 'train')
else:
self._updates(logs, 'train')
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
if self.verbose and ParallelEnv().local_rank == 0:
self._updates(logs, 'train')
def on_eval_begin(self, logs=None):
logs = logs or {}
self.eval_steps = logs.get('steps', None)
self.eval_metrics = logs.get('metrics_name', [])
self.eval_step = 0
self.evaled_samples = 0
self.eval_progbar = ProgressBar(
num=self.eval_steps, verbose=self.verbose)
if ParallelEnv().local_rank == 0:
print('Eval begin...')
def on_eval_batch_end(self, step, logs=None):
logs = logs or {}
self.eval_step = step
samples = logs.get('batch_size', 1)
self.evaled_samples += samples
if self.eval_step % self.log_freq == 0 and self.verbose and ParallelEnv(
).local_rank == 0:
# if steps is not None, last step will update in on_epoch_end
if self.eval_steps and self.eval_step < self.eval_steps:
self._updates(logs, 'eval')
def on_eval_end(self, logs=None):
logs = logs or {}
if self.verbose and ParallelEnv().local_rank == 0:
self._updates(logs, 'eval')
print('Eval samples: %d' % (self.evaled_samples))
class ModelCheckpoint(Callback):
def __init__(self, save_freq=1, save_dir=None):
self.save_freq = save_freq
self.save_dir = save_dir
def on_epoch_begin(self, epoch=None, logs=None):
self.epoch = epoch
def _is_save(self):
return self.model and self.save_dir and ParallelEnv().local_rank == 0
def on_epoch_end(self, epoch, logs=None):
if self._is_save() and self.epoch % self.save_freq == 0:
path = '{}/{}'.format(self.save_dir, epoch)
print('save checkpoint at {}'.format(path))
self.model.save(path)
def on_train_end(self, logs=None):
if self._is_save():
path = '{}/final'.format(self.save_dir)
print('save checkpoint at {}'.format(path))
self.model.save(path)
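
A minimal usage sketch for the callback machinery above (an illustration, not part of the commit; LossHistory is a hypothetical callback name):

from hapi.callbacks import Callback, config_callbacks

class LossHistory(Callback):
    # record the loss value reported in the batch-end logs
    def on_train_begin(self, logs=None):
        self.losses = []

    def on_train_batch_end(self, step, logs=None):
        logs = logs or {}
        if 'loss' in logs:
            self.losses.append(logs['loss'])

# ProgBarLogger and ModelCheckpoint are appended automatically
cbks = config_callbacks(callbacks=[LossHistory()],
                        epochs=2, steps=100, verbose=1, metrics=['loss'])
cbks.on_begin('train')
cbks.on_epoch_begin(0)
cbks.on_batch_end('train', 0, {'loss': 0.5})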
@@ -30,7 +30,7 @@ IMAGES_ROOT = "./data/" + DATASET + "/"
import paddle.fluid as fluid
-class Cityscapes(fluid.io.Dataset):
+class Cityscapes(paddle.io.Dataset):
def __init__(self, root_path, file_path, mode='train', return_name=False):
self.root_path = root_path
self.file_path = file_path
...
@@ -86,13 +86,13 @@ def main():
if FLAGS.resume:
g.load(FLAGS.resume)
-loader_A = fluid.io.DataLoader(
+loader_A = paddle.io.DataLoader(
data.DataA(),
places=place,
shuffle=True,
return_list=True,
batch_size=FLAGS.batch_size)
-loader_B = fluid.io.DataLoader(
+loader_B = paddle.io.DataLoader(
data.DataB(),
places=place,
shuffle=True,
...
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import six
import time
import math
import socket
import contextlib
import numpy as np
from paddle import fluid
from paddle.fluid.layers import collective
from paddle.fluid.dygraph.parallel import ParallelEnv, ParallelStrategy
from paddle.fluid.io import BatchSampler
_parallel_context_initialized = False
class DistributedBatchSampler(BatchSampler):
"""Sampler that restricts data loading to a subset of the dataset.
In such a case, each process can pass a DistributedBatchSampler instance
as a DataLoader sampler, and load a subset of the original dataset that
is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Args:
data_source: this could be a `fluid.io.Dataset` implementation
or another python object which implements `__len__`, for
BatchSampler to get the sample number of the data source.
batch_size(int): number of sample indices in each mini-batch.
shuffle(bool): whether to shuffle the indices before generating
batch indices. Default False.
drop_last(bool): whether to drop the last incomplete batch when the
dataset size is not divisible by the batch size. Default False.
"""
def __init__(self, dataset, batch_size, shuffle=False, drop_last=False):
self.dataset = dataset
assert isinstance(batch_size, int) and batch_size > 0, \
"batch_size should be a positive integer"
self.batch_size = batch_size
assert isinstance(shuffle, bool), \
"shuffle should be a boolean value"
self.shuffle = shuffle
assert isinstance(drop_last, bool), \
"drop_last should be a boolean value"
self.drop_last = drop_last
self.nranks = ParallelEnv().nranks
self.local_rank = ParallelEnv().local_rank
self.epoch = 0
self.num_samples = int(
math.ceil(len(self.dataset) * 1.0 / self.nranks))
self.total_size = self.num_samples * self.nranks
def __iter__(self):
num_samples = len(self.dataset)
indices = np.arange(num_samples).tolist()
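# pad with leading indices so len(indices) == total_size and every rank gets num_samples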
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
if self.shuffle:
np.random.RandomState(self.epoch).shuffle(indices)
self.epoch += 1
# subsample
def _get_indices_by_batch_size(indices):
subsampled_indices = []
last_batch_size = self.total_size % (self.batch_size * self.nranks)
assert last_batch_size % self.nranks == 0
last_local_batch_size = last_batch_size // self.nranks
for i in range(self.local_rank * self.batch_size,
len(indices) - last_batch_size,
self.batch_size * self.nranks):
subsampled_indices.extend(indices[i:i + self.batch_size])
indices = indices[len(indices) - last_batch_size:]
subsampled_indices.extend(indices[
self.local_rank * last_local_batch_size:(
self.local_rank + 1) * last_local_batch_size])
return subsampled_indices
if self.nranks > 1:
indices = _get_indices_by_batch_size(indices)
assert len(indices) == self.num_samples
_sample_iter = iter(indices)
batch_indices = []
for idx in _sample_iter:
batch_indices.append(idx)
if len(batch_indices) == self.batch_size:
yield batch_indices
batch_indices = []
if not self.drop_last and len(batch_indices) > 0:
yield batch_indices
def __len__(self):
num_samples = self.num_samples
num_samples += int(not self.drop_last) * (self.batch_size - 1)
return num_samples // self.batch_size
def set_epoch(self, epoch):
self.epoch = epoch
def _all_gather(x, nranks, ring_id=0, use_calc_stream=True):
return collective._c_allgather(
x, nranks, ring_id=ring_id, use_calc_stream=use_calc_stream)
def wait_server_ready(endpoints):
assert not isinstance(endpoints, six.string_types)
while True:
all_ok = True
not_ready_endpoints = []
for ep in endpoints:
ip_port = ep.split(":")
with contextlib.closing(
socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
result = sock.connect_ex((ip_port[0], int(ip_port[1])))
if result != 0:
all_ok = False
not_ready_endpoints.append(ep)
if not all_ok:
time.sleep(3)
else:
break
def init_communicator(program, rank, nranks, wait_port, current_endpoint,
endpoints):
if nranks < 2:
return
other_endpoints = endpoints[:]
other_endpoints.remove(current_endpoint)
if rank == 0 and wait_port:
wait_server_ready(other_endpoints)
block = program.global_block()
nccl_id_var = block.create_var(
name=fluid.unique_name.generate('nccl_id'),
persistable=True,
type=fluid.core.VarDesc.VarType.RAW)
block.append_op(
type='c_gen_nccl_id',
inputs={},
outputs={'Out': nccl_id_var},
attrs={
'rank': rank,
'endpoint': current_endpoint,
'other_endpoints': other_endpoints
})
block.append_op(
type='c_comm_init',
inputs={'X': nccl_id_var},
outputs={},
attrs={
'nranks': nranks,
'rank': rank,
'ring_id': 0,
})
def prepare_distributed_context(place=None):
if place is None:
place = fluid.CUDAPlace(ParallelEnv().dev_id) if ParallelEnv().nranks > 1 \
else fluid.CUDAPlace(0)
strategy = ParallelStrategy()
strategy.nranks = ParallelEnv().nranks
strategy.local_rank = ParallelEnv().local_rank
strategy.trainer_endpoints = ParallelEnv().trainer_endpoints
strategy.current_endpoint = ParallelEnv().current_endpoint
if strategy.nranks < 2:
return
global _parallel_context_initialized
if not _parallel_context_initialized and isinstance(place,
fluid.CUDAPlace):
def _init_context():
communicator_prog = fluid.Program()
init_communicator(communicator_prog, strategy.local_rank,
strategy.nranks, True, strategy.current_endpoint,
strategy.trainer_endpoints)
exe = fluid.Executor(place)
exe.run(communicator_prog)
if fluid.in_dygraph_mode():
fluid.disable_dygraph()
_init_context()
fluid.enable_dygraph(place)
else:
_init_context()
else:
assert _parallel_context_initialized, \
"Only support CUDAPlace for now."
_parallel_context_initialized = True
return strategy
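
A minimal usage sketch for DistributedBatchSampler (an illustration, not part of the commit; `dataset` stands for any object implementing __len__ and __getitem__):

from paddle.fluid.io import DataLoader

sampler = DistributedBatchSampler(dataset, batch_size=32, shuffle=True)
loader = DataLoader(dataset,
                    batch_sampler=sampler,
                    places=fluid.CUDAPlace(ParallelEnv().dev_id),
                    return_list=True)
for epoch in range(10):
    sampler.set_epoch(epoch)  # same seed on every rank keeps the shards disjoint
    for batch in loader:
        pass  # train on this rank's shard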
@@ -20,7 +20,7 @@ import json
sys.path.append('../')
-from metrics import Metric
+from hapi.metrics import Metric
from bmn_utils import boundary_choose, bmn_post_processing
...
@@ -162,56 +162,3 @@ def bmn_post_processing(video_dict, subset, output_path, result_path):
outfile.close()
def _get_interp1d_bin_mask(seg_xmin, seg_xmax, tscale, num_sample,
num_sample_perbin):
""" generate sample mask for a boundary-matching pair """
plen = float(seg_xmax - seg_xmin)
plen_sample = plen / (num_sample * num_sample_perbin - 1.0)
total_samples = [
seg_xmin + plen_sample * ii
for ii in range(num_sample * num_sample_perbin)
]
p_mask = []
for idx in range(num_sample):
bin_samples = total_samples[idx * num_sample_perbin:(idx + 1) *
num_sample_perbin]
bin_vector = np.zeros([tscale])
for sample in bin_samples:
sample_upper = math.ceil(sample)
sample_decimal, sample_down = math.modf(sample)
if int(sample_down) <= (tscale - 1) and int(sample_down) >= 0:
bin_vector[int(sample_down)] += 1 - sample_decimal
if int(sample_upper) <= (tscale - 1) and int(sample_upper) >= 0:
bin_vector[int(sample_upper)] += sample_decimal
bin_vector = 1.0 / num_sample_perbin * bin_vector
p_mask.append(bin_vector)
p_mask = np.stack(p_mask, axis=1)
return p_mask
def get_interp1d_mask(tscale, dscale, prop_boundary_ratio, num_sample,
num_sample_perbin):
""" generate sample mask for each point in Boundary-Matching Map """
mask_mat = []
for start_index in range(tscale):
mask_mat_vector = []
for duration_index in range(dscale):
if start_index + duration_index < tscale:
p_xmin = start_index
p_xmax = start_index + duration_index
center_len = float(p_xmax - p_xmin) + 1
sample_xmin = p_xmin - center_len * prop_boundary_ratio
sample_xmax = p_xmax + center_len * prop_boundary_ratio
p_mask = _get_interp1d_bin_mask(sample_xmin, sample_xmax,
tscale, num_sample,
num_sample_perbin)
else:
p_mask = np.zeros([tscale, num_sample])
mask_mat_vector.append(p_mask)
mask_mat_vector = np.stack(mask_mat_vector, axis=2)
mask_mat.append(mask_mat_vector)
mask_mat = np.stack(mask_mat, axis=3)
mask_mat = mask_mat.astype(np.float32)
sample_mask = np.reshape(mask_mat, [tscale, -1])
return sample_mask
@@ -18,11 +18,9 @@ import sys
import logging
import paddle.fluid as fluid
-sys.path.append('../')
-from model import set_device, Input
-from bmn_model import BMN, BmnLoss
+from hapi.model import set_device, Input
+from hapi.vision.models import BMN, BmnLoss
from bmn_metric import BmnMetric
from reader import BmnDataset
from config_utils import *
...
@@ -18,11 +18,9 @@ import os
import logging
import paddle.fluid as fluid
-sys.path.append('../')
-from model import set_device, Input
-from bmn_model import BMN, BmnLoss
+from hapi.model import set_device, Input
+from hapi.vision.models import BMN, BmnLoss
from bmn_metric import BmnMetric
from reader import BmnDataset
from config_utils import *
...
@@ -22,7 +22,7 @@ import sys
sys.path.append('../')
from distributed import DistributedBatchSampler
-from paddle.fluid.io import Dataset, DataLoader
+from paddle.io import Dataset, DataLoader
logger = logging.getLogger(__name__)
...
@@ -18,10 +18,8 @@ import logging
import sys
import os
-sys.path.append('../')
-from model import set_device, Input
-from bmn_model import BMN, BmnLoss
+from hapi.model import set_device, Input
+from hapi.vision.models import BMN, BmnLoss
from reader import BmnDataset
from config_utils import *
...
@@ -18,8 +18,8 @@ import math
import random
import numpy as np
-from datasets.folder import DatasetFolder
-from transform import transforms
+from hapi.datasets import DatasetFolder
+from hapi.vision.transforms import transforms
from paddle import fluid
@@ -45,7 +45,8 @@ class ImageNetDataset(DatasetFolder):
def __getitem__(self, idx):
img_path, label = self.samples[idx]
img = cv2.imread(img_path).astype(np.float32)
-return self.transform(img), [label]
+label = np.array([label])
+return self.transform(img, label)
def __len__(self):
return len(self.samples)
@@ -24,15 +24,17 @@ sys.path.append('../')
import time
import math
import numpy as np
-import models
import paddle.fluid as fluid
-from model import CrossEntropy, Input, set_device
-from distributed import DistributedBatchSampler
-from paddle.fluid.dygraph.parallel import ParallelEnv
-from metrics import Accuracy
-from paddle.fluid.io import BatchSampler, DataLoader
+from paddle.fluid.dygraph.parallel import ParallelEnv
+from paddle.io import BatchSampler, DataLoader
+from hapi.model import CrossEntropy, Input, set_device
+from hapi.distributed import DistributedBatchSampler
+from hapi.metrics import Accuracy
+import hapi.vision.models as models
from imagenet_dataset import ImageNetDataset
def make_optimizer(step_per_epoch, parameter_list=None):
...
@@ -19,8 +19,8 @@ import os
import argparse
import numpy as np
-from model import Input, set_device
-from models import tsm_resnet50
+from hapi.model import Input, set_device
+from hapi.vision.models import tsm_resnet50
from check import check_gpu, check_version
from kinetics_dataset import KineticsDataset
...
@@ -26,7 +26,7 @@ except ImportError:
import pickle
from io import BytesIO
-from paddle.fluid.io import Dataset
+from paddle.io import Dataset
import logging
logger = logging.getLogger(__name__)
...
@@ -22,9 +22,9 @@ import numpy as np
from paddle import fluid
from paddle.fluid.dygraph.parallel import ParallelEnv
-from model import Model, CrossEntropy, Input, set_device
-from metrics import Accuracy
-from models import tsm_resnet50
+from hapi.model import Model, CrossEntropy, Input, set_device
+from hapi.metrics import Accuracy
+from hapi.vision.models import tsm_resnet50
from check import check_gpu, check_version
from kinetics_dataset import KineticsDataset
...
@@ -17,7 +17,7 @@ import os.path as osp
import sys
import tarfile
-from models.download import _download
+from hapi.download import _download
import logging
logger = logging.getLogger(__name__)
...
@@ -22,13 +22,12 @@ from PIL import Image
from paddle import fluid
from paddle.fluid.optimizer import Momentum
-from paddle.fluid.io import DataLoader
-from model import Model, Input, set_device
-from models import yolov3_darknet53, YoloLoss
-from transforms import *
+from paddle.io import DataLoader
+from hapi.model import Model, Input, set_device
+from hapi.vision.models import yolov3_darknet53, YoloLoss
+from hapi.vision.transforms import *
from coco import COCODataset
from visualizer import draw_bbox
import logging
@@ -65,7 +64,8 @@ def main():
device = set_device(FLAGS.device)
fluid.enable_dygraph(device) if FLAGS.dynamic else None
-inputs = [Input([None, 3], 'int32', name='img_info'),
+inputs = [Input([None, 1], 'int64', name='img_id'),
+Input([None, 2], 'int32', name='img_shape'),
Input([None, 3, None, None], 'float32', name='image')]
cat2name = load_labels(FLAGS.label_list, with_background=False)
@@ -87,9 +87,10 @@ def main():
img -= np.array(IMAGE_MEAN)
img /= np.array(IMAGE_STD)
img = img.transpose((2, 0, 1))[np.newaxis, :]
-img_info = np.array([0, h, w]).astype('int32')[np.newaxis, :]
+img_id = np.array([0]).astype('int64')[np.newaxis, :]
+img_shape = np.array([h, w]).astype('int32')[np.newaxis, :]
-_, bboxes = model.test([img_info, img])
+_, bboxes = model.test([img_id, img_shape, img])
vis_img = draw_bbox(orig_img, cat2name, bboxes, FLAGS.draw_threshold)
save_name = get_save_image_name(FLAGS.output_dir, FLAGS.infer_image)
...
@@ -23,15 +23,15 @@ import numpy as np
from paddle import fluid
from paddle.fluid.optimizer import Momentum
-from paddle.fluid.io import DataLoader
-from model import Model, Input, set_device
-from distributed import DistributedBatchSampler
-from models import yolov3_darknet53, YoloLoss
-from coco import COCODataset
-from transforms import *
+from paddle.io import DataLoader
+from hapi.model import Model, Input, set_device
+from hapi.distributed import DistributedBatchSampler
+from hapi.datasets import COCODataset
+from hapi.vision.transforms import *
+from hapi.vision.models import yolov3_darknet53, YoloLoss
from coco_metric import COCOMetric
NUM_MAX_BOXES = 50
...
@@ -12,4 +12,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from hapi.configure import Config as Config
+from hapi.configure import Config
from hapi import callbacks
from hapi import datasets
from hapi import distributed
from hapi import download
from hapi import metrics
from hapi import model
from hapi import progressbar
from hapi import text
from hapi import vision
__all__ = [
'Config',
'callbacks',
'datasets',
'distributed',
'download',
'metrics',
'model',
'progressbar',
'text',
'vision',
]
@@ -15,7 +15,7 @@
import six
import copy
-from hapi.progressbar import ProgressBar
+from progressbar import ProgressBar
from paddle.fluid.dygraph.parallel import ParallelEnv
...
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .folder import *
from .mnist import *
from .flowers import *
from .coco import *
@@ -20,7 +20,7 @@ import cv2
import numpy as np
from pycocotools.coco import COCO
-from paddle.fluid.io import Dataset
+from paddle.io import Dataset
import logging
logger = logging.getLogger(__name__)
...
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import io
import tarfile
import numpy as np
import scipy.io as scio
from PIL import Image
from paddle.io import Dataset
from .utils import _check_exists_and_download
__all__ = ["Flowers"]
DATA_URL = 'http://paddlemodels.bj.bcebos.com/flowers/102flowers.tgz'
LABEL_URL = 'http://paddlemodels.bj.bcebos.com/flowers/imagelabels.mat'
SETID_URL = 'http://paddlemodels.bj.bcebos.com/flowers/setid.mat'
DATA_MD5 = '52808999861908f626f3c1f4e79d11fa'
LABEL_MD5 = 'e0620be6f572b9609742df49c70aed4d'
SETID_MD5 = 'a5357ecc9cb78c4bef273ce3793fc85c'
# In the official readme, 'tstid' flags the test data and 'trnid' flags
# the train data. But there are more test samples than train samples,
# so we swap the train and test splits.
MODE_FLAG_MAP = {'train': 'tstid', 'test': 'trnid', 'valid': "valid"}
class Flowers(Dataset):
"""
Implementation of the Flowers dataset
Args:
data_file(str): path to data file, can be set None if
:attr:`download` is True. Default None
label_file(str): path to label file, can be set None if
:attr:`download` is True. Default None
setid_file(str): path to subset index file, can be set
None if :attr:`download` is True. Default None
mode(str): 'train', 'valid' or 'test' mode. Default 'train'.
transform(callable): transform to perform on a sample, None for no
transform. Default None.
download(bool): whether to download the dataset automatically if
:attr:`data_file`/:attr:`label_file`/:attr:`setid_file` is unset.
Default True.
Examples:
.. code-block:: python
from hapi.vision.datasets import Flowers
flowers = Flowers(mode='test')
for i in range(len(flowers)):
sample = flowers[i]
print(sample[0].shape, sample[1])
"""
def __init__(self,
data_file=None,
label_file=None,
setid_file=None,
mode='train',
transform=None,
download=True):
assert mode.lower() in ['train', 'valid', 'test'], \
"mode should be 'train', 'valid' or 'test', but got {}".format(mode)
self.flag = MODE_FLAG_MAP[mode.lower()]
self.data_file = data_file
if self.data_file is None:
assert download, "data_file not set and auto download disabled"
self.data_file = _check_exists_and_download(
data_file, DATA_URL, DATA_MD5, 'flowers', download)
self.label_file = label_file
if self.label_file is None:
assert download, "label_file not set and auto download disabled"
self.label_file = _check_exists_and_download(
label_file, LABEL_URL, LABEL_MD5, 'flowers', download)
self.setid_file = setid_file
if self.setid_file is None:
assert download, "setid_file not set and auto download disabled"
self.setid_file = _check_exists_and_download(
setid_file, SETID_URL, SETID_MD5, 'flowers', download)
self.transform = transform
# read dataset into memory
self._load_anno()
def _load_anno(self):
self.name2mem = {}
self.data_tar = tarfile.open(self.data_file)
for ele in self.data_tar.getmembers():
self.name2mem[ele.name] = ele
self.labels = scio.loadmat(self.label_file)['labels'][0]
self.indexes = scio.loadmat(self.setid_file)[self.flag][0]
def __getitem__(self, idx):
index = self.indexes[idx]
label = np.array([self.labels[index - 1]])
img_name = "jpg/image_%05d.jpg" % index
img_ele = self.name2mem[img_name]
image = self.data_tar.extractfile(img_ele).read()
image = np.array(Image.open(io.BytesIO(image)))
if self.transform is not None:
image, label = self.transform(image, label)
return image, label
def __len__(self):
return len(self.indexes)
@@ -16,7 +16,9 @@ import os
import sys
import cv2
-from paddle.fluid.io import Dataset
+from paddle.io import Dataset
+__all__ = ["DatasetFolder"]
def has_valid_extension(filename, extensions):
@@ -76,8 +78,6 @@ class DatasetFolder(Dataset):
both extensions and is_valid_file should not be passed.
transform (callable|optional): A function/transform that takes in
a sample and returns a transformed version.
-target_transform (callable|optional): A function/transform that takes
-in the target and transforms it.
is_valid_file (callable|optional): A function that takes path of a file
and check if the file is a valid file (used to check of corrupt files)
both extensions and is_valid_file should not be passed.
@@ -94,11 +94,9 @@ class DatasetFolder(Dataset):
loader=None,
extensions=None,
transform=None,
-target_transform=None,
is_valid_file=None):
self.root = root
self.transform = transform
-self.target_transform = target_transform
if extensions is None:
extensions = IMG_EXTENSIONS
classes, class_to_idx = self._find_classes(self.root)
@@ -152,9 +150,7 @@ class DatasetFolder(Dataset):
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
-sample = self.transform(sample)
-if self.target_transform is not None:
-target = self.target_transform(target)
+sample, target = self.transform(sample, target)
return sample, target
...
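
With the change above, a transform passed to DatasetFolder must accept and return the (sample, target) pair. A minimal sketch of such a callable (names and the path are illustrative, not part of the commit):

def scale_pair(sample, target):
    # joint transform under the new two-argument convention
    sample = sample.astype('float32') / 255.0
    return sample, target

dataset = DatasetFolder('path/to/images', transform=scale_pair)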
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import gzip
import struct
import numpy as np
import paddle.dataset.common
from paddle.io import Dataset
from .utils import _check_exists_and_download
__all__ = ["MNIST"]
URL_PREFIX = 'https://dataset.bj.bcebos.com/mnist/'
TEST_IMAGE_URL = URL_PREFIX + 't10k-images-idx3-ubyte.gz'
TEST_IMAGE_MD5 = '9fb629c4189551a2d022fa330f9573f3'
TEST_LABEL_URL = URL_PREFIX + 't10k-labels-idx1-ubyte.gz'
TEST_LABEL_MD5 = 'ec29112dd5afa0611ce80d1b7f02629c'
TRAIN_IMAGE_URL = URL_PREFIX + 'train-images-idx3-ubyte.gz'
TRAIN_IMAGE_MD5 = 'f68b3c2dcbeaaa9fbdd348bbdeb94873'
TRAIN_LABEL_URL = URL_PREFIX + 'train-labels-idx1-ubyte.gz'
TRAIN_LABEL_MD5 = 'd53e105ee54ea40749a09fcbcd1e9432'
class MNIST(Dataset):
"""
Implementation of the MNIST dataset
Args:
image_path(str): path to image file, can be set None if
:attr:`download` is True. Default None
label_path(str): path to label file, can be set None if
:attr:`download` is True. Default None
mode(str): 'train' or 'test' mode. Default 'train'.
transform(callable): transform to perform on a sample, None for no
transform. Default None.
download(bool): whether to download the dataset automatically if
:attr:`image_path`/:attr:`label_path` is unset. Default True.
Returns:
Dataset: MNIST Dataset.
Examples:
.. code-block:: python
from hapi.vision.datasets import MNIST
mnist = MNIST(mode='test')
for i in range(len(mnist)):
sample = mnist[i]
print(sample[0].shape, sample[1])
"""
def __init__(self,
image_path=None,
label_path=None,
mode='train',
transform=None,
download=True):
assert mode.lower() in ['train', 'test'], \
"mode should be 'train' or 'test', but got {}".format(mode)
self.mode = mode.lower()
self.image_path = image_path
if self.image_path is None:
assert download, "image_path not set and auto download disabled"
image_url = TRAIN_IMAGE_URL if mode == 'train' else TEST_IMAGE_URL
image_md5 = TRAIN_IMAGE_MD5 if mode == 'train' else TEST_IMAGE_MD5
self.image_path = _check_exists_and_download(
image_path, image_url, image_md5, 'mnist', download)
self.label_path = label_path
if self.label_path is None:
assert download, "label_path not set and auto download disabled"
label_url = TRAIN_LABEL_URL if mode == 'train' else TEST_LABEL_URL
label_md5 = TRAIN_LABEL_MD5 if mode == 'train' else TEST_LABEL_MD5
self.label_path = _check_exists_and_download(
label_path, label_url, label_md5, 'mnist', download)
self.transform = transform
# read dataset into memory
self._parse_dataset()
def _parse_dataset(self, buffer_size=100):
self.images = []
self.labels = []
with gzip.GzipFile(self.image_path, 'rb') as image_file:
img_buf = image_file.read()
with gzip.GzipFile(self.label_path, 'rb') as label_file:
lab_buf = label_file.read()
step_label = 0
offset_img = 0
# read from Big-endian
# get file info from magic byte
# image file : 16B
magic_byte_img = '>IIII'
magic_img, image_num, rows, cols = struct.unpack_from(
magic_byte_img, img_buf, offset_img)
offset_img += struct.calcsize(magic_byte_img)
offset_lab = 0
# label file : 8B
magic_byte_lab = '>II'
magic_lab, label_num = struct.unpack_from(magic_byte_lab,
lab_buf, offset_lab)
offset_lab += struct.calcsize(magic_byte_lab)
while True:
if step_label >= label_num:
break
fmt_label = '>' + str(buffer_size) + 'B'
labels = struct.unpack_from(fmt_label, lab_buf, offset_lab)
offset_lab += struct.calcsize(fmt_label)
step_label += buffer_size
fmt_images = '>' + str(buffer_size * rows * cols) + 'B'
images_temp = struct.unpack_from(fmt_images, img_buf,
offset_img)
images = np.reshape(images_temp, (buffer_size, rows *
cols)).astype('float32')
offset_img += struct.calcsize(fmt_images)
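# scale pixel values from [0, 255] to [-1, 1]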
images = images / 255.0
images = images * 2.0
images = images - 1.0
for i in range(buffer_size):
self.images.append(images[i, :])
self.labels.append(np.array([labels[i]]))
def __getitem__(self, idx):
image, label = self.images[idx], self.labels[idx]
if self.transform is not None:
image, label = self.transform(image, label)
return image, label
def __len__(self):
return len(self.labels)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import paddle.dataset.common
def _check_exists_and_download(path, url, md5, module_name, download=True):
if path and os.path.exists(path):
return path
if download:
return paddle.dataset.common.download(url, module_name, md5)
else:
raise FileNotFoundError(
'{} not exists and auto download disabled'.format(path))
@@ -23,7 +23,7 @@ import numpy as np
from paddle import fluid
from paddle.fluid.layers import collective
from paddle.fluid.dygraph.parallel import ParallelEnv, ParallelStrategy
-from paddle.fluid.io import BatchSampler
+from paddle.io import BatchSampler
_parallel_context_initialized = False
@@ -39,7 +39,7 @@ class DistributedBatchSampler(BatchSampler):
Dataset is assumed to be of constant size.
Args:
-data_source: this could be a `fluid.io.Dataset` implementation
+data_source: this could be a `paddle.io.Dataset` implementation
or another python object which implements `__len__`, for
BatchSampler to get the sample number of the data source.
...
@@ -32,7 +32,7 @@ from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.fluid.layers.utils import flatten
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
from paddle.fluid.incubate.fleet.base import role_maker
-from paddle.fluid.io import DataLoader, Dataset
+from paddle.io import DataLoader, Dataset
from hapi.distributed import DistributedBatchSampler, _all_gather, prepare_distributed_context, _parallel_context_initialized
from hapi.metrics import Metric
@@ -45,6 +45,14 @@ __all__ = [
def set_device(device):
+"""
+Args:
+device (str): specify device type, 'cpu' or 'gpu'.
+Returns:
+fluid.CUDAPlace or fluid.CPUPlace: Created GPU or CPU place.
+"""
assert isinstance(device, six.string_types) and device.lower() in ['cpu', 'gpu'], \
"Expected device in ['cpu', 'gpu'], but got {}".format(device)
@@ -117,9 +125,9 @@ class Loss(object):
def forward(self, outputs, labels):
raise NotImplementedError()
-def __call__(self, outputs, labels):
+def __call__(self, outputs, labels=None):
labels = to_list(labels)
-if in_dygraph_mode():
+if in_dygraph_mode() and labels:
labels = [to_variable(l) for l in labels]
losses = to_list(self.forward(to_list(outputs), labels))
if self.average:
@@ -366,10 +374,27 @@ class StaticGraphAdapter(object):
metric_list, metric_splits = flatten_list(endpoints['metric'])
fetch_list = endpoints['loss'] + metric_list
num_loss = len(endpoints['loss'])
+# if fetch Variable is same as input Variable, do not fetch
+# from program, get it from input directly
+pruned_fetch_list = []
+pruned_fetch_idx_name_map = [""] * len(fetch_list)
+for i, fetch_var in enumerate(fetch_list):
+if fetch_var.name in feed.keys():
+pruned_fetch_idx_name_map[i] = fetch_var.name
+else:
+pruned_fetch_list.append(fetch_var)
rets = self._executor.run(compiled_prog,
feed=feed,
-fetch_list=fetch_list,
+fetch_list=pruned_fetch_list,
return_numpy=False)
+# restore pruned fetch_list Variable from feeds
+for i, name in enumerate(pruned_fetch_idx_name_map):
+if len(name) > 0:
+rets.insert(i, feed[name])
# LoDTensor cannot be fetch as numpy directly
rets = [np.array(v) for v in rets]
if self.mode == 'test':
@@ -867,8 +892,6 @@ class Model(fluid.dygraph.Layer):
if not isinstance(inputs, (list, dict, Input)):
raise TypeError(
"'inputs' must be list or dict in static graph mode")
-if loss_function and not isinstance(labels, (list, Input)):
-raise TypeError("'labels' must be list in static graph mode")
metrics = metrics or []
for metric in to_list(metrics):
@@ -904,11 +927,11 @@
FIXME: add more comments and usage
Args:
train_data (Dataset|DataLoader): An iterable data loader is used for
-train. An instance of paddle.fluid.io.Dataset or
-paddle.fluid.io.Dataloader is recomended.
+train. An instance of paddle.io.Dataset or
+paddle.io.DataLoader is recommended.
eval_data (Dataset|DataLoader): An iterable data loader is used for
evaluation at the end of each epoch. If None, will not do evaluation.
-An instance of paddle.fluid.io.Dataset or paddle.fluid.io.Dataloader
+An instance of paddle.io.Dataset or paddle.io.DataLoader
is recommended.
batch_size (int): Integer number. The batch size of train_data and eval_data.
When train_data and eval_data are both the instance of Dataloader, this
@@ -1032,8 +1055,8 @@
FIXME: add more comments and usage
Args:
eval_data (Dataset|DataLoader): An iterable data loader is used for
-evaluation. An instance of paddle.fluid.io.Dataset or
-paddle.fluid.io.Dataloader is recomended.
+evaluation. An instance of paddle.io.Dataset or
+paddle.io.DataLoader is recommended.
batch_size (int): Integer number. The batch size of train_data and eval_data.
When train_data and eval_data are both the instance of Dataloader, this
parameter will be ignored.
@@ -1098,12 +1121,16 @@
return eval_result
-def predict(self, test_data, batch_size=1, num_workers=0):
+def predict(self,
+test_data,
+batch_size=1,
+num_workers=0,
+stack_outputs=True):
"""
FIXME: add more comments and usage
Args:
test_data (Dataset|DataLoader): An iterable data loader is used for
-predict. An instance of paddle.fluid.io.Dataset or paddle.fluid.io.Dataloader
+predict. An instance of paddle.io.Dataset or paddle.io.DataLoader
is recommended.
batch_size (int): Integer number. The batch size of train_data and eval_data.
When train_data and eval_data are both the instance of Dataloader, this
@@ -1111,6 +1138,12 @@
num_workers (int): the number of subprocess to load data, 0 for no subprocess
used and loading data in main process. When train_data and eval_data are
both the instance of Dataloader, this parameter will be ignored.
+stack_outputs (bool): whether to stack each output field like a batch; if an
+output field of a sample is in shape [X, Y] and test_data contains N samples,
+the predict output field will be in shape [N, X, Y] if stack_outputs is True,
+and will be a length-N list of [X, Y] arrays if stack_outputs is False.
+stack_outputs=False is intended for LoDTensor outputs; True is recommended
+if the outputs contain no LoDTensor. Default True.
"""
if fluid.in_dygraph_mode():
@@ -1137,19 +1170,16 @@
if not isinstance(test_loader, Iterable):
loader = test_loader()
-outputs = None
+outputs = []
for data in tqdm.tqdm(loader):
-if not fluid.in_dygraph_mode():
-data = data[0]
-outs = self.test(*data)
-if outputs is None:
-outputs = outs
-else:
-outputs = [
-np.vstack([x, outs[i]]) for i, x in enumerate(outputs)
-]
+data = flatten(data)
+outputs.append(self.test(data[:len(self._inputs)]))
+# NOTE: for lod tensor output, we should not stack outputs
+# for stacking may lose its detail info
+outputs = list(zip(*outputs))
+if stack_outputs:
+outputs = [np.stack(outs, axis=0) for outs in outputs]
self._test_dataloader = None
if test_loader is not None and self._adapter._nranks > 1 \
@@ -1161,8 +1191,8 @@
"""
Args:
eval_data (Dataset|DataLoader|None): An iterable data loader is used for
-eval. An instance of paddle.fluid.io.Dataset or
-paddle.fluid.io.Dataloader is recomended.
+eval. An instance of paddle.io.Dataset or
+paddle.io.DataLoader is recommended.
"""
assert isinstance(
eval_data,
...
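
A minimal sketch of the reworked predict above (an illustration, not part of the commit; `model` and `test_dataset` are assumed to be a prepared hapi Model and a paddle.io.Dataset):

outputs = model.predict(test_dataset, batch_size=8, stack_outputs=True)
# each element of `outputs` is one output field stacked across all N samples,
# e.g. shape [N, X, Y]; with stack_outputs=False it stays a length-N list of
# per-sample arrays, the safe choice when the outputs contain LoDTensor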
@@ -25,7 +25,7 @@ from functools import partial
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.parallel import ParallelEnv
-from paddle.fluid.io import BatchSampler, DataLoader, Dataset
+from paddle.io import BatchSampler, DataLoader, Dataset
from hapi.distributed import DistributedBatchSampler
from hapi.text.bert.data_processor import DataProcessor, XnliProcessor, ColaProcessor, MrpcProcessor, MnliProcessor
from hapi.text.bert.batching import prepare_batch_data
...
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import models
from . import transforms
__all__ = ["models", "transforms"]
@@ -19,6 +19,7 @@ from . import mobilenetv2
from . import darknet
from . import yolov3
from . import tsm
+from . import bmn
from .resnet import *
from .mobilenetv1 import *
@@ -27,6 +28,7 @@ from .vgg import *
from .darknet import *
from .yolov3 import *
from .tsm import *
+from .bmn import *
__all__ = resnet.__all__ \
+ vgg.__all__ \
@@ -34,4 +36,5 @@ __all__ = resnet.__all__ \
+ mobilenetv2.__all__ \
+ darknet.__all__ \
+ yolov3.__all__ \
-+ tsm.__all__
++ tsm.__all__ \
++ bmn.__all__
@@ -17,12 +17,68 @@ from paddle.fluid import ParamAttr
import numpy as np
import math
-from bmn_utils import get_interp1d_mask
-from model import Model, Loss
+from hapi.model import Model, Loss
+__all__ = ["BMN", "BmnLoss"]
DATATYPE = 'float32'
def _get_interp1d_bin_mask(seg_xmin, seg_xmax, tscale, num_sample,
num_sample_perbin):
""" generate sample mask for a boundary-matching pair """
plen = float(seg_xmax - seg_xmin)
plen_sample = plen / (num_sample * num_sample_perbin - 1.0)
total_samples = [
seg_xmin + plen_sample * ii
for ii in range(num_sample * num_sample_perbin)
]
p_mask = []
for idx in range(num_sample):
bin_samples = total_samples[idx * num_sample_perbin:(idx + 1) *
num_sample_perbin]
bin_vector = np.zeros([tscale])
for sample in bin_samples:
sample_upper = math.ceil(sample)
sample_decimal, sample_down = math.modf(sample)
if int(sample_down) <= (tscale - 1) and int(sample_down) >= 0:
bin_vector[int(sample_down)] += 1 - sample_decimal
if int(sample_upper) <= (tscale - 1) and int(sample_upper) >= 0:
bin_vector[int(sample_upper)] += sample_decimal
bin_vector = 1.0 / num_sample_perbin * bin_vector
p_mask.append(bin_vector)
p_mask = np.stack(p_mask, axis=1)
return p_mask
def get_interp1d_mask(tscale, dscale, prop_boundary_ratio, num_sample,
num_sample_perbin):
""" generate sample mask for each point in Boundary-Matching Map """
mask_mat = []
for start_index in range(tscale):
mask_mat_vector = []
for duration_index in range(dscale):
if start_index + duration_index < tscale:
p_xmin = start_index
p_xmax = start_index + duration_index
center_len = float(p_xmax - p_xmin) + 1
sample_xmin = p_xmin - center_len * prop_boundary_ratio
sample_xmax = p_xmax + center_len * prop_boundary_ratio
p_mask = _get_interp1d_bin_mask(sample_xmin, sample_xmax,
tscale, num_sample,
num_sample_perbin)
else:
p_mask = np.zeros([tscale, num_sample])
mask_mat_vector.append(p_mask)
mask_mat_vector = np.stack(mask_mat_vector, axis=2)
mask_mat.append(mask_mat_vector)
mask_mat = np.stack(mask_mat, axis=3)
mask_mat = mask_mat.astype(np.float32)
sample_mask = np.reshape(mask_mat, [tscale, -1])
return sample_mask
# Net
class Conv1D(fluid.dygraph.Layer):
def __init__(self,
...
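
A quick shape check for the boundary-matching sample mask added above (an illustration, not part of the commit; the argument values are typical BMN settings, assumed here):

mask = get_interp1d_mask(tscale=100, dscale=100, prop_boundary_ratio=0.5,
                         num_sample=32, num_sample_perbin=3)
print(mask.shape)  # (100, 320000), i.e. (tscale, num_sample * dscale * tscale)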
@@ -18,10 +18,10 @@ from paddle.fluid.regularizer import L2Decay
from paddle.fluid.dygraph.nn import Conv2D, BatchNorm
-from model import Model
-from .download import get_weights_path
+from hapi.model import Model
+from hapi.download import get_weights_path
-__all__ = ['DarkNet53', 'ConvBNLayer', 'darknet53']
+__all__ = ['DarkNet', 'ConvBNLayer', 'darknet53']
# {num_layers: (url, md5)}
pretrain_infos = {
@@ -136,9 +136,17 @@ class LayerWarp(fluid.dygraph.Layer):
DarkNet_cfg = {53: ([1, 2, 8, 8, 4])}
-class DarkNet53(Model):
+class DarkNet(Model):
"""DarkNet model from
`"YOLOv3: An Incremental Improvement" <https://arxiv.org/abs/1804.02767>`_
Args:
num_layers (int): layer number of DarkNet, only 53 supported currently, default: 53.
ch_in (int): channel number of input data, default 3.
"""
def __init__(self, num_layers=53, ch_in=3):
-super(DarkNet53, self).__init__()
+super(DarkNet, self).__init__()
assert num_layers in DarkNet_cfg.keys(), \
"only support num_layers in {} currently" \
.format(DarkNet_cfg.keys())
@@ -188,7 +196,7 @@ class DarkNet53(Model):
def _darknet(num_layers=53, input_channels=3, pretrained=True):
-model = DarkNet53(num_layers, input_channels)
+model = DarkNet(num_layers, input_channels)
if pretrained:
assert num_layers in pretrain_infos.keys(), \
"DarkNet{} do not have pretrained weights now, " \
@@ -201,4 +209,11 @@ def _darknet(num_layers=53, input_channels=3, pretrained=True):
def darknet53(input_channels=3, pretrained=True):
"""DarkNet 53-layer model
Args:
input_channels (int): channel number of input data, default 3.
pretrained (bool): If True, returns a model pre-trained on ImageNet,
default True.
"""
return _darknet(53, input_channels, pretrained)
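
A minimal usage sketch for the renamed backbone (an illustration, not part of the commit):

model = darknet53(input_channels=3, pretrained=False)  # DarkNet instance, no weight download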
@@ -19,8 +19,8 @@ from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
-from model import Model
-from .download import get_weights_path
+from hapi.model import Model
+from hapi.download import get_weights_path
__all__ = ['MobileNetV1', 'mobilenet_v1']
...
@@ -18,8 +18,8 @@ import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
-from model import Model
-from .download import get_weights_path
+from hapi.model import Model
+from hapi.download import get_weights_path
__all__ = ['MobileNetV2', 'mobilenet_v2']
...
@@ -22,8 +22,8 @@ from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from paddle.fluid.dygraph.container import Sequential
-from model import Model
-from .download import get_weights_path
+from hapi.model import Model
+from hapi.download import get_weights_path
__all__ = [
    'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'
...
@@ -17,8 +17,8 @@ import paddle.fluid as fluid
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
-from model import Model
-from .download import get_weights_path
+from hapi.model import Model
+from hapi.download import get_weights_path
__all__ = ["TSM_ResNet", "tsm_resnet50"]
@@ -201,4 +201,12 @@ def _tsm_resnet(num_layers, seg_num=8, num_classes=400, pretrained=True):
def tsm_resnet50(seg_num=8, num_classes=400, pretrained=True):
+    """TSM model with a 50-layer ResNet as backbone
+
+    Args:
+        seg_num (int): segment number of each video sample. Default 8.
+        num_classes (int): video class number. Default 400.
+        pretrained (bool): If True, returns a model pre-trained on COCO.
+            Default True.
+    """
    return _tsm_resnet(50, seg_num, num_classes, pretrained)
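A hedged usage sketch for the docstring above (illustrative only; the module path is assumed, and inputs are video clips of seg_num sampled frames):

from hapi.vision.models.tsm import tsm_resnet50  # path assumed

# TSM over 8-frame segments with 400 video classes, no weight download.
model = tsm_resnet50(seg_num=8, num_classes=400, pretrained=False)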
@@ -17,8 +17,8 @@ import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from paddle.fluid.dygraph.container import Sequential
-from model import Model
-from .download import get_weights_path
+from hapi.model import Model
+from hapi.download import get_weights_path
__all__ = [
    'VGG',
...
@@ -20,9 +20,9 @@ from paddle.fluid.dygraph.nn import Conv2D
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
-from model import Model, Loss
+from hapi.model import Model, Loss
+from hapi.download import get_weights_path
from .darknet import darknet53, ConvBNLayer
-from .download import get_weights_path
__all__ = ['YoloLoss', 'YOLOv3', 'yolov3_darknet53']
@@ -88,6 +88,20 @@ class YoloDetectionBlock(fluid.dygraph.Layer):
class YOLOv3(Model):
+    """YOLOv3 model from
+    `"YOLOv3: An Incremental Improvement" <https://arxiv.org/abs/1804.02767>`_
+
+    Args:
+        num_classes (int): class number, default 80.
+        model_mode (str): 'train', 'eval' or 'test' mode. The network
+            structure differs in the output layer and data: in 'train'
+            mode no output layer is appended; in 'eval' and 'test' the
+            output feature maps are decoded into predictions by
+            'fluid.layers.yolo_box'; 'eval' returns both feature maps
+            and predictions, while 'test' returns only predictions.
+            Default 'train'.
+    """
    def __init__(self, num_classes=80, model_mode='train'):
        super(YOLOv3, self).__init__()
        self.num_classes = num_classes
@@ -245,4 +259,17 @@ def _yolov3_darknet(num_layers=53, num_classes=80,
def yolov3_darknet53(num_classes=80, model_mode='train', pretrained=True):
+    """YOLOv3 model with 53-layer DarkNet as backbone
+
+    Args:
+        num_classes (int): class number, default 80.
+        model_mode (str): 'train', 'eval' or 'test' mode. The network
+            structure differs in the output layer and data: in 'train'
+            mode no output layer is appended; in 'eval' and 'test' the
+            output feature maps are decoded into predictions by
+            'fluid.layers.yolo_box'; 'eval' returns both feature maps
+            and predictions, while 'test' returns only predictions.
+            Default 'train'.
+        pretrained (bool): If True, returns a model pre-trained on COCO.
+            Default True.
+    """
    return _yolov3_darknet(53, num_classes, model_mode, pretrained)
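A sketch of the model_mode switch the docstring describes (illustrative, module path assumed):

from hapi.vision.models.yolov3 import yolov3_darknet53  # path assumed

# 'train' appends no output layer; 'test' decodes feature maps into boxes.
train_model = yolov3_darknet53(num_classes=80, model_mode='train', pretrained=False)
infer_model = yolov3_darknet53(num_classes=80, model_mode='test', pretrained=False)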
@@ -13,3 +13,5 @@
# limitations under the License.
from .transforms import *
+from .functional import *
+from .detection_transforms import *
@@ -19,48 +19,18 @@ import cv2
import traceback
import numpy as np
-import logging
-logger = logging.getLogger(__name__)
-
-__all__ = ['ColorDistort', 'RandomExpand', 'RandomCrop', 'RandomFlip',
-           'NormalizeBox', 'PadBox', 'RandomShape', 'NormalizeImage',
-           'BboxXYXY2XYWH', 'ResizeImage', 'Compose', 'BatchCompose']
-
-class Compose(object):
-    def __init__(self, transforms=[]):
-        self.transforms = transforms
-
-    def __call__(self, *data):
-        for f in self.transforms:
-            try:
-                data = f(*data)
-            except Exception as e:
-                stack_info = traceback.format_exc()
-                logger.info("fail to perform transform [{}] with error: "
-                            "{} and stack:\n{}".format(f, e, str(stack_info)))
-                raise e
-        return data
-
-class BatchCompose(object):
-    def __init__(self, transforms=[]):
-        self.transforms = transforms
-
-    def __call__(self, data):
-        for f in self.transforms:
-            try:
-                data = f(data)
-            except Exception as e:
-                stack_info = traceback.format_exc()
-                logger.info("fail to perform batch transform [{}] with error: "
-                            "{} and stack:\n{}".format(f, e, str(stack_info)))
-                raise e
-        # sample list to batch data
-        batch = list(zip(*data))
-        return batch
+__all__ = [
+    'ColorDistort',
+    'RandomExpand',
+    'RandomCrop',
+    'RandomFlip',
+    'NormalizeBox',
+    'PadBox',
+    'RandomShape',
+    'NormalizeImage',
+    'BboxXYXY2XYWH',
+    'ResizeImage',
+]
class ColorDistort(object):
...
@@ -24,6 +24,7 @@ import numbers
import types
import collections
import warnings
+import traceback
from . import functional as F
@@ -34,6 +35,7 @@ else:
__all__ = [
    "Compose",
+    "BatchCompose",
    "Resize",
    "RandomResizedCrop",
    "CenterCropResize",
@@ -62,10 +64,16 @@ class Compose(object):
    def __init__(self, transforms):
        self.transforms = transforms
-    def __call__(self, img):
-        for t in self.transforms:
-            img = t(img)
-        return img
+    def __call__(self, *data):
+        for f in self.transforms:
+            try:
+                data = f(*data)
+            except Exception as e:
+                stack_info = traceback.format_exc()
+                print("fail to perform transform [{}] with error: "
+                      "{} and stack:\n{}".format(f, e, str(stack_info)))
+                raise e
+        return data
    def __repr__(self):
        format_string = self.__class__.__name__ + '('
@@ -76,6 +84,33 @@ class Compose(object):
        return format_string
+class BatchCompose(object):
+    """Composes several batch transforms together
+
+    Args:
+        transforms (list of ``Transform`` objects): list of transforms to
+            compose; these transforms operate on batch data.
+    """
+
+    def __init__(self, transforms=[]):
+        self.transforms = transforms
+
+    def __call__(self, data):
+        for f in self.transforms:
+            try:
+                data = f(data)
+            except Exception as e:
+                stack_info = traceback.format_exc()
+                print("fail to perform batch transform [{}] with error: "
+                      "{} and stack:\n{}".format(f, e, str(stack_info)))
+                raise e
+
+        # sample list to batch data
+        batch = list(zip(*data))
+        return batch
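Taken together, the reworked Compose and the new BatchCompose split per-sample and per-batch work: sample transforms now pass positional (img, lbl) pairs through, and BatchCompose finally zips the sample list into per-field tuples. A minimal sketch of that flow, using transforms from this file (illustrative, not part of the commit):

import numpy as np

img = np.zeros((32, 32, 3), dtype=np.float32)
lbl = np.array([1])

# Per-sample stage: each transform takes and returns an (img, lbl) pair.
img, lbl = Compose([RandomHorizontalFlip(prob=0.5)])(img, lbl)

# Batch stage: with no batch transforms, BatchCompose just transposes a
# list of samples into per-field tuples, ready for stacking.
batch = BatchCompose()([(img, lbl), (img, lbl)])
# batch == [(img, img), (lbl, lbl)]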
class Resize(object):
    """Resize the input PIL Image to the given size.
@@ -94,7 +129,7 @@ class Resize(object):
        self.size = size
        self.interpolation = interpolation
-    def __call__(self, img):
+    def __call__(self, img, lbl):
        """
        Args:
            img (PIL Image): Image to be scaled.
@@ -102,7 +137,7 @@ class Resize(object):
        Returns:
            PIL Image: Rescaled image.
        """
-        return F.resize(img, self.size, self.interpolation)
+        return F.resize(img, self.size, self.interpolation), lbl
class RandomResizedCrop(object):
@@ -164,10 +199,10 @@ class RandomResizedCrop(object):
        y = (height - h) // 2
        return x, y, w, h
-    def __call__(self, img):
+    def __call__(self, img, lbl):
        x, y, w, h = self._get_params(img)
        cropped_img = img[y:y + h, x:x + w]
-        return F.resize(cropped_img, self.output_size, self.interpolation)
+        return F.resize(cropped_img, self.output_size, self.interpolation), lbl
class CenterCropResize(object):
@@ -195,10 +230,10 @@ class CenterCropResize(object):
        y = (w + 1 - c) // 2
        return c, x, y
-    def __call__(self, img):
+    def __call__(self, img, lbl):
        c, x, y = self._get_params(img)
        cropped_img = img[x:x + c, y:y + c, :]
-        return F.resize(cropped_img, self.size, self.interpolation)
+        return F.resize(cropped_img, self.size, self.interpolation), lbl
class CenterCrop(object):
@@ -222,10 +257,10 @@ class CenterCrop(object):
        y = int(round((h - th) / 2.0))
        return x, y
-    def __call__(self, img):
+    def __call__(self, img, lbl):
        x, y = self._get_params(img)
        th, tw = self.output_size
-        return img[y:y + th, x:x + tw]
+        return img[y:y + th, x:x + tw], lbl
class RandomHorizontalFlip(object):
@@ -238,10 +273,10 @@ class RandomHorizontalFlip(object):
    def __init__(self, prob=0.5):
        self.prob = prob
-    def __call__(self, img):
+    def __call__(self, img, lbl):
        if np.random.random() < self.prob:
-            return F.flip(img, code=1)
-        return img
+            return F.flip(img, code=1), lbl
+        return img, lbl
class RandomVerticalFlip(object):
@@ -254,10 +289,10 @@ class RandomVerticalFlip(object):
    def __init__(self, prob=0.5):
        self.prob = prob
-    def __call__(self, img):
+    def __call__(self, img, lbl):
        if np.random.random() < self.prob:
-            return F.flip(img, code=0)
-        return img
+            return F.flip(img, code=0), lbl
+        return img, lbl
class Normalize(object):
@@ -282,8 +317,8 @@ class Normalize(object):
        self.mean = np.array(mean, dtype=np.float32).reshape(len(mean), 1, 1)
        self.std = np.array(std, dtype=np.float32).reshape(len(std), 1, 1)
-    def __call__(self, img):
-        return (img - self.mean) / self.std
+    def __call__(self, img, lbl):
+        return (img - self.mean) / self.std, lbl
class Permute(object):
@@ -302,10 +337,10 @@ class Permute(object):
        ], "Only support 'CHW' mode, but received mode: {}".format(mode)
        self.mode = mode
-    def __call__(self, img):
+    def __call__(self, img, lbl):
        if self.mode == "CHW":
-            return img.transpose((2, 0, 1))[::-1, ...]
-        return img
+            return img.transpose((2, 0, 1))[::-1, ...], lbl
+        return img, lbl
class GaussianNoise(object):
@@ -321,11 +356,11 @@ class GaussianNoise(object):
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
-    def __call__(self, img):
+    def __call__(self, img, lbl):
        dtype = img.dtype
        noise = np.random.normal(self.mean, self.std, img.shape) * 255
        img = img + noise.astype(np.float32)
-        return np.clip(img, 0, 255).astype(dtype)
+        return np.clip(img, 0, 255).astype(dtype), lbl
class BrightnessTransform(object):
@@ -341,15 +376,15 @@ class BrightnessTransform(object):
            raise ValueError("brightness value should be non-negative")
        self.value = value
-    def __call__(self, img):
+    def __call__(self, img, lbl):
        if self.value == 0:
-            return img
+            return img, lbl
        dtype = img.dtype
        img = img.astype(np.float32)
        alpha = np.random.uniform(max(0, 1 - self.value), 1 + self.value)
        img = img * alpha
-        return img.clip(0, 255).astype(dtype)
+        return img.clip(0, 255).astype(dtype), lbl
class ContrastTransform(object):
@@ -365,16 +400,16 @@ class ContrastTransform(object):
            raise ValueError("contrast value should be non-negative")
        self.value = value
-    def __call__(self, img):
+    def __call__(self, img, lbl):
        if self.value == 0:
-            return img
+            return img, lbl
        dtype = img.dtype
        img = img.astype(np.float32)
        alpha = np.random.uniform(max(0, 1 - self.value), 1 + self.value)
        img = img * alpha + cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).mean() * (
            1 - alpha)
-        return img.clip(0, 255).astype(dtype)
+        return img.clip(0, 255).astype(dtype), lbl
class SaturationTransform(object):
@@ -390,9 +425,9 @@ class SaturationTransform(object):
            raise ValueError("saturation value should be non-negative")
        self.value = value
-    def __call__(self, img):
+    def __call__(self, img, lbl):
        if self.value == 0:
-            return img
+            return img, lbl
        dtype = img.dtype
        img = img.astype(np.float32)
@@ -400,7 +435,7 @@ class SaturationTransform(object):
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray_img = gray_img[..., np.newaxis]
        img = img * alpha + gray_img * (1 - alpha)
-        return img.clip(0, 255).astype(dtype)
+        return img.clip(0, 255).astype(dtype), lbl
class HueTransform(object):
@@ -416,9 +451,9 @@ class HueTransform(object):
            raise ValueError("hue value should be in [0.0, 0.5]")
        self.value = value
-    def __call__(self, img):
+    def __call__(self, img, lbl):
        if self.value == 0:
-            return img
+            return img, lbl
        dtype = img.dtype
        img = img.astype(np.uint8)
@@ -431,7 +466,7 @@ class HueTransform(object):
        with np.errstate(over="ignore"):
            h += np.uint8(alpha * 255)
        hsv_img = cv2.merge([h, s, v])
-        return cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR_FULL).astype(dtype)
+        return cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR_FULL).astype(dtype), lbl
class ColorJitter(object):
@@ -466,5 +501,5 @@ class ColorJitter(object):
        random.shuffle(transforms)
        self.transforms = Compose(transforms)
-    def __call__(self, img):
-        return self.transforms(img)
+    def __call__(self, img, lbl):
+        return self.transforms(img, lbl)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import six
import abc
import numpy as np
import paddle.fluid as fluid
import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
__all__ = ['Metric', 'Accuracy']
@six.add_metaclass(abc.ABCMeta)
class Metric(object):
"""
Base class for metric, encapsulates metric logic and APIs
Usage:
m = SomeMetric()
for prediction, label in ...:
m.update(prediction, label)
m.accumulate()
"""
@abc.abstractmethod
def reset(self):
"""
Reset states and result
"""
raise NotImplementedError("function 'reset' not implemented in {}.".
format(self.__class__.__name__))
@abc.abstractmethod
def update(self, *args, **kwargs):
"""
Update states for metric
"""
raise NotImplementedError("function 'update' not implemented in {}.".
format(self.__class__.__name__))
@abc.abstractmethod
def accumulate(self):
"""
Accumulates statistics, computes and returns the metric value
"""
raise NotImplementedError(
"function 'accumulate' not implemented in {}.".format(
self.__class__.__name__))
@abc.abstractmethod
def name(self):
"""
Returns metric name
"""
raise NotImplementedError("function 'name' not implemented in {}.".
format(self.__class__.__name__))
    def add_metric_op(self, pred, label):
        """
        Add a processing op for this metric into the program; its outputs
        are what `update` receives at each step.
        """
        return pred, label
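Since the abstract interface above is the whole contract, a minimal custom metric only needs reset/update/accumulate/name. A hedged sketch (illustrative, not part of this commit):

class MeanValue(Metric):
    """Tracks the running mean of a scalar, e.g. a loss value."""

    def __init__(self, name='mean_value'):
        super(MeanValue, self).__init__()
        self._name = [name]
        self.reset()

    def reset(self):
        self.total = 0.
        self.count = 0

    def update(self, value):
        self.total += float(value)
        self.count += 1

    def accumulate(self):
        return self.total / max(self.count, 1)

    def name(self):
        return self._name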
class Accuracy(Metric):
"""
Encapsulates accuracy metric logic
"""
def __init__(self, topk=(1, ), name=None, *args, **kwargs):
super(Accuracy, self).__init__(*args, **kwargs)
self.topk = topk
self.maxk = max(topk)
self._init_name(name)
self.reset()
def add_metric_op(self, pred, label, *args, **kwargs):
pred = fluid.layers.argsort(pred[0], descending=True)[1][:, :self.maxk]
correct = pred == label[0]
return correct
def update(self, correct, *args, **kwargs):
accs = []
for i, k in enumerate(self.topk):
num_corrects = correct[:, :k].sum()
num_samples = len(correct)
accs.append(float(num_corrects) / num_samples)
self.total[i] += num_corrects
self.count[i] += num_samples
return accs
def reset(self):
self.total = [0.] * len(self.topk)
self.count = [0] * len(self.topk)
def accumulate(self):
res = []
for t, c in zip(self.total, self.count):
res.append(float(t) / c)
return res
    def _init_name(self, name):
        name = name or 'acc'
        if self.maxk != 1:
            self._name = ['{}_top{}'.format(name, k) for k in self.topk]
        else:
            self._name = [name]
def name(self):
return self._name
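The update/accumulate cycle can be checked with plain numpy, bypassing add_metric_op (which needs a running paddle program). Illustrative only:

m = Accuracy(topk=(1,))
correct = np.array([[1], [0], [1]])  # per-sample top-1 hit indicators
m.update(correct)
print(m.accumulate())  # -> [0.6666...]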
@@ -24,7 +24,7 @@ import numpy as np
from paddle import fluid
from paddle.fluid.optimizer import Momentum
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
-from paddle.fluid.io import MNIST as MnistDataset
+from vision.datasets import MNIST as MnistDataset
from model import Model, CrossEntropy, Input, set_device
from metrics import Accuracy
...
(This diff has been collapsed.)
import os
import sys
import time
import numpy as np
class ProgressBar(object):
"""progress bar """
def __init__(self,
num=None,
width=30,
verbose=1,
start=True,
file=sys.stdout):
self._num = num
if isinstance(num, int) and num <= 0:
raise TypeError('num should be None or integer (> 0)')
max_width = self._get_max_width()
self._width = width if width <= max_width else max_width
self._total_width = 0
self._verbose = verbose
self.file = file
self._values = {}
self._values_order = []
if start:
self._start = time.time()
self._last_update = 0
self._dynamic_display = (
(hasattr(self.file, 'isatty') and
self.file.isatty()) or 'ipykernel' in sys.modules or
'posix' in sys.modules or 'PYCHARM_HOSTED' in os.environ)
def _get_max_width(self):
if sys.version_info > (3, 3):
from shutil import get_terminal_size
else:
from backports.shutil_get_terminal_size import get_terminal_size
terminal_width, _ = get_terminal_size()
max_width = min(int(terminal_width * 0.6), terminal_width - 50)
return max_width
def start(self):
self.file.flush()
self._start = time.time()
    def update(self, current_num, values=None):
        now = time.time()
        # values may be None; normalize to an empty list before iterating
        values = values or []
if current_num:
time_per_unit = (now - self._start) / current_num
else:
time_per_unit = 0
if time_per_unit >= 1 or time_per_unit == 0:
fps = ' - %.0fs/%s' % (time_per_unit, 'step')
elif time_per_unit >= 1e-3:
fps = ' - %.0fms/%s' % (time_per_unit * 1e3, 'step')
else:
fps = ' - %.0fus/%s' % (time_per_unit * 1e6, 'step')
info = ''
if self._verbose == 1:
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self._num is not None:
numdigits = int(np.log10(self._num)) + 1
bar_chars = ('step %' + str(numdigits) + 'd/%d [') % (
current_num, self._num)
prog = float(current_num) / self._num
prog_width = int(self._width * prog)
if prog_width > 0:
bar_chars += ('=' * (prog_width - 1))
if current_num < self._num:
bar_chars += '>'
else:
bar_chars += '='
bar_chars += ('.' * (self._width - prog_width))
bar_chars += ']'
else:
bar_chars = 'step %3d' % current_num
self._total_width = len(bar_chars)
sys.stdout.write(bar_chars)
for k, val in values:
info += ' - %s:' % k
val = val if isinstance(val, list) else [val]
for i, v in enumerate(val):
if isinstance(v, (float, np.float32, np.float64)):
if abs(v) > 1e-3:
info += ' %.4f' % v
else:
info += ' %.4e' % v
else:
info += ' %s' % v
if self._num is not None and current_num < self._num:
eta = time_per_unit * (self._num - current_num)
if eta > 3600:
eta_format = '%d:%02d:%02d' % (eta // 3600, (eta % 3600) //
60, eta % 60)
elif eta > 60:
eta_format = '%d:%02d' % (eta // 60, eta % 60)
else:
eta_format = '%ds' % eta
info += ' - ETA: %s' % eta_format
info += fps
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
# newline for another epoch
if self._num is not None and current_num >= self._num:
info += '\n'
if self._num is None:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
elif self._verbose == 2:
if self._num:
numdigits = int(np.log10(self._num)) + 1
count = ('step %' + str(numdigits) + 'd/%d') % (current_num,
self._num)
else:
count = 'step %3d' % current_num
info = count + info
for k, val in values:
info += ' - %s:' % k
val = val if isinstance(val, list) else [val]
for v in val:
if isinstance(v, (float, np.float32, np.float64)):
if abs(v) > 1e-3:
info += ' %.4f' % v
else:
info += ' %.4e' % v
                elif isinstance(v, np.ndarray) and \
                        v.size == 1 and \
                        v.dtype in (np.float32, np.float64):
                    if abs(v[0]) > 1e-3:
                        info += ' %.4f' % v[0]
                    else:
                        info += ' %.4e' % v[0]
else:
info += ' %s' % v
info += fps
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
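A short usage sketch of the bar (illustrative, not part of this commit):

bar = ProgressBar(num=100)
for step in range(1, 101):
    # values is a list of (name, value) pairs rendered after the bar
    bar.update(step, values=[('loss', 0.125), ('acc', 0.98)])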
@@ -168,13 +168,13 @@ def create_lexnet_data_generator(args, reader, file_name, place, mode="train"):
def create_dataloader(generator, place, feed_list=None):
    if not feed_list:
-        data_loader = fluid.io.DataLoader.from_generator(
+        data_loader = paddle.io.DataLoader.from_generator(
            capacity=50,
            use_double_buffer=True,
            iterable=True,
            return_list=True)
    else:
-        data_loader = fluid.io.DataLoader.from_generator(
+        data_loader = paddle.io.DataLoader.from_generator(
            feed_list=feed_list,
            capacity=50,
            use_double_buffer=True,
...
@@ -31,15 +31,26 @@ setuptools.setup(
    description="A Paddle High-level API that supports both static and dynamic execution modes (still under development)",
    url="https://github.com/PaddlePaddle/hapi",
    packages=[
-        'hapi', 'hapi.text', 'hapi.text.tokenizer', 'hapi.text.bert',
-        'hapi.text.bert.utils'
+        'hapi',
+        'hapi.datasets',
+        'hapi.text',
+        'hapi.text.tokenizer',
+        'hapi.text.bert',
+        'hapi.text.bert.utils',
+        'hapi.vision',
+        'hapi.vision.models',
+        'hapi.vision.transforms',
    ],
    package_dir={
        'hapi': './hapi',
+        'hapi.datasets': './hapi/datasets',
        'hapi.text': './hapi/text',
        'hapi.text.tokenizer': './hapi/text/tokenizer',
        'hapi.text.bert': './hapi/text/bert',
        'hapi.text.bert.utils': './hapi/text/bert/utils',
+        'hapi.vision': './hapi/vision',
+        'hapi.vision.models': './hapi/vision/models',
+        'hapi.vision.transforms': './hapi/vision/transforms',
    },
    platforms="any",
    license='Apache 2.0',
...
@@ -18,7 +18,7 @@ import unittest
import time
import random
-from callbacks import config_callbacks
+from hapi.callbacks import config_callbacks
class TestCallbacks(unittest.TestCase):
...
@@ -14,14 +14,16 @@
# when testing, you should add the hapi root path to the PYTHONPATH,
# export PYTHONPATH=PATH_TO_HAPI:$PYTHONPATH
import unittest
+import numpy as np
-from datasets.folder import DatasetFolder
+from hapi.datasets import *
class TestFolderDatasets(unittest.TestCase):
    def test_dataset(self):
-        dataset_folder = DatasetFolder('test_data')
+        dataset_folder = DatasetFolder('tests/test_data')
        for _ in dataset_folder:
            pass
@@ -30,5 +32,71 @@ class TestFolderDatasets(unittest.TestCase):
        assert len(dataset_folder.classes) == 2
class TestMNISTTest(unittest.TestCase):
def test_main(self):
mnist = MNIST(mode='test')
self.assertTrue(len(mnist) == 10000)
for i in range(len(mnist)):
image, label = mnist[i]
self.assertTrue(image.shape[0] == 784)
self.assertTrue(label.shape[0] == 1)
self.assertTrue(0 <= int(label) <= 9)
class TestMNISTTrain(unittest.TestCase):
def test_main(self):
mnist = MNIST(mode='train')
self.assertTrue(len(mnist) == 60000)
for i in range(len(mnist)):
image, label = mnist[i]
self.assertTrue(image.shape[0] == 784)
self.assertTrue(label.shape[0] == 1)
self.assertTrue(0 <= int(label) <= 9)
class TestFlowersTrain(unittest.TestCase):
def test_main(self):
flowers = Flowers(mode='train')
self.assertTrue(len(flowers) == 6149)
        # traversing the whole dataset may take a
        # long time, so randomly check one sample
idx = np.random.randint(0, 6149)
image, label = flowers[idx]
self.assertTrue(len(image.shape) == 3)
self.assertTrue(image.shape[2] == 3)
self.assertTrue(label.shape[0] == 1)
class TestFlowersValid(unittest.TestCase):
def test_main(self):
flowers = Flowers(mode='valid')
self.assertTrue(len(flowers) == 1020)
        # traversing the whole dataset may take a
        # long time, so randomly check one sample
idx = np.random.randint(0, 1020)
image, label = flowers[idx]
self.assertTrue(len(image.shape) == 3)
self.assertTrue(image.shape[2] == 3)
self.assertTrue(label.shape[0] == 1)
class TestFlowersTest(unittest.TestCase):
def test_main(self):
flowers = Flowers(mode='test')
self.assertTrue(len(flowers) == 1020)
        # traversing the whole dataset may take a
        # long time, so randomly check one sample
idx = np.random.randint(0, 1020)
image, label = flowers[idx]
self.assertTrue(len(image.shape) == 3)
self.assertTrue(image.shape[2] == 3)
self.assertTrue(label.shape[0] == 1)
if __name__ == '__main__':
    unittest.main()
@@ -28,11 +28,12 @@ import contextlib
import paddle
from paddle import fluid
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
-from model import Model, CrossEntropy, Input, Loss, set_device
-from metrics import Accuracy
-from callbacks import ProgBarLogger
-from paddle.fluid.io import BatchSampler, DataLoader
-from paddle.fluid.io import MNIST as MnistDataset
+from paddle.io import BatchSampler, DataLoader
+
+from hapi.model import Model, CrossEntropy, Input, Loss, set_device
+from hapi.metrics import Accuracy
+from hapi.callbacks import ProgBarLogger
+from hapi.datasets import MNIST as MnistDataset
class SimpleImgConvPool(fluid.dygraph.Layer):
...
@@ -18,7 +18,7 @@ import unittest
import random
import time
-from progressbar import ProgressBar
+from hapi.progressbar import ProgressBar
class TestProgressBar(unittest.TestCase):
...
@@ -16,13 +16,13 @@
# export PYTHONPATH=PATH_TO_HAPI:$PYTHONPATH
import unittest
-from datasets.folder import DatasetFolder
-from transform import transforms
+from hapi.datasets import DatasetFolder
+import hapi.vision.transforms as transforms
class TestTransforms(unittest.TestCase):
    def do_transform(self, trans):
-        dataset_folder = DatasetFolder('test_data', transform=trans)
+        dataset_folder = DatasetFolder('tests/test_data', transform=trans)
        for _ in dataset_folder:
            pass
...
(This diff has been collapsed.)
@@ -22,7 +22,7 @@ from functools import partial
import numpy as np
import paddle
import paddle.fluid as fluid
-from paddle.fluid.io import DataLoader
+from paddle.io import DataLoader
from paddle.fluid.layers.utils import flatten
from utils.configure import PDConfig
...
@@ -22,7 +22,7 @@ from functools import partial
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.parallel import ParallelEnv
-from paddle.fluid.io import BatchSampler, DataLoader, Dataset
+from paddle.io import BatchSampler, DataLoader, Dataset
def create_data_loader(args, device):
...
@@ -21,7 +21,7 @@ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import numpy as np
import paddle
import paddle.fluid as fluid
-from paddle.fluid.io import DataLoader
+from paddle.io import DataLoader
from utils.configure import PDConfig
from utils.check import check_gpu, check_version
...