From 59b986c755ec4571ad9ef5d93be061d6c3564fe9 Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Fri, 20 Mar 2020 14:25:03 +0000 Subject: [PATCH] Add fit in Model --- callbacks.py | 266 ++++++++++++++++++++++++++++++++++++++ metrics.py | 38 ++++-- model.py | 132 +++++++++++++++++-- progressbar.py | 159 +++++++++++++++++++++++ tests/test_callbacks.py | 67 ++++++++++ tests/test_model.py | 158 ++++++++++++++++++++++ tests/test_progressbar.py | 49 +++++++ 7 files changed, 848 insertions(+), 21 deletions(-) create mode 100644 callbacks.py create mode 100644 progressbar.py create mode 100644 tests/test_callbacks.py create mode 100644 tests/test_model.py create mode 100644 tests/test_progressbar.py diff --git a/callbacks.py b/callbacks.py new file mode 100644 index 0000000..24a42a1 --- /dev/null +++ b/callbacks.py @@ -0,0 +1,266 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import six
import copy

from progressbar import ProgressBar


def config_callbacks(callbacks=None,
                     model=None,
                     batch_size=None,
                     epochs=None,
                     steps=None,
                     log_freq=2,
                     verbose=2,
                     save_freq=1,
                     metrics=None,
                     mode='train'):
    """Normalize user ``callbacks`` into a configured ``CallbackList``.

    A ``ProgBarLogger`` (when ``verbose``) and a ``ModelCheckpoint`` are
    appended automatically if the user did not supply their own instances.

    Args:
        callbacks (Callback|list|None): user callbacks; a single instance
            is wrapped into a list.
        model: the model broadcast to every callback via ``set_model``.
        batch_size (int|None): stored in callback params for loggers.
        epochs (int|None): total number of training epochs.
        steps (int|None): steps per epoch, None if unknown.
        log_freq (int): logging frequency for the default ProgBarLogger.
        verbose (int): 0 = silent, 1 = progress bar, 2 = one line per log.
        save_freq (int): epoch frequency for the default ModelCheckpoint.
        metrics (list|None): metric names shown by loggers.
        mode (str): one of 'train', 'eval', 'test'.

    Returns:
        CallbackList: the configured callback container.
    """
    cbks = callbacks or []
    cbks = cbks if isinstance(cbks, (list, tuple)) else [cbks]
    if not any(isinstance(k, ProgBarLogger) for k in cbks) and verbose:
        cbks = cbks + [ProgBarLogger(log_freq, verbose=verbose)]

    if not any(isinstance(k, ModelCheckpoint) for k in cbks):
        cbks = cbks + [ModelCheckpoint(save_freq)]

    cbk_list = CallbackList(cbks)
    cbk_list.set_model(model)
    # metric names are irrelevant in pure inference ('test') mode
    metrics = (metrics or []) if mode != 'test' else []
    params = {
        'batch_size': batch_size,
        'epochs': epochs,
        'steps': steps,
        'verbose': verbose,
        'metrics': metrics,
    }
    cbk_list.set_params(params)
    return cbk_list


class CallbackList(object):
    """Container that broadcasts every event to a list of callbacks."""

    def __init__(self, callbacks=None):
        # shallow copy so later appends don't mutate the caller's list
        self.callbacks = [c for c in callbacks]
        self.params = {}
        self.model = None

    def append(self, callback):
        self.callbacks.append(callback)

    def __iter__(self):
        return iter(self.callbacks)

    def set_params(self, params):
        for c in self.callbacks:
            c.set_params(params)

    def set_model(self, model):
        for c in self.callbacks:
            c.set_model(model)

    def _call(self, name, *args):
        # dispatch event ``name`` to every registered callback in order
        for c in self.callbacks:
            func = getattr(c, name)
            func(*args)

    def _check_mode(self, mode):
        assert mode in ['train', 'eval', 'test'], \
            'mode should be train, eval or test'

    def on_begin(self, mode, logs=None):
        self._check_mode(mode)
        name = 'on_{}_begin'.format(mode)
        self._call(name, logs)

    def on_end(self, mode, logs=None):
        self._check_mode(mode)
        name = 'on_{}_end'.format(mode)
        self._call(name, logs)

    def on_epoch_begin(self, epoch=None, logs=None):
        self._call('on_epoch_begin', epoch, logs)

    def on_epoch_end(self, epoch=None, logs=None):
        self._call('on_epoch_end', epoch, logs)

    def on_batch_begin(self, mode, step=None, logs=None):
        self._check_mode(mode)
        name = 'on_{}_batch_begin'.format(mode)
        self._call(name, step, logs)

    def on_batch_end(self, mode, step=None, logs=None):
        self._check_mode(mode)
        name = 'on_{}_batch_end'.format(mode)
        self._call(name, step, logs)


class Callback(object):
    """Base class providing no-op hooks for every training event.

    Subclass and override the hooks of interest. ``CallbackList`` builds
    hook names dynamically as ``on_{mode}_batch_begin/end`` for modes
    'train', 'eval' and 'test', so all six batch hooks must exist here.
    """

    def __init__(self):
        self.model = None
        self.params = {}

    def set_params(self, params):
        self.params = params

    def set_model(self, model):
        self.model = model

    def on_train_begin(self, logs=None):
        """Called once at the beginning of training."""

    def on_train_end(self, logs=None):
        """Called once at the end of training."""

    def on_eval_begin(self, logs=None):
        """Called at the beginning of evaluation."""

    def on_eval_end(self, logs=None):
        """Called at the end of evaluation."""

    def on_test_begin(self, logs=None):
        """Called at the beginning of inference."""

    def on_test_end(self, logs=None):
        """Called at the end of inference."""

    def on_epoch_begin(self, epoch, logs=None):
        """Called at the beginning of each training epoch."""

    def on_epoch_end(self, epoch, logs=None):
        """Called at the end of each training epoch."""

    def on_train_batch_begin(self, step, logs=None):
        """Called before each training batch."""

    def on_train_batch_end(self, step, logs=None):
        """Called after each training batch."""

    def on_eval_batch_begin(self, step, logs=None):
        """Called before each evaluation batch."""

    def on_eval_batch_end(self, step, logs=None):
        """Called after each evaluation batch."""

    # BUG FIX: the original defined on_eval_batch_begin/end twice and
    # never defined the test hooks, so dispatching mode='test' batches
    # raised AttributeError.
    def on_test_batch_begin(self, step, logs=None):
        """Called before each inference batch."""

    def on_test_batch_end(self, step, logs=None):
        """Called after each inference batch."""


class ProgBarLogger(Callback):
    """Log loss/metric values through a ``ProgressBar``.

    Args:
        log_freq (int): update the bar every ``log_freq`` train steps.
        verbose (int): verbosity forwarded to ``ProgressBar``.
    """

    def __init__(self, log_freq=1, verbose=2):
        # BUG FIX: initialize the base class so self.model/self.params
        # always exist even before set_model/set_params are called.
        super(ProgBarLogger, self).__init__()
        self.epochs = None
        self.steps = None
        self.progbar = None
        self.verbose = verbose
        self.log_freq = log_freq

    def on_train_begin(self, logs=None):
        self.epochs = self.params['epochs']
        assert self.epochs
        self.train_metrics = self.params['metrics']
        assert self.train_metrics

    def on_epoch_begin(self, epoch=None, logs=None):
        self.steps = self.params['steps']
        self.epoch = epoch
        self.train_step = 0
        if self.verbose and self.epochs:
            print('Epoch %d/%d' % (epoch + 1, self.epochs))
        self.train_progbar = ProgressBar(num=self.steps, verbose=self.verbose)

    def _updates(self, logs, mode):
        # push the metric values present in ``logs`` to the mode's bar;
        # mode is 'train' or 'eval' and selects the matching attributes
        values = []
        metrics = getattr(self, '%s_metrics' % (mode))
        progbar = getattr(self, '%s_progbar' % (mode))
        steps = getattr(self, '%s_step' % (mode))
        for k in metrics:
            if k in logs:
                values.append((k, logs[k]))
        progbar.update(steps, values)

    def on_train_batch_end(self, step, logs=None):
        logs = logs or {}
        self.train_step = step

        # NOTE: the original had an if/else with identical branches here;
        # when steps is known the final step is refreshed in on_epoch_end.
        if self.train_step % self.log_freq == 0 and self.verbose:
            self._updates(logs, 'train')

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        if self.verbose:
            self._updates(logs, 'train')

    def on_eval_begin(self, logs=None):
        # BUG FIX: tolerate a missing logs dict instead of crashing
        logs = logs or {}
        self.eval_steps = logs.get('steps', None)
        self.eval_metrics = logs.get('metrics_name', [])
        self.eval_step = 0
        self.evaled_samples = 0
        self.eval_progbar = ProgressBar(
            num=self.eval_steps, verbose=self.verbose)
        print('Eval begin...')

    def on_eval_batch_end(self, step, logs=None):
        logs = logs or {}
        self.eval_step = step
        samples = logs.get('batch_size', 1)
        self.evaled_samples += samples

    def on_eval_end(self, logs=None):
        logs = logs or {}
        if self.verbose:
            self._updates(logs, 'eval')
        print('Eval samples: %d' % (self.evaled_samples))


class ModelCheckpoint(Callback):
    """Save the model every ``save_freq`` epochs and at the end of training.

    Args:
        save_freq (int): epoch frequency for periodic checkpoints.
        save_file (str): directory prefix for checkpoints (despite the
            name, this is used as a directory: ``<save_file>/<epoch>``).
    """

    def __init__(self, save_freq=1, save_file='output'):
        super(ModelCheckpoint, self).__init__()
        self.save_freq = save_freq
        self.save_file = save_file

    def on_epoch_begin(self, epoch=None, logs=None):
        self.epoch = epoch

    def on_epoch_end(self, epoch, logs=None):
        if self.model and self.epoch % self.save_freq == 0:
            path = '{}/{}'.format(self.save_file, epoch)
            print('save checkpoint at {}'.format(path))
            self.model.save(path)

    def on_train_end(self, logs=None):
        if self.model:
            path = '{}/final'.format(self.save_file)
            print('save checkpoint at {}'.format(path))
            self.model.save(path)
b/metrics.py @@ -44,21 +44,33 @@ class Metric(object): """ Reset states and result """ - raise NotImplementedError("function 'reset' not implemented in {}.".format(self.__class__.__name__)) + raise NotImplementedError("function 'reset' not implemented in {}.". + format(self.__class__.__name__)) @abc.abstractmethod def update(self, *args, **kwargs): """ Update states for metric """ - raise NotImplementedError("function 'update' not implemented in {}.".format(self.__class__.__name__)) + raise NotImplementedError("function 'update' not implemented in {}.". + format(self.__class__.__name__)) @abc.abstractmethod def accumulate(self): """ Accumulates statistics, computes and returns the metric value """ - raise NotImplementedError("function 'accumulate' not implemented in {}.".format(self.__class__.__name__)) + raise NotImplementedError( + "function 'accumulate' not implemented in {}.".format( + self.__class__.__name__)) + + @abc.abstractmethod + def name(self): + """ + Returns metric name + """ + raise NotImplementedError("function 'name' not implemented in {}.". 
+ format(self.__class__.__name__)) def add_metric_op(self, pred, label): """ @@ -72,11 +84,12 @@ class Accuracy(Metric): Encapsulates accuracy metric logic """ - def __init__(self, topk=(1, ), *args, **kwargs): - super(Accuracy, self).__init__(*args, **kwargs) - self.topk = topk - self.maxk = max(topk) - self.reset() + def __init__(self, topk=(1, ), name=None, *args, **kwargs): + super(Accuracy, self).__init__(*args, **kwargs) + self.topk = topk + self.maxk = max(topk) + self._init_name(name) + self.reset() def add_metric_op(self, pred, label, *args, **kwargs): pred = fluid.layers.argsort(pred[0], descending=True)[1][:, :self.maxk] @@ -103,3 +116,12 @@ class Accuracy(Metric): res.append(float(t) / c) return res + def _init_name(self, name): + name = name or 'acc' + if self.maxk != 1: + self._name = ['{}_top{}'.format(name, k) for k in self.topk] + else: + self._name = ['acc'] + + def name(self): + return self._name diff --git a/model.py b/model.py index acd5323..2df073c 100644 --- a/model.py +++ b/model.py @@ -17,16 +17,19 @@ from __future__ import absolute_import import inspect import os import pickle -from collections import OrderedDict - import numpy as np +import itertools +from collections import Iterable +from collections import OrderedDict from paddle import fluid from paddle.fluid.framework import in_dygraph_mode, Variable from paddle.fluid.executor import global_scope from paddle.fluid.io import is_belong_to_optimizer from paddle.fluid.dygraph.base import to_variable + from metrics import Metric +from callbacks import config_callbacks __all__ = ['Model', 'Loss', 'CrossEntropy', 'Input'] @@ -336,10 +339,10 @@ class StaticGraphAdapter(object): metric_list, metric_splits = flatten_list(endpoints['metric']) fetch_list = endpoints['loss'] + metric_list num_loss = len(endpoints['loss']) - rets = self._executor.run( - compiled_prog, feed=feed, - fetch_list=fetch_list, - return_numpy=False) + rets = self._executor.run(compiled_prog, + feed=feed, + 
fetch_list=fetch_list, + return_numpy=False) # LoDTensor cannot be fetch as numpy directly rets = [np.array(v) for v in rets] if self.mode == 'test': @@ -392,7 +395,8 @@ class StaticGraphAdapter(object): if self.model._loss_function: losses = self.model._loss_function(outputs, labels) for metric in self.model._metrics: - metrics.append(to_list(metric.add_metric_op(outputs, labels))) + metrics.append( + to_list(metric.add_metric_op(outputs, labels))) if mode == 'train' and self.model._optimizer: self._loss_endpoint = fluid.layers.sum(losses) self.model._optimizer.minimize(self._loss_endpoint) @@ -402,7 +406,11 @@ class StaticGraphAdapter(object): self._input_vars[mode] = inputs self._label_vars[mode] = labels self._progs[mode] = prog - self._endpoints[mode] = {"output": outputs, "loss": losses, "metric": metrics} + self._endpoints[mode] = { + "output": outputs, + "loss": losses, + "metric": metrics + } def _compile_and_initialize(self, prog, mode): compiled_prog = self._compiled_progs.get(mode, None) @@ -465,7 +473,8 @@ class DynamicGraphAdapter(object): inputs = to_list(inputs) if labels is not None: labels = [to_variable(l) for l in to_list(labels)] - outputs = to_list(self.model.forward(*[to_variable(x) for x in inputs])) + outputs = to_list( + self.model.forward(*[to_variable(x) for x in inputs])) losses = self.model._loss_function(outputs, labels) final_loss = fluid.layers.sum(losses) final_loss.backward() @@ -485,7 +494,8 @@ class DynamicGraphAdapter(object): inputs = to_list(inputs) if labels is not None: labels = [to_variable(l) for l in to_list(labels)] - outputs = to_list(self.model.forward(*[to_variable(x) for x in inputs])) + outputs = to_list( + self.model.forward(*[to_variable(x) for x in inputs])) if self.model._loss_function: losses = self.model._loss_function(outputs, labels) @@ -585,7 +595,6 @@ class Model(fluid.dygraph.Layer): self._labels = None self._loss_function = None self._loss_weights = None - self._loss = None self._optimizer = None 
self._device = None self._device_ids = None @@ -610,6 +619,9 @@ class Model(fluid.dygraph.Layer): def load(self, *args, **kwargs): return self._adapter.load(*args, **kwargs) + def parameters(self, *args, **kwargs): + return self._adapter.parameters(*args, **kwargs) + def prepare(self, optimizer=None, loss_function=None, @@ -680,5 +692,99 @@ class Model(fluid.dygraph.Layer): if not in_dygraph_mode(): self._adapter.prepare() - def parameters(self, *args, **kwargs): - return self._adapter.parameters(*args, **kwargs) + def fit( + self, + train_loader=None, + eval_loader=None, + epochs=1, + eval_freq=1, + log_freq=10, + save_freq=1, + verbose=2, + callbacks=None, ): + """ + FIXME: add more comments and usage + Args: + train_loader (DataLoader): an iterable data loader is used for train. + eval_loader (DataLoader): an iterable data loader is used for + evaluation at the end of epoch. If None, will not do evaluation. + epochs (int): number of epochs to train the model. + eval_freq (int): evaluation frequency in epoch. + log_freq (int): frequency to print log during training. + save_freq (int): frequency to save checkpoint during training. + verbose (int): verbosity mode, should be 0, 1, or 2. + 0 = silent, 1 = progress bar, 2 = one line per epoch. + callbacks (Callback|None): list of `Callback` instances to apply + during training. 
+ """ + do_eval = eval_loader is not None + metrics_name = self._metrics_name() + cbks = config_callbacks( + callbacks, + model=self, + epochs=epochs, + steps=None, + log_freq=log_freq, + save_freq=save_freq, + verbose=verbose, + metrics=self._metrics_name(), ) + + def _run_one_epoch(data_loader, callbacks, mode): + size = data_loader.size if hasattr(data_loader, 'size') else None + logs = { + 'steps': size, + 'metrics_name': metrics_name, + } + for step, data in enumerate(data_loader): + cbks.on_batch_begin(mode, step, logs) + if mode == 'train': + outs = self.train(*data) + else: + outs = self.eval(*data) + + metrics = list(itertools.chain.from_iterable(outs)) + metrics = [np.mean(metrics[0])] + for metric in self._metrics: + res = metric.accumulate() + metrics.extend(to_list(res)) + assert len(metrics_name) == len(metrics) + for k, v in zip(metrics_name, metrics): + logs[k] = np.mean(v) + + logs['step'] = step + logs['batch_size'] = data[0].shape[0] + + cbks.on_batch_end(mode, step, logs) + self._reset_metrics() + return logs + + cbks.on_begin('train') + for epoch in range(epochs): + cbks.on_epoch_begin(epoch) + # FIXME: adapte to DataLoader + loader = train_loader + if not isinstance(train_loader, Iterable): + loader = train_loader() + logs = _run_one_epoch(loader, cbks, 'train') + cbks.on_epoch_end(epoch, logs) + + if do_eval and epoch % eval_freq == 0: + cbks.on_begin('eval', logs) + # FIXME: adapte to DataLoader + loader = eval_loader + if not isinstance(eval_loader, Iterable): + loader = eval_loader() + logs = _run_one_epoch(eval_loader(), cbks, 'eval') + cbks.on_end('eval', logs) + + cbks.on_end('train', logs) + + def _reset_metrics(self): + for metric in self._metrics: + metric.reset() + + def _metrics_name(self): + metrics_name = ['loss'] + for m in self._metrics: + metrics_name.extend(to_list(m.name())) + return metrics_name diff --git a/progressbar.py b/progressbar.py new file mode 100644 index 0000000..73754d5 --- /dev/null +++ b/progressbar.py @@ 
import os
import sys
import time

import numpy as np


class ProgressBar(object):
    """Render a textual progress bar for training/evaluation loops.

    Args:
        num (int|None): total number of steps; None means unknown.
        width (int): maximum width of the bar body in characters.
        verbose (int): 1 = animated in-place bar, 2 = one line per update,
            anything else = silent.
        start (bool): record the start timestamp at construction time.
        file: writable stream the bar is rendered to (default sys.stdout).
    """

    def __init__(self,
                 num=None,
                 width=30,
                 verbose=1,
                 start=True,
                 file=sys.stdout):
        self._num = num
        # NOTE(review): kept as TypeError for backward compatibility with
        # the original, although the condition is really a value check.
        if isinstance(num, int) and num <= 0:
            raise TypeError('num should be None or integer (> 0)')
        max_width = self._get_max_width()
        self._width = width if width <= max_width else max_width
        self._total_width = 0
        self._verbose = verbose
        self.file = file
        self._values = {}
        self._values_order = []
        # BUG FIX: initialize unconditionally; the original only set this
        # when start=True, leaving the attribute missing otherwise.
        self._last_update = 0
        if start:
            self._start = time.time()

        # In-place redraw only works on terminals / notebook kernels;
        # otherwise each update is printed on its own line.
        # BUG FIX: ``os`` was used here without ever being imported.
        self._dynamic_display = (
            (hasattr(self.file, 'isatty') and
             self.file.isatty()) or 'ipykernel' in sys.modules or
            'posix' in sys.modules or 'PYCHARM_HOSTED' in os.environ)

    def _get_max_width(self):
        # shutil.get_terminal_size is stdlib from Python 3.3 onwards.
        if sys.version_info > (3, 3):
            from shutil import get_terminal_size
        else:
            from backports.shutil_get_terminal_size import get_terminal_size
        terminal_width, _ = get_terminal_size()
        max_width = min(int(terminal_width * 0.6), terminal_width - 50)
        return max_width

    def start(self):
        self.file.flush()
        self._start = time.time()

    def update(self, current_num, values=None):
        """Render progress for step ``current_num``.

        Args:
            current_num (int): the current (1-based) step number.
            values (list|None): list of ``(name, value)`` pairs to display.
        """
        now = time.time()
        # BUG FIX: tolerate values=None (the declared default) instead of
        # crashing in the render loops below.
        values = values or []

        if current_num:
            time_per_unit = (now - self._start) / current_num
        else:
            time_per_unit = 0

        if time_per_unit >= 1 or time_per_unit == 0:
            fps = ' - %.0fs/%s' % (time_per_unit, 'step')
        elif time_per_unit >= 1e-3:
            fps = ' - %.0fms/%s' % (time_per_unit * 1e3, 'step')
        else:
            fps = ' - %.0fus/%s' % (time_per_unit * 1e6, 'step')

        info = ''
        if self._verbose == 1:
            prev_total_width = self._total_width

            # BUG FIX: all output goes to self.file — the original wrote to
            # sys.stdout, silently ignoring the ``file`` argument.
            if self._dynamic_display:
                self.file.write('\b' * prev_total_width)
                self.file.write('\r')
            else:
                self.file.write('\n')

            if self._num is not None:
                numdigits = int(np.log10(self._num)) + 1

                bar_chars = ('step %' + str(numdigits) + 'd/%d [') % (
                    current_num, self._num)
                prog = float(current_num) / self._num
                prog_width = int(self._width * prog)

                if prog_width > 0:
                    bar_chars += ('=' * (prog_width - 1))
                    if current_num < self._num:
                        bar_chars += '>'
                    else:
                        bar_chars += '='
                bar_chars += ('.' * (self._width - prog_width))
                bar_chars += ']'
            else:
                bar_chars = 'step %3d' % current_num

            self._total_width = len(bar_chars)
            self.file.write(bar_chars)

            for k, v in values:
                info += ' - %s:' % k
                if isinstance(v, (float, np.float32, np.float64)):
                    if abs(v) > 1e-3:
                        info += ' %.4f' % v
                    else:
                        info += ' %.4e' % v
                else:
                    info += ' %s' % v

            if self._num is not None and current_num < self._num:
                eta = time_per_unit * (self._num - current_num)
                if eta > 3600:
                    eta_format = '%d:%02d:%02d' % (eta // 3600, (eta % 3600) //
                                                   60, eta % 60)
                elif eta > 60:
                    eta_format = '%d:%02d' % (eta // 60, eta % 60)
                else:
                    eta_format = '%ds' % eta

                info += ' - ETA: %s' % eta_format

            info += fps
            self._total_width += len(info)
            if prev_total_width > self._total_width:
                # blank-pad so leftovers from a longer previous line vanish
                info += (' ' * (prev_total_width - self._total_width))

            # newline when the bar completes (or when total is unknown)
            if self._num is not None and current_num >= self._num:
                info += '\n'
            if self._num is None:
                info += '\n'

            self.file.write(info)
            self.file.flush()
            self._last_update = now
        elif self._verbose == 2:
            if self._num:
                numdigits = int(np.log10(self._num)) + 1
                count = ('step %' + str(numdigits) + 'd/%d') % (current_num,
                                                                self._num)
            else:
                count = 'step %3d' % current_num
            info = count + info

            for k, v in values:
                info += ' - %s:' % k
                if isinstance(v, (float, np.float32, np.float64)):
                    if abs(v) > 1e-3:
                        info += ' %.4f' % v
                    else:
                        info += ' %.4e' % v
                elif isinstance(v, np.ndarray) and v.size == 1 and \
                        v.dtype in (np.float32, np.float64):
                    # BUG FIX: unwrap 1-element float arrays; the original
                    # used isinstance(v.size, 1) / isinstance(v.dtype, ...)
                    # which raise TypeError (ints/dtypes are not types).
                    if abs(v[0]) > 1e-3:
                        info += ' %.4f' % v[0]
                    else:
                        info += ' %.4e' % v[0]
                else:
                    info += ' %s' % v

            info += fps
            info += '\n'
            self.file.write(info)
            self.file.flush()
            self._last_update = now
import unittest
import time
import random

from callbacks import config_callbacks


class TestCallbacks(unittest.TestCase):
    def test_callback(self):
        """Drive a full train + eval callback lifecycle with fake logs."""
        epochs = 2
        steps = 50
        freq = 1
        eval_steps = 20
        cbks = config_callbacks(
            batch_size=128,
            epochs=epochs,
            steps=steps,
            verbose=2,
            metrics=['loss', 'acc'], )
        cbks.on_begin('train')

        logs = {'loss': 50.341673, 'acc': 0.00256}
        for epoch in range(epochs):
            cbks.on_epoch_begin(epoch)
            for step in range(steps):
                cbks.on_batch_begin('train', step, logs)
                logs['loss'] -= random.random() * 0.1
                logs['acc'] += random.random() * 0.1
                time.sleep(0.005)
                cbks.on_batch_end('train', step, logs)
            cbks.on_epoch_end(epoch, logs)

        eval_logs = {'eval_loss': 20.341673, 'eval_acc': 0.256}
        params = {
            'eval_steps': eval_steps,
            'eval_metrics': ['eval_loss', 'eval_acc'],
            'log_freq': 10,
        }
        cbks.on_begin('eval', params)
        for step in range(eval_steps):
            # BUG FIX: the original passed the *train* logs dict here
            cbks.on_batch_begin('eval', step, eval_logs)
            eval_logs['eval_loss'] -= random.random() * 0.1
            eval_logs['eval_acc'] += random.random() * 0.1
            eval_logs['batch_size'] = 2
            time.sleep(0.005)
            cbks.on_batch_end('eval', step, eval_logs)
        cbks.on_end('eval', eval_logs)

        cbks.on_end('train')


if __name__ == '__main__':
    unittest.main()
__name__ == '__main__': + unittest.main() diff --git a/tests/test_model.py b/tests/test_model.py new file mode 100644 index 0000000..8779152 --- /dev/null +++ b/tests/test_model.py @@ -0,0 +1,158 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import division +from __future__ import print_function + +import unittest + +import os +import numpy as np +import contextlib + +import paddle +from paddle import fluid +from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear +from model import Model, CrossEntropy, Input +from metrics import Accuracy +from callbacks import ProgBarLogger + + +class SimpleImgConvPool(fluid.dygraph.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + pool_size, + pool_stride, + pool_padding=0, + pool_type='max', + global_pooling=False, + conv_stride=1, + conv_padding=0, + conv_dilation=1, + conv_groups=None, + act=None, + use_cudnn=False, + param_attr=None, + bias_attr=None): + super(SimpleImgConvPool, self).__init__('SimpleConv') + + self._conv2d = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=conv_stride, + padding=conv_padding, + dilation=conv_dilation, + groups=conv_groups, + param_attr=None, + bias_attr=None, + use_cudnn=use_cudnn) + + self._pool2d = Pool2D( + pool_size=pool_size, + pool_type=pool_type, + pool_stride=pool_stride, + pool_padding=pool_padding, + 
global_pooling=global_pooling, + use_cudnn=use_cudnn) + + def forward(self, inputs): + x = self._conv2d(inputs) + x = self._pool2d(x) + return x + + +class MNIST(Model): + def __init__(self): + super(MNIST, self).__init__() + self._simple_img_conv_pool_1 = SimpleImgConvPool( + 1, 20, 5, 2, 2, act="relu") + + self._simple_img_conv_pool_2 = SimpleImgConvPool( + 20, 50, 5, 2, 2, act="relu") + + pool_2_shape = 50 * 4 * 4 + SIZE = 10 + scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5 + self._fc = Linear( + 800, + 10, + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.NormalInitializer( + loc=0.0, scale=scale)), + act="softmax") + + def forward(self, inputs): + x = self._simple_img_conv_pool_1(inputs) + x = self._simple_img_conv_pool_2(x) + x = fluid.layers.flatten(x, axis=1) + x = self._fc(x) + return x + + +def accuracy(pred, label, topk=(1, )): + maxk = max(topk) + pred = np.argsort(pred)[:, ::-1][:, :maxk] + correct = (pred == np.repeat(label, maxk, 1)) + + batch_size = label.shape[0] + res = [] + for k in topk: + correct_k = correct[:, :k].sum() + res.append(100.0 * correct_k / batch_size) + return res + + +@contextlib.contextmanager +def null_guard(): + yield + + +class TestModel(unittest.TestCase): + def fit(self, dynamic): + guard = fluid.dygraph.guard() if dynamic else null_guard() + batch_size = 128 + train_loader = fluid.io.xmap_readers( + lambda b: [np.array([x[0] for x in b]).reshape(-1, 1, 28, 28), + np.array([x[1] for x in b]).reshape(-1, 1)], + paddle.batch(fluid.io.shuffle(paddle.dataset.mnist.train(), 6e4), + batch_size=batch_size, drop_last=True), 1, 1) + val_loader = fluid.io.xmap_readers( + lambda b: [np.array([x[0] for x in b]).reshape(-1, 1, 28, 28), + np.array([x[1] for x in b]).reshape(-1, 1)], + paddle.batch(paddle.dataset.mnist.test(), + batch_size=batch_size, drop_last=False), 1, 1) + with guard: + inputs = [Input([None, 1, 28, 28], 'float32', name='image')] + labels = [Input([None, 1], 'int64', name='label')] + model = 
MNIST() + optim = fluid.optimizer.Momentum( + learning_rate=0.01, + momentum=.9, + parameter_list=model.parameters()) + model.prepare(optim, CrossEntropy(), Accuracy(), inputs, labels) + cbk = ProgBarLogger(50) + model.fit(train_loader, val_loader, epochs=2, callbacks=cbk) + + def test_fit_static(self): + self.fit(False) + + def test_fit_dygraph(self): + self.fit(True) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_progressbar.py b/tests/test_progressbar.py new file mode 100644 index 0000000..f7019a7 --- /dev/null +++ b/tests/test_progressbar.py @@ -0,0 +1,49 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import random +import time + +from progressbar import ProgressBar + + +class TestProgressBar(unittest.TestCase): + def prog_bar(self, num, epoch, width, verbose=1): + for epoch in range(epoch): + progbar = ProgressBar(num, verbose=verbose) + values = [ + ['loss', 50.341673], + ['acc', 0.00256], + ] + for step in xrange(1, num + 1): + values[0][1] -= random.random() * 0.1 + values[1][1] += random.random() * 0.1 + if step % 10 == 0: + progbar.update(step, values) + time.sleep(0.002) + progbar.update(step, values) + + def test1(self): + self.prog_bar(50, 1, 30) + + def test2(self): + self.prog_bar(50, 2, 30) + + def test4(self): + self.prog_bar(50, 2, 30, verbose=2) + + +if __name__ == '__main__': + unittest.main() -- GitLab