未验证 提交 b5f8784c 编写于 作者: Q qingqing01 提交者: GitHub

Refine Model of high level API (#25559)

* Refine Model

1. Take the network (instance of Layer) as the input of Model.
2. Refine set_dict/load_dict of Layer.
3. Refine Input interface, so update code sample about Input
上级 4152d399
...@@ -94,7 +94,9 @@ def save_dygraph(state_dict, model_path): ...@@ -94,7 +94,9 @@ def save_dygraph(state_dict, model_path):
pickle.dump(model_dict, f, protocol=2) pickle.dump(model_dict, f, protocol=2)
@dygraph_only # TODO(qingqing01): remove dygraph_only to support loading static model.
# maybe need to unify the loading interface after 2.0 API is ready.
#@dygraph_only
def load_dygraph(model_path, keep_name_table=False): def load_dygraph(model_path, keep_name_table=False):
''' '''
:api_attr: imperative :api_attr: imperative
......
...@@ -16,9 +16,12 @@ import collections ...@@ -16,9 +16,12 @@ import collections
import contextlib import contextlib
import sys import sys
import numpy as np import numpy as np
import collections
import six import six
import re import re
import copy
import weakref
import warnings
from . import parallel_helper from . import parallel_helper
from .. import unique_name from .. import unique_name
from paddle.fluid import core from paddle.fluid import core
...@@ -26,9 +29,6 @@ from .layer_object_helper import LayerObjectHelper ...@@ -26,9 +29,6 @@ from .layer_object_helper import LayerObjectHelper
from .base import program_desc_tracing_guard, param_guard from .base import program_desc_tracing_guard, param_guard
from paddle.fluid import framework from paddle.fluid import framework
from ..param_attr import ParamAttr from ..param_attr import ParamAttr
import copy
import weakref
import warnings
__all__ = ['Layer'] __all__ = ['Layer']
......
...@@ -66,7 +66,6 @@ CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName() ...@@ -66,7 +66,6 @@ CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()
_dygraph_tracer_ = None _dygraph_tracer_ = None
_dygraph_current_expected_place_ = None _dygraph_current_expected_place_ = None
_current_device = None _current_device = None
global_prog_seed = 0 global_prog_seed = 0
......
...@@ -16,7 +16,10 @@ from . import logger ...@@ -16,7 +16,10 @@ from . import logger
from . import progressbar from . import progressbar
from . import callbacks from . import callbacks
from . import download from . import download
from . import model from . import model
from .model import *
from . import metrics from . import metrics
from . import loss from . import loss
from . import datasets from . import datasets
...@@ -24,6 +27,11 @@ from . import distributed ...@@ -24,6 +27,11 @@ from . import distributed
from . import vision from . import vision
from . import text from . import text
from . import device
from .device import *
from .dygraph_layer_patch import monkey_patch_layer
logger.setup_logger() logger.setup_logger()
__all__ = [ __all__ = [
...@@ -35,6 +43,6 @@ __all__ = [ ...@@ -35,6 +43,6 @@ __all__ = [
'loss', 'loss',
'vision', 'vision',
'text', 'text',
] ] + model.__all__ + device.__all__
__all__ += model.__all__ monkey_patch_layer()
...@@ -291,30 +291,22 @@ class ProgBarLogger(Callback): ...@@ -291,30 +291,22 @@ class ProgBarLogger(Callback):
Examples: Examples:
.. code-block:: python .. code-block:: python
import numpy as np import paddle.fluid as fluid
from paddle import fluid import paddle.incubate.hapi as hapi
from paddle.incubate.hapi.metrics import Accuracy
from paddle.incubate.hapi.loss import CrossEntropy
from paddle.incubate.hapi.datasets import MNIST
from paddle.incubate.hapi.vision.models import LeNet
from paddle.incubate.hapi.callbacks import ProgBarLogger
from paddle.incubate.hapi.model import Input, set_device
inputs = [Input([-1, 1, 28, 28], 'float32', name='image')] inputs = [hapi.Input('image', [-1, 1, 28, 28], 'float32')]
labels = [Input([None, 1], 'int64', name='label')] labels = [hapi.Input('label', [None, 1], 'int64')]
train_dataset = MNIST(mode='train') train_dataset = hapi.datasets.MNIST(mode='train')
model = LeNet() model = hapi.Model(hapi.vision.LeNet(), inputs, labels)
optim = fluid.optimizer.Adam(0.001) optim = fluid.optimizer.Adam(0.001)
model.prepare(optimizer=optim, model.prepare(optimizer=optim,
loss_function=CrossEntropy(), loss_function=hapi.loss.CrossEntropy(),
metrics=Accuracy(), metrics=hapi.metrics.Accuracy())
inputs=inputs,
labels=labels)
callback = ProgBarLogger(log_freq=10) callback = hapi.callbacks.ProgBarLogger(log_freq=10)
model.fit(train_dataset, batch_size=64, callbacks=callback) model.fit(train_dataset, batch_size=64, callbacks=callback)
""" """
...@@ -433,31 +425,22 @@ class ModelCheckpoint(Callback): ...@@ -433,31 +425,22 @@ class ModelCheckpoint(Callback):
Examples: Examples:
.. code-block:: python .. code-block:: python
import numpy as np import paddle.fluid as fluid
from paddle import fluid import paddle.incubate.hapi as hapi
from paddle.incubate.hapi.metrics import Accuracy
from paddle.incubate.hapi.loss import CrossEntropy
from paddle.incubate.hapi.datasets import MNIST
from paddle.incubate.hapi.vision.models import LeNet inputs = [hapi.Input('image', [-1, 1, 28, 28], 'float32')]
from paddle.incubate.hapi.callbacks import ModelCheckpoint labels = [hapi.Input('label', [None, 1], 'int64')]
from paddle.incubate.hapi.model import Input, set_device
inputs = [Input([-1, 1, 28, 28], 'float32', name='image')] train_dataset = hapi.datasets.MNIST(mode='train')
labels = [Input([None, 1], 'int64', name='label')]
train_dataset = MNIST(mode='train') model = hapi.Model(hapi.vision.LeNet(), inputs, labels)
model = LeNet()
optim = fluid.optimizer.Adam(0.001) optim = fluid.optimizer.Adam(0.001)
model.prepare(optimizer=optim, model.prepare(optimizer=optim,
loss_function=CrossEntropy(), loss_function=hapi.loss.CrossEntropy(),
metrics=Accuracy(), metrics=hapi.metrics.Accuracy())
inputs=inputs,
labels=labels)
callback = ModelCheckpoint(save_dir='./temp') callback = hapi.callbacks.ModelCheckpoint(save_dir='./temp')
model.fit(train_dataset, batch_size=64, callbacks=callback) model.fit(train_dataset, batch_size=64, callbacks=callback)
""" """
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import paddle.fluid as fluid
from paddle.fluid.dygraph.parallel import ParallelEnv
__all__ = ['set_device', ]
# TODO(qingqing01): remove or refine _global_device, set_device and get_device
# after core framework supporting these function.
_global_device = None
def set_device(device):
    """
    Set the global device used by the high level API and return the place.

    Args:
        device (str): specify device type, 'cpu' or 'gpu'.

    Returns:
        fluid.CUDAPlace or fluid.CPUPlace: Created GPU or CPU place.

    Examples:
        .. code-block:: python

            import paddle.incubate.hapi as hapi

            input = hapi.set_device('gpu')
    """
    assert isinstance(device, six.string_types) and device.lower() in ['cpu', 'gpu'], \
        "Expected device in ['cpu', 'gpu'], but got {}".format(device)

    # A 'gpu' request silently falls back to CPU when CUDA support is absent.
    if device.lower() == 'gpu' and fluid.is_compiled_with_cuda():
        place = fluid.CUDAPlace(ParallelEnv().dev_id)
    else:
        place = fluid.CPUPlace()

    global _global_device
    _global_device = place
    return place
def _get_device():
    """
    Return the global device set via ``set_device``; when none was set,
    fall back to CUDAPlace (if compiled with CUDA) or CPUPlace.
    """
    if _global_device is not None:
        return _global_device
    if fluid.is_compiled_with_cuda():
        return fluid.CUDAPlace(ParallelEnv().dev_id)
    return fluid.CPUPlace()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import paddle.fluid as fluid
from paddle.fluid.framework import in_dygraph_mode
from .device import _get_device
def monkey_patch_layer():
    """Attach a backward-compatible ``load_dict`` method to fluid.dygraph.Layer."""

    def load_dict(self,
                  stat_dict,
                  include_sublayers=True,
                  use_structured_name=True):
        '''
        Set parameters from stat_dict. All the parameters will be reset by the
        tensor in the stat_dict.

        This api will be Deprecated. Please use set_dict.

        Parameters:
            stat_dict(dict) : Dict contains all the parameters.
            include_sublayers(bool, optional) : If true, also include the
                parameters from sublayers. Default: True.
            use_structured_name(bool, optional) : If true, use structured name
                as key, otherwise, use parameter name as key. Default: True.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid
                with fluid.dygraph.guard():
                    emb = fluid.dygraph.Embedding([10, 10])
                    state_dict = emb.state_dict()
                    fluid.save_dygraph( state_dict, "paddle_dy")
                    para_state_dict, _ = fluid.load_dygraph( "paddle_dy")
                    emb.load_dict( para_state_dict )
        '''

        def _check_match(key, param):
            # Raises ValueError if the key is absent or the shapes disagree.
            state = stat_dict.get(key, None)
            if state is None:
                raise ValueError(
                    "{} is not found in the providing file.".format(key))
            if list(state.shape) != list(param.shape):
                raise ValueError(
                    "{} receives a shape {}, but the expected shape is {}.".
                    format(key, list(state.shape), list(param.shape)))
            return param, state

        matched_param_state = []
        for key, param in self.state_dict().items():
            key_name = key if use_structured_name else param.name
            try:
                match_res = _check_match(key_name, param)
            except ValueError as err:
                warnings.warn(("Skip loading for {}. ".format(key) + str(err)))
                # BUG FIX: skip this parameter entirely. The original code
                # fell through to the append below, which raised NameError on
                # the first mismatch (match_res undefined) or appended a stale
                # match from the previous iteration.
                continue
            matched_param_state.append(match_res)

        if in_dygraph_mode():
            # Dygraph mode: assign tensors directly onto the parameters.
            for param, state in matched_param_state:
                param.set_value(state)
        else:
            # Static graph mode: write the numpy arrays into the scope vars.
            def _set_var(var, ndarray):
                t = fluid.global_scope().find_var(var.name).get_tensor()
                p = t._place()
                if p.is_cpu_place():
                    place = fluid.CPUPlace()
                elif p.is_cuda_pinned_place():
                    place = fluid.CUDAPinnedPlace()
                else:
                    p = fluid.core.Place()
                    p.set_place(t._place())
                    place = fluid.CUDAPlace(p.gpu_device_id())
                t.set(ndarray, place)

            executor = fluid.Executor(_get_device())._default_executor
            # restore parameter states
            fluid.core._create_loaded_parameter(
                [param for param, state in matched_param_state],
                fluid.global_scope(), executor)
            for param, state in matched_param_state:
                _set_var(param, state)

    setattr(fluid.dygraph.Layer, 'load_dict', load_dict)
...@@ -86,16 +86,13 @@ class CrossEntropy(Loss): ...@@ -86,16 +86,13 @@ class CrossEntropy(Loss):
Examples: Examples:
.. code-block:: python .. code-block:: python
from paddle.incubate.hapi.model import Input import paddle.fluid as fluid
from paddle.incubate.hapi.vision.models import LeNet import paddle.incubate.hapi as hapi
from paddle.incubate.hapi.loss import CrossEntropy
inputs = [Input([-1, 1, 28, 28], 'float32', name='image')] fluid.enable_dygraph()
labels = [Input([None, 1], 'int64', name='label')]
model = LeNet() model = hapi.Model(hapi.vision.LeNet())
loss = CrossEntropy() model.prepare(loss_function=hapi.loss.CrossEntropy())
model.prepare(loss_function=loss, inputs=inputs, labels=labels)
""" """
...@@ -123,16 +120,14 @@ class SoftmaxWithCrossEntropy(Loss): ...@@ -123,16 +120,14 @@ class SoftmaxWithCrossEntropy(Loss):
Examples: Examples:
.. code-block:: python .. code-block:: python
from paddle.incubate.hapi.model import Input import paddle.fluid as fluid
from paddle.incubate.hapi.vision.models import LeNet import paddle.incubate.hapi as hapi
from paddle.incubate.hapi.loss import SoftmaxWithCrossEntropy
inputs = [Input([-1, 1, 28, 28], 'float32', name='image')] fluid.enable_dygraph()
labels = [Input([None, 1], 'int64', name='label')]
model = LeNet(classifier_activation=None) model = hapi.Model(hapi.vision.LeNet(classifier_activation=None))
loss = SoftmaxWithCrossEntropy() loss = hapi.loss.SoftmaxWithCrossEntropy()
model.prepare(loss_function=loss, inputs=inputs, labels=labels) model.prepare(loss_function=loss)
""" """
def __init__(self, average=True): def __init__(self, average=True):
......
...@@ -170,30 +170,20 @@ class Accuracy(Metric): ...@@ -170,30 +170,20 @@ class Accuracy(Metric):
.. code-block:: python .. code-block:: python
from paddle import fluid import paddle.fluid as fluid
from paddle.incubate.hapi.metrics import Accuracy import paddle.incubate.hapi as hapi
from paddle.incubate.hapi.loss import CrossEntropy
from paddle.incubate.hapi.datasets import MNIST
from paddle.incubate.hapi.model import Input
from paddle.incubate.hapi.vision.models import LeNet
fluid.enable_dygraph() fluid.enable_dygraph()
train_dataset = MNIST(mode='train') train_dataset = hapi.datasets.MNIST(mode='train')
model = LeNet() model = hapi.Model(hapi.vision.LeNet())
optim = fluid.optimizer.Adam( optim = fluid.optimizer.Adam(
learning_rate=0.001, parameter_list=model.parameters()) learning_rate=0.001, parameter_list=model.parameters())
inputs = [Input([-1, 1, 28, 28], 'float32', name='image')]
labels = [Input([None, 1], 'int64', name='label')]
model.prepare( model.prepare(
optim, optim,
loss_function=CrossEntropy(average=False), loss_function=hapi.loss.CrossEntropy(average=False),
metrics=Accuracy(), metrics=hapi.metrics.Accuracy())
inputs=inputs,
labels=labels)
model.fit(train_dataset, batch_size=64) model.fit(train_dataset, batch_size=64)
......
...@@ -22,7 +22,7 @@ import contextlib ...@@ -22,7 +22,7 @@ import contextlib
from paddle import fluid from paddle import fluid
from paddle.incubate.hapi.model import Model, Input, set_device from paddle.incubate.hapi import Model, Input, set_device
from paddle.incubate.hapi.loss import CrossEntropy from paddle.incubate.hapi.loss import CrossEntropy
from paddle.incubate.hapi.vision.models import LeNet from paddle.incubate.hapi.vision.models import LeNet
from paddle.incubate.hapi.metrics import Accuracy from paddle.incubate.hapi.metrics import Accuracy
...@@ -64,20 +64,19 @@ class TestDistTraning(unittest.TestCase): ...@@ -64,20 +64,19 @@ class TestDistTraning(unittest.TestCase):
im_shape = (-1, 1, 28, 28) im_shape = (-1, 1, 28, 28)
batch_size = 128 batch_size = 128
inputs = [Input(im_shape, 'float32', name='image')] inputs = [Input('image', im_shape, 'float32')]
labels = [Input([None, 1], 'int64', name='label')] labels = [Input('label', [None, 1], 'int64')]
model = Model(LeNet(), inputs, labels)
optim = fluid.optimizer.Momentum(
learning_rate=0.001, momentum=.9, parameter_list=model.parameters())
model.prepare(optim, CrossEntropy(), Accuracy())
train_dataset = MnistDataset(mode='train') train_dataset = MnistDataset(mode='train')
val_dataset = MnistDataset(mode='test') val_dataset = MnistDataset(mode='test')
test_dataset = MnistDataset(mode='test', return_label=False) test_dataset = MnistDataset(mode='test', return_label=False)
model = LeNet()
optim = fluid.optimizer.Momentum(
learning_rate=0.001, momentum=.9, parameter_list=model.parameters())
loss = CrossEntropy()
model.prepare(optim, loss, Accuracy(), inputs, labels, device=device)
cbk = ProgBarLogger(50) cbk = ProgBarLogger(50)
model.fit(train_dataset, model.fit(train_dataset,
val_dataset, val_dataset,
epochs=2, epochs=2,
......
...@@ -22,7 +22,7 @@ import contextlib ...@@ -22,7 +22,7 @@ import contextlib
from paddle import fluid from paddle import fluid
from paddle.incubate.hapi.model import Model, Input, set_device from paddle.incubate.hapi import Model, Input, set_device
from paddle.incubate.hapi.loss import CrossEntropy from paddle.incubate.hapi.loss import CrossEntropy
from paddle.incubate.hapi.vision.models import LeNet from paddle.incubate.hapi.vision.models import LeNet
from paddle.incubate.hapi.metrics import Accuracy from paddle.incubate.hapi.metrics import Accuracy
...@@ -63,20 +63,19 @@ class TestDistTraning(unittest.TestCase): ...@@ -63,20 +63,19 @@ class TestDistTraning(unittest.TestCase):
im_shape = (-1, 1, 28, 28) im_shape = (-1, 1, 28, 28)
batch_size = 128 batch_size = 128
inputs = [Input(im_shape, 'float32', name='image')] inputs = [Input('image', im_shape, 'float32')]
labels = [Input([None, 1], 'int64', name='label')] labels = [Input('label', [None, 1], 'int64')]
model = Model(LeNet(), inputs, labels)
optim = fluid.optimizer.Momentum(
learning_rate=0.001, momentum=.9, parameter_list=model.parameters())
model.prepare(optim, CrossEntropy(), Accuracy())
train_dataset = MnistDataset(mode='train') train_dataset = MnistDataset(mode='train')
val_dataset = MnistDataset(mode='test') val_dataset = MnistDataset(mode='test')
test_dataset = MnistDataset(mode='test', return_label=False) test_dataset = MnistDataset(mode='test', return_label=False)
model = LeNet()
optim = fluid.optimizer.Momentum(
learning_rate=0.001, momentum=.9, parameter_list=model.parameters())
loss = CrossEntropy()
model.prepare(optim, loss, Accuracy(), inputs, labels, device=device)
cbk = ProgBarLogger(50) cbk = ProgBarLogger(50)
model.fit(train_dataset, model.fit(train_dataset,
val_dataset, val_dataset,
epochs=2, epochs=2,
......
...@@ -18,7 +18,7 @@ import random ...@@ -18,7 +18,7 @@ import random
import tempfile import tempfile
import shutil import shutil
from paddle.incubate.hapi.model import Input from paddle.incubate.hapi.model import Model, Input
from paddle.incubate.hapi.vision.models import LeNet from paddle.incubate.hapi.vision.models import LeNet
from paddle.incubate.hapi.callbacks import config_callbacks from paddle.incubate.hapi.callbacks import config_callbacks
...@@ -36,9 +36,9 @@ class TestCallbacks(unittest.TestCase): ...@@ -36,9 +36,9 @@ class TestCallbacks(unittest.TestCase):
freq = 2 freq = 2
eval_steps = 20 eval_steps = 20
lenet = LeNet() inputs = [Input('image', [None, 1, 28, 28], 'float32')]
inputs = [Input([None, 1, 28, 28], 'float32', name='image')] lenet = Model(LeNet(), inputs)
lenet.prepare(inputs=inputs) lenet.prepare()
cbks = config_callbacks( cbks = config_callbacks(
model=lenet, model=lenet,
......
...@@ -26,7 +26,8 @@ from paddle import fluid ...@@ -26,7 +26,8 @@ from paddle import fluid
from paddle.nn import Conv2D, Pool2D, Linear, ReLU, Sequential from paddle.nn import Conv2D, Pool2D, Linear, ReLU, Sequential
from paddle.fluid.dygraph.base import to_variable from paddle.fluid.dygraph.base import to_variable
from paddle.incubate.hapi.model import Model, Input, set_device import paddle.incubate.hapi as hapi
from paddle.incubate.hapi import Model, Input
from paddle.incubate.hapi.loss import CrossEntropy from paddle.incubate.hapi.loss import CrossEntropy
from paddle.incubate.hapi.metrics import Accuracy from paddle.incubate.hapi.metrics import Accuracy
from paddle.incubate.hapi.datasets import MNIST from paddle.incubate.hapi.datasets import MNIST
...@@ -123,7 +124,7 @@ class TestModel(unittest.TestCase): ...@@ -123,7 +124,7 @@ class TestModel(unittest.TestCase):
def setUpClass(cls): def setUpClass(cls):
if not fluid.is_compiled_with_cuda(): if not fluid.is_compiled_with_cuda():
self.skipTest('module not tested when ONLY_CPU compling') self.skipTest('module not tested when ONLY_CPU compling')
cls.device = set_device('gpu') cls.device = hapi.set_device('gpu')
fluid.enable_dygraph(cls.device) fluid.enable_dygraph(cls.device)
sp_num = 1280 sp_num = 1280
...@@ -149,8 +150,8 @@ class TestModel(unittest.TestCase): ...@@ -149,8 +150,8 @@ class TestModel(unittest.TestCase):
cls.acc1 = dynamic_evaluate(dy_lenet, cls.val_loader) cls.acc1 = dynamic_evaluate(dy_lenet, cls.val_loader)
cls.inputs = [Input([-1, 1, 28, 28], 'float32', name='image')] cls.inputs = [Input('image', [-1, 1, 28, 28], 'float32')]
cls.labels = [Input([None, 1], 'int64', name='label')] cls.labels = [Input('label', [None, 1], 'int64')]
cls.save_dir = tempfile.mkdtemp() cls.save_dir = tempfile.mkdtemp()
cls.weight_path = os.path.join(cls.save_dir, 'lenet') cls.weight_path = os.path.join(cls.save_dir, 'lenet')
...@@ -189,15 +190,14 @@ class TestModel(unittest.TestCase): ...@@ -189,15 +190,14 @@ class TestModel(unittest.TestCase):
fluid.default_startup_program().random_seed = seed fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed fluid.default_main_program().random_seed = seed
model = LeNet() net = LeNet()
optim_new = fluid.optimizer.Adam( optim_new = fluid.optimizer.Adam(
learning_rate=0.001, parameter_list=model.parameters()) learning_rate=0.001, parameter_list=net.parameters())
model = Model(net, inputs=self.inputs, labels=self.labels)
model.prepare( model.prepare(
optim_new, optim_new,
loss_function=CrossEntropy(average=False), loss_function=CrossEntropy(average=False),
metrics=Accuracy(), metrics=Accuracy())
inputs=self.inputs,
labels=self.labels)
model.fit(self.train_dataset, batch_size=64, shuffle=False) model.fit(self.train_dataset, batch_size=64, shuffle=False)
result = model.evaluate(self.val_dataset, batch_size=64) result = model.evaluate(self.val_dataset, batch_size=64)
...@@ -225,9 +225,8 @@ class TestModel(unittest.TestCase): ...@@ -225,9 +225,8 @@ class TestModel(unittest.TestCase):
def evaluate(self, dynamic): def evaluate(self, dynamic):
fluid.enable_dygraph(self.device) if dynamic else None fluid.enable_dygraph(self.device) if dynamic else None
model = LeNet() model = Model(LeNet(), self.inputs, self.labels)
model.prepare( model.prepare(metrics=Accuracy())
metrics=Accuracy(), inputs=self.inputs, labels=self.labels)
model.load(self.weight_path) model.load(self.weight_path)
result = model.evaluate(self.val_dataset, batch_size=64) result = model.evaluate(self.val_dataset, batch_size=64)
np.testing.assert_allclose(result['acc'], self.acc1) np.testing.assert_allclose(result['acc'], self.acc1)
...@@ -247,8 +246,8 @@ class TestModel(unittest.TestCase): ...@@ -247,8 +246,8 @@ class TestModel(unittest.TestCase):
def predict(self, dynamic): def predict(self, dynamic):
fluid.enable_dygraph(self.device) if dynamic else None fluid.enable_dygraph(self.device) if dynamic else None
model = LeNet() model = Model(LeNet(), self.inputs)
model.prepare(inputs=self.inputs) model.prepare()
model.load(self.weight_path) model.load(self.weight_path)
output = model.predict( output = model.predict(
self.test_dataset, batch_size=64, stack_outputs=True) self.test_dataset, batch_size=64, stack_outputs=True)
...@@ -271,7 +270,7 @@ class TestModel(unittest.TestCase): ...@@ -271,7 +270,7 @@ class TestModel(unittest.TestCase):
fluid.disable_dygraph() if dynamic else None fluid.disable_dygraph() if dynamic else None
class MyModel(Model): class MyModel(fluid.dygraph.Layer):
def __init__(self): def __init__(self):
super(MyModel, self).__init__() super(MyModel, self).__init__()
self._fc = Linear(20, 10, act='softmax') self._fc = Linear(20, 10, act='softmax')
...@@ -310,28 +309,24 @@ class TestModelFunction(unittest.TestCase): ...@@ -310,28 +309,24 @@ class TestModelFunction(unittest.TestCase):
ref = get_expect() ref = get_expect()
for dynamic in [True, False]: for dynamic in [True, False]:
device = set_device('cpu') device = hapi.set_device('cpu')
fluid.enable_dygraph(device) if dynamic else None fluid.enable_dygraph(device) if dynamic else None
self.set_seed() self.set_seed()
model = MyModel()
net = MyModel()
optim2 = fluid.optimizer.SGD(learning_rate=0.001, optim2 = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=model.parameters()) parameter_list=net.parameters())
inputs = [Input([None, dim], 'float32', name='x')] inputs = [Input('x', [None, dim], 'float32')]
labels = [Input([None, 1], 'int64', name='label')] labels = [Input('label', [None, 1], 'int64')]
model.prepare( model = Model(net, inputs, labels)
optim2, model.prepare(optim2, loss_function=CrossEntropy(average=False))
loss_function=CrossEntropy(average=False),
inputs=inputs,
labels=labels,
device=device)
loss, = model.train_batch([data], [label]) loss, = model.train_batch([data], [label])
np.testing.assert_allclose(loss.flatten(), ref.flatten()) np.testing.assert_allclose(loss.flatten(), ref.flatten())
fluid.disable_dygraph() if dynamic else None fluid.disable_dygraph() if dynamic else None
def test_test_batch(self, dynamic=True): def test_test_batch(self):
dim = 20 dim = 20
data = np.random.random(size=(4, dim)).astype(np.float32) data = np.random.random(size=(4, dim)).astype(np.float32)
...@@ -346,32 +341,31 @@ class TestModelFunction(unittest.TestCase): ...@@ -346,32 +341,31 @@ class TestModelFunction(unittest.TestCase):
ref = get_expect() ref = get_expect()
for dynamic in [True, False]: for dynamic in [True, False]:
device = set_device('cpu') device = hapi.set_device('cpu')
fluid.enable_dygraph(device) if dynamic else None fluid.enable_dygraph(device) if dynamic else None
self.set_seed() self.set_seed()
model = MyModel() net = MyModel()
inputs = [Input([None, dim], 'float32', name='x')] inputs = [Input('x', [None, dim], 'float32')]
model.prepare(inputs=inputs, device=device) model = Model(net, inputs)
model.prepare()
out, = model.test_batch([data]) out, = model.test_batch([data])
np.testing.assert_allclose(out, ref) np.testing.assert_allclose(out, ref, rtol=1e-6)
fluid.disable_dygraph() if dynamic else None fluid.disable_dygraph() if dynamic else None
def test_save_load(self): def test_save_load(self):
path = tempfile.mkdtemp() path = tempfile.mkdtemp()
for dynamic in [True, False]: for dynamic in [True, False]:
device = set_device('cpu') device = hapi.set_device('cpu')
fluid.enable_dygraph(device) if dynamic else None fluid.enable_dygraph(device) if dynamic else None
model = MyModel() net = MyModel()
inputs = [Input([None, 20], 'float32', name='x')] inputs = [Input('x', [None, 20], 'float32')]
labels = [Input([None, 1], 'int64', name='label')] labels = [Input('label', [None, 1], 'int64')]
optim = fluid.optimizer.SGD(learning_rate=0.001, optim = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=model.parameters()) parameter_list=net.parameters())
model = Model(net, inputs, labels)
model.prepare( model.prepare(
inputs=inputs, optimizer=optim, loss_function=CrossEntropy(average=False))
optimizer=optim,
loss_function=CrossEntropy(average=False),
labels=labels)
model.save(path + '/test') model.save(path + '/test')
model.load(path + '/test') model.load(path + '/test')
shutil.rmtree(path) shutil.rmtree(path)
...@@ -379,82 +373,73 @@ class TestModelFunction(unittest.TestCase): ...@@ -379,82 +373,73 @@ class TestModelFunction(unittest.TestCase):
def test_dynamic_save_static_load(self): def test_dynamic_save_static_load(self):
path = tempfile.mkdtemp() path = tempfile.mkdtemp()
# for dynamic in [True, False]: # dynamic saving
device = set_device('cpu') device = hapi.set_device('cpu')
fluid.enable_dygraph(device) #if dynamic else None fluid.enable_dygraph(device)
model = MyModel() model = Model(MyModel())
inputs = [Input([None, 20], 'float32', name='x')]
labels = [Input([None, 1], 'int64', name='label')]
optim = fluid.optimizer.SGD(learning_rate=0.001, optim = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=model.parameters()) parameter_list=model.parameters())
model.prepare( model.prepare(
inputs=inputs, optimizer=optim, loss_function=CrossEntropy(average=False))
optimizer=optim,
loss_function=CrossEntropy(average=False),
labels=labels)
model.save(path + '/test') model.save(path + '/test')
fluid.disable_dygraph() fluid.disable_dygraph()
model = MyModel()
inputs = [Input([None, 20], 'float32', name='x')] inputs = [Input('x', [None, 20], 'float32')]
labels = [Input([None, 1], 'int64', name='label')] labels = [Input('label', [None, 1], 'int64')]
model = Model(MyModel(), inputs, labels)
optim = fluid.optimizer.SGD(learning_rate=0.001, optim = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=model.parameters()) parameter_list=model.parameters())
model.prepare( model.prepare(
inputs=inputs, optimizer=optim, loss_function=CrossEntropy(average=False))
optimizer=optim,
loss_function=CrossEntropy(average=False),
labels=labels)
model.load(path + '/test') model.load(path + '/test')
shutil.rmtree(path) shutil.rmtree(path)
def test_static_save_dynamic_load(self): def test_static_save_dynamic_load(self):
path = tempfile.mkdtemp() path = tempfile.mkdtemp()
model = MyModel() net = MyModel()
inputs = [Input([None, 20], 'float32', name='x')] inputs = [Input('x', [None, 20], 'float32')]
labels = [Input([None, 1], 'int64', name='label')] labels = [Input('label', [None, 1], 'int64')]
optim = fluid.optimizer.SGD(learning_rate=0.001, optim = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=model.parameters()) parameter_list=net.parameters())
model = Model(net, inputs, labels)
model.prepare( model.prepare(
inputs=inputs, optimizer=optim, loss_function=CrossEntropy(average=False))
optimizer=optim,
loss_function=CrossEntropy(average=False),
labels=labels)
model.save(path + '/test') model.save(path + '/test')
device = set_device('cpu') device = hapi.set_device('cpu')
fluid.enable_dygraph(device) #if dynamic else None fluid.enable_dygraph(device) #if dynamic else None
model = MyModel() net = MyModel()
inputs = [Input([None, 20], 'float32', name='x')] inputs = [Input('x', [None, 20], 'float32')]
labels = [Input([None, 1], 'int64', name='label')] labels = [Input('label', [None, 1], 'int64')]
optim = fluid.optimizer.SGD(learning_rate=0.001, optim = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=model.parameters()) parameter_list=net.parameters())
model = Model(net, inputs, labels)
model.prepare( model.prepare(
inputs=inputs, optimizer=optim, loss_function=CrossEntropy(average=False))
optimizer=optim,
loss_function=CrossEntropy(average=False),
labels=labels)
model.load(path + '/test') model.load(path + '/test')
shutil.rmtree(path) shutil.rmtree(path)
fluid.disable_dygraph() fluid.disable_dygraph()
def test_parameters(self): def test_parameters(self):
for dynamic in [True, False]: for dynamic in [True, False]:
device = set_device('cpu') device = hapi.set_device('cpu')
fluid.enable_dygraph(device) if dynamic else None fluid.enable_dygraph(device) if dynamic else None
model = MyModel() net = MyModel()
inputs = [Input([None, 20], 'float32', name='x')] inputs = [Input('x', [None, 20], 'float32')]
model.prepare(inputs=inputs) model = Model(net, inputs)
model.prepare()
params = model.parameters() params = model.parameters()
self.assertTrue(params[0].shape[0] == 20) self.assertTrue(params[0].shape[0] == 20)
self.assertTrue(params[0].shape[1] == 10) self.assertTrue(params[0].shape[1] == 10)
fluid.disable_dygraph() if dynamic else None fluid.disable_dygraph() if dynamic else None
def test_export_deploy_model(self): def test_export_deploy_model(self):
model = LeNet() net = LeNet()
inputs = [Input([-1, 1, 28, 28], 'float32', name='image')] inputs = [Input('image', [-1, 1, 28, 28], 'float32')]
model.prepare(inputs=inputs) model = Model(net, inputs)
model.prepare()
save_dir = tempfile.mkdtemp() save_dir = tempfile.mkdtemp()
if not os.path.exists(save_dir): if not os.path.exists(save_dir):
os.makedirs(save_dir) os.makedirs(save_dir)
...@@ -476,7 +461,7 @@ class TestModelFunction(unittest.TestCase): ...@@ -476,7 +461,7 @@ class TestModelFunction(unittest.TestCase):
feed={feed_target_names[0]: tensor_img}, feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets) fetch_list=fetch_targets)
np.testing.assert_allclose(results, ori_results) np.testing.assert_allclose(results, ori_results, rtol=1e-6)
shutil.rmtree(save_dir) shutil.rmtree(save_dir)
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.incubate.hapi.vision.models as models
from paddle.incubate.hapi import Model, Input
# Test that the predicted results of the static graph and the dynamic graph
# are equal when using a pretrained model.
class TestPretrainedModel(unittest.TestCase):
    """Verify a pretrained model predicts identically in dynamic and static modes."""

    def infer(self, x, arch, dygraph=True):
        # Run one batch through the named architecture and return its output.
        # `dygraph` toggles imperative mode around model construction/inference.
        if dygraph:
            fluid.enable_dygraph()
        backbone = models.__dict__[arch](pretrained=True, classifier_activation=None)
        feed_spec = [Input('image', [None, 3, 224, 224], 'float32')]
        wrapped = Model(network=backbone, inputs=feed_spec)
        wrapped.prepare()
        preds = wrapped.test_batch(x)
        if dygraph:
            fluid.disable_dygraph()
        return preds

    def test_models(self):
        # Compare dynamic-graph vs. static-graph predictions for each architecture.
        for arch in ['mobilenet_v1', 'mobilenet_v2', 'resnet18']:
            batch = np.array(np.random.random((2, 3, 224, 224)), dtype=np.float32)
            dy_out = self.infer(batch, arch)
            st_out = self.infer(batch, arch, dygraph=False)
            np.testing.assert_allclose(dy_out, st_out)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
...@@ -23,7 +23,7 @@ import numpy as np ...@@ -23,7 +23,7 @@ import numpy as np
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.dygraph import Embedding, Linear, Layer from paddle.fluid.dygraph import Embedding, Linear, Layer
from paddle.fluid.layers import BeamSearchDecoder from paddle.fluid.layers import BeamSearchDecoder
from paddle.incubate.hapi.model import Model, Input, set_device from paddle.incubate.hapi import Model, Input, set_device
from paddle.incubate.hapi.text import * from paddle.incubate.hapi.text import *
...@@ -36,7 +36,7 @@ class ModuleApiTest(unittest.TestCase): ...@@ -36,7 +36,7 @@ class ModuleApiTest(unittest.TestCase):
np.random.seed(cls._random_seed) np.random.seed(cls._random_seed)
random.seed(cls._random_seed) random.seed(cls._random_seed)
cls.model_cls = type(cls.__name__ + "Model", (Model, ), { cls.model_cls = type(cls.__name__ + "Model", (Layer, ), {
"__init__": cls.model_init_wrapper(cls.model_init), "__init__": cls.model_init_wrapper(cls.model_init),
"forward": cls.model_forward "forward": cls.model_forward
}) })
...@@ -49,7 +49,7 @@ class ModuleApiTest(unittest.TestCase): ...@@ -49,7 +49,7 @@ class ModuleApiTest(unittest.TestCase):
@staticmethod @staticmethod
def model_init_wrapper(func): def model_init_wrapper(func):
def __impl__(self, *args, **kwargs): def __impl__(self, *args, **kwargs):
Model.__init__(self) Layer.__init__(self)
func(self, *args, **kwargs) func(self, *args, **kwargs)
return __impl__ return __impl__
...@@ -89,9 +89,10 @@ class ModuleApiTest(unittest.TestCase): ...@@ -89,9 +89,10 @@ class ModuleApiTest(unittest.TestCase):
fluid.disable_dygraph() fluid.disable_dygraph()
fluid.default_main_program().random_seed = self._random_seed fluid.default_main_program().random_seed = self._random_seed
fluid.default_startup_program().random_seed = self._random_seed fluid.default_startup_program().random_seed = self._random_seed
model = self.model_cls(**self.attrs) if isinstance( layer = self.model_cls(**self.attrs) if isinstance(
self.attrs, dict) else self.model_cls(*self.attrs) self.attrs, dict) else self.model_cls(*self.attrs)
model.prepare(inputs=self.make_inputs(), device=place) model = Model(layer, inputs=self.make_inputs())
model.prepare()
if self.param_states: if self.param_states:
model.load(self.param_states, optim_state=None) model.load(self.param_states, optim_state=None)
return model.test_batch(self.inputs) return model.test_batch(self.inputs)
...@@ -141,10 +142,7 @@ class TestBasicLSTM(ModuleApiTest): ...@@ -141,10 +142,7 @@ class TestBasicLSTM(ModuleApiTest):
def make_inputs(self): def make_inputs(self):
inputs = [ inputs = [
Input( Input("input", [None, None, self.inputs[-1].shape[-1]], "float32"),
[None, None, self.inputs[-1].shape[-1]],
"float32",
name="input"),
] ]
return inputs return inputs
...@@ -170,10 +168,7 @@ class TestBasicGRU(ModuleApiTest): ...@@ -170,10 +168,7 @@ class TestBasicGRU(ModuleApiTest):
def make_inputs(self): def make_inputs(self):
inputs = [ inputs = [
Input( Input("input", [None, None, self.inputs[-1].shape[-1]], "float32"),
[None, None, self.inputs[-1].shape[-1]],
"float32",
name="input"),
] ]
return inputs return inputs
...@@ -224,11 +219,8 @@ class TestBeamSearch(ModuleApiTest): ...@@ -224,11 +219,8 @@ class TestBeamSearch(ModuleApiTest):
def make_inputs(self): def make_inputs(self):
inputs = [ inputs = [
Input( Input("init_hidden", [None, self.inputs[0].shape[-1]], "float32"),
[None, self.inputs[0].shape[-1]], "float32", Input("init_cell", [None, self.inputs[1].shape[-1]], "float32"),
name="init_hidden"),
Input(
[None, self.inputs[1].shape[-1]], "float32", name="init_cell"),
] ]
return inputs return inputs
...@@ -280,14 +272,10 @@ class TestTransformerEncoder(ModuleApiTest): ...@@ -280,14 +272,10 @@ class TestTransformerEncoder(ModuleApiTest):
def make_inputs(self): def make_inputs(self):
inputs = [ inputs = [
Input( Input("enc_input", [None, None, self.inputs[0].shape[-1]],
[None, None, self.inputs[0].shape[-1]], "float32"),
"float32", Input("attn_bias", [None, self.inputs[1].shape[1], None, None],
name="enc_input"), "float32"),
Input(
[None, self.inputs[1].shape[1], None, None],
"float32",
name="attn_bias"),
] ]
return inputs return inputs
...@@ -348,22 +336,14 @@ class TestTransformerDecoder(TestTransformerEncoder): ...@@ -348,22 +336,14 @@ class TestTransformerDecoder(TestTransformerEncoder):
def make_inputs(self): def make_inputs(self):
inputs = [ inputs = [
Input( Input("dec_input", [None, None, self.inputs[0].shape[-1]],
[None, None, self.inputs[0].shape[-1]], "float32"),
"float32", Input("enc_output", [None, None, self.inputs[0].shape[-1]],
name="dec_input"), "float32"),
Input( Input("self_attn_bias",
[None, None, self.inputs[0].shape[-1]], [None, self.inputs[-1].shape[1], None, None], "float32"),
"float32", Input("cross_attn_bias",
name="enc_output"), [None, self.inputs[-1].shape[1], None, None], "float32"),
Input(
[None, self.inputs[-1].shape[1], None, None],
"float32",
name="self_attn_bias"),
Input(
[None, self.inputs[-1].shape[1], None, None],
"float32",
name="cross_attn_bias"),
] ]
return inputs return inputs
...@@ -451,14 +431,10 @@ class TestTransformerBeamSearchDecoder(ModuleApiTest): ...@@ -451,14 +431,10 @@ class TestTransformerBeamSearchDecoder(ModuleApiTest):
def make_inputs(self): def make_inputs(self):
inputs = [ inputs = [
Input( Input("enc_output", [None, None, self.inputs[0].shape[-1]],
[None, None, self.inputs[0].shape[-1]], "float32"),
"float32", Input("trg_src_attn_bias",
name="enc_output"), [None, self.inputs[1].shape[1], None, None], "float32"),
Input(
[None, self.inputs[1].shape[1], None, None],
"float32",
name="trg_src_attn_bias"),
] ]
return inputs return inputs
...@@ -497,12 +473,9 @@ class TestSequenceTagging(ModuleApiTest): ...@@ -497,12 +473,9 @@ class TestSequenceTagging(ModuleApiTest):
def make_inputs(self): def make_inputs(self):
inputs = [ inputs = [
Input( Input("word", [None, None], "int64"),
[None, None], "int64", name="word"), Input("lengths", [None], "int64"),
Input( Input("target", [None, None], "int64"),
[None], "int64", name="lengths"),
Input(
[None, None], "int64", name="target"),
] ]
return inputs return inputs
...@@ -544,10 +517,7 @@ class TestStackedRNN(ModuleApiTest): ...@@ -544,10 +517,7 @@ class TestStackedRNN(ModuleApiTest):
def make_inputs(self): def make_inputs(self):
inputs = [ inputs = [
Input( Input("input", [None, None, self.inputs[-1].shape[-1]], "float32"),
[None, None, self.inputs[-1].shape[-1]],
"float32",
name="input"),
] ]
return inputs return inputs
...@@ -573,10 +543,7 @@ class TestLSTM(ModuleApiTest): ...@@ -573,10 +543,7 @@ class TestLSTM(ModuleApiTest):
def make_inputs(self): def make_inputs(self):
inputs = [ inputs = [
Input( Input("input", [None, None, self.inputs[-1].shape[-1]], "float32"),
[None, None, self.inputs[-1].shape[-1]],
"float32",
name="input"),
] ]
return inputs return inputs
...@@ -612,10 +579,7 @@ class TestBiLSTM(ModuleApiTest): ...@@ -612,10 +579,7 @@ class TestBiLSTM(ModuleApiTest):
def make_inputs(self): def make_inputs(self):
inputs = [ inputs = [
Input( Input("input", [None, None, self.inputs[-1].shape[-1]], "float32"),
[None, None, self.inputs[-1].shape[-1]],
"float32",
name="input"),
] ]
return inputs return inputs
...@@ -645,10 +609,7 @@ class TestGRU(ModuleApiTest): ...@@ -645,10 +609,7 @@ class TestGRU(ModuleApiTest):
def make_inputs(self): def make_inputs(self):
inputs = [ inputs = [
Input( Input("input", [None, None, self.inputs[-1].shape[-1]], "float32"),
[None, None, self.inputs[-1].shape[-1]],
"float32",
name="input"),
] ]
return inputs return inputs
...@@ -684,10 +645,7 @@ class TestBiGRU(ModuleApiTest): ...@@ -684,10 +645,7 @@ class TestBiGRU(ModuleApiTest):
def make_inputs(self): def make_inputs(self):
inputs = [ inputs = [
Input( Input("input", [None, None, self.inputs[-1].shape[-1]], "float32"),
[None, None, self.inputs[-1].shape[-1]],
"float32",
name="input"),
] ]
return inputs return inputs
...@@ -722,9 +680,7 @@ class TestCNNEncoder(ModuleApiTest): ...@@ -722,9 +680,7 @@ class TestCNNEncoder(ModuleApiTest):
def make_inputs(self): def make_inputs(self):
inputs = [ inputs = [
Input( Input("input", [None, self.inputs[-1].shape[1], None], "float32"),
[None, self.inputs[-1].shape[1], None], "float32",
name="input"),
] ]
return inputs return inputs
......
...@@ -16,7 +16,7 @@ import unittest ...@@ -16,7 +16,7 @@ import unittest
import numpy as np import numpy as np
import paddle.incubate.hapi.vision.models as models import paddle.incubate.hapi.vision.models as models
from paddle.incubate.hapi.model import Input import paddle.incubate.hapi as hapi
class TestVisonModels(unittest.TestCase): class TestVisonModels(unittest.TestCase):
...@@ -24,13 +24,13 @@ class TestVisonModels(unittest.TestCase): ...@@ -24,13 +24,13 @@ class TestVisonModels(unittest.TestCase):
x = np.array(np.random.random((2, 3, 224, 224)), dtype=np.float32) x = np.array(np.random.random((2, 3, 224, 224)), dtype=np.float32)
if batch_norm: if batch_norm:
model = models.__dict__[arch](pretrained=pretrained, net = models.__dict__[arch](pretrained=pretrained, batch_norm=True)
batch_norm=True)
else: else:
model = models.__dict__[arch](pretrained=pretrained) net = models.__dict__[arch](pretrained=pretrained)
inputs = [Input([None, 3, 224, 224], 'float32', name='image')]
model.prepare(inputs=inputs) input = hapi.Input('image', [None, 3, 224, 224], 'float32')
model = hapi.Model(net, input)
model.prepare()
model.test_batch(x) model.test_batch(x)
...@@ -71,10 +71,9 @@ class TestVisonModels(unittest.TestCase): ...@@ -71,10 +71,9 @@ class TestVisonModels(unittest.TestCase):
self.models_infer('resnet152') self.models_infer('resnet152')
def test_lenet(self): def test_lenet(self):
lenet = models.__dict__['LeNet']() input = hapi.Input('x', [None, 1, 28, 28], 'float32')
lenet = hapi.Model(models.__dict__['LeNet'](), input)
inputs = [Input([None, 1, 28, 28], 'float32', name='x')] lenet.prepare()
lenet.prepare(inputs=inputs)
x = np.array(np.random.random((2, 1, 28, 28)), dtype=np.float32) x = np.array(np.random.random((2, 1, 28, 28)), dtype=np.float32)
lenet.test_batch(x) lenet.test_batch(x)
......
...@@ -15,12 +15,10 @@ ...@@ -15,12 +15,10 @@
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.nn import Conv2D, Pool2D, Linear, ReLU, Sequential from paddle.nn import Conv2D, Pool2D, Linear, ReLU, Sequential
from ...model import Model
__all__ = ['LeNet'] __all__ = ['LeNet']
class LeNet(Model): class LeNet(fluid.dygraph.Layer):
"""LeNet model from """LeNet model from
`"LeCun Y, Bottou L, Bengio Y, et al. Gradient-based learning applied to document recognition[J]. Proceedings of the IEEE, 1998, 86(11): 2278-2324.`_ `"LeCun Y, Bottou L, Bengio Y, et al. Gradient-based learning applied to document recognition[J]. Proceedings of the IEEE, 1998, 86(11): 2278-2324.`_
......
...@@ -17,7 +17,6 @@ from paddle.fluid.initializer import MSRA ...@@ -17,7 +17,6 @@ from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from ...model import Model
from ...download import get_weights_path_from_url from ...download import get_weights_path_from_url
__all__ = ['MobileNetV1', 'mobilenet_v1'] __all__ = ['MobileNetV1', 'mobilenet_v1']
...@@ -103,7 +102,7 @@ class DepthwiseSeparable(fluid.dygraph.Layer): ...@@ -103,7 +102,7 @@ class DepthwiseSeparable(fluid.dygraph.Layer):
return y return y
class MobileNetV1(Model): class MobileNetV1(fluid.dygraph.Layer):
"""MobileNetV1 model from """MobileNetV1 model from
`"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications" <https://arxiv.org/abs/1704.04861>`_. `"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications" <https://arxiv.org/abs/1704.04861>`_.
...@@ -276,7 +275,8 @@ def _mobilenet(arch, pretrained=False, **kwargs): ...@@ -276,7 +275,8 @@ def _mobilenet(arch, pretrained=False, **kwargs):
model_urls[arch][1]) model_urls[arch][1])
assert weight_path.endswith( assert weight_path.endswith(
'.pdparams'), "suffix of weight must be .pdparams" '.pdparams'), "suffix of weight must be .pdparams"
model.load(weight_path) param, _ = fluid.load_dygraph(weight_path)
model.load_dict(param)
return model return model
......
...@@ -18,7 +18,6 @@ import paddle.fluid as fluid ...@@ -18,7 +18,6 @@ import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from ...model import Model
from ...download import get_weights_path_from_url from ...download import get_weights_path_from_url
__all__ = ['MobileNetV2', 'mobilenet_v2'] __all__ = ['MobileNetV2', 'mobilenet_v2']
...@@ -150,7 +149,7 @@ class InvresiBlocks(fluid.dygraph.Layer): ...@@ -150,7 +149,7 @@ class InvresiBlocks(fluid.dygraph.Layer):
return y return y
class MobileNetV2(Model): class MobileNetV2(fluid.dygraph.Layer):
"""MobileNetV2 model from """MobileNetV2 model from
`"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_. `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
...@@ -252,7 +251,8 @@ def _mobilenet(arch, pretrained=False, **kwargs): ...@@ -252,7 +251,8 @@ def _mobilenet(arch, pretrained=False, **kwargs):
model_urls[arch][1]) model_urls[arch][1])
assert weight_path.endswith( assert weight_path.endswith(
'.pdparams'), "suffix of weight must be .pdparams" '.pdparams'), "suffix of weight must be .pdparams"
model.load(weight_path) param, _ = fluid.load_dygraph(weight_path)
model.load_dict(param)
return model return model
......
...@@ -21,7 +21,6 @@ import paddle.fluid as fluid ...@@ -21,7 +21,6 @@ import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from paddle.fluid.dygraph.container import Sequential from paddle.fluid.dygraph.container import Sequential
from ...model import Model
from ...download import get_weights_path_from_url from ...download import get_weights_path_from_url
__all__ = [ __all__ = [
...@@ -166,7 +165,7 @@ class BottleneckBlock(fluid.dygraph.Layer): ...@@ -166,7 +165,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
return fluid.layers.relu(x) return fluid.layers.relu(x)
class ResNet(Model): class ResNet(fluid.dygraph.Layer):
"""ResNet model from """ResNet model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
...@@ -278,7 +277,9 @@ def _resnet(arch, Block, depth, pretrained, **kwargs): ...@@ -278,7 +277,9 @@ def _resnet(arch, Block, depth, pretrained, **kwargs):
model_urls[arch][1]) model_urls[arch][1])
assert weight_path.endswith( assert weight_path.endswith(
'.pdparams'), "suffix of weight must be .pdparams" '.pdparams'), "suffix of weight must be .pdparams"
model.load(weight_path) param, _ = fluid.load_dygraph(weight_path)
model.set_dict(param)
return model return model
......
...@@ -16,7 +16,6 @@ import paddle.fluid as fluid ...@@ -16,7 +16,6 @@ import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from paddle.fluid.dygraph.container import Sequential from paddle.fluid.dygraph.container import Sequential
from ...model import Model
from ...download import get_weights_path_from_url from ...download import get_weights_path_from_url
__all__ = [ __all__ = [
...@@ -51,7 +50,7 @@ class Classifier(fluid.dygraph.Layer): ...@@ -51,7 +50,7 @@ class Classifier(fluid.dygraph.Layer):
return out return out
class VGG(Model): class VGG(fluid.dygraph.Layer):
"""VGG model from """VGG model from
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_ `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
...@@ -144,7 +143,8 @@ def _vgg(arch, cfg, batch_norm, pretrained, **kwargs): ...@@ -144,7 +143,8 @@ def _vgg(arch, cfg, batch_norm, pretrained, **kwargs):
model_urls[arch][1]) model_urls[arch][1])
assert weight_path.endswith( assert weight_path.endswith(
'.pdparams'), "suffix of weight must be .pdparams" '.pdparams'), "suffix of weight must be .pdparams"
model.load(weight_path) param, _ = fluid.load_dygraph(weight_path)
model.load_dict(param)
return model return model
......
...@@ -130,7 +130,7 @@ class BatchCompose(object): ...@@ -130,7 +130,7 @@ class BatchCompose(object):
import numpy as np import numpy as np
from paddle.io import DataLoader from paddle.io import DataLoader
from paddle.incubate.hapi.model import set_device from paddle.incubate.hapi import set_device
from paddle.incubate.hapi.datasets import Flowers from paddle.incubate.hapi.datasets import Flowers
from paddle.incubate.hapi.vision.transforms import Compose, BatchCompose, Resize from paddle.incubate.hapi.vision.transforms import Compose, BatchCompose, Resize
......
...@@ -108,6 +108,11 @@ ...@@ -108,6 +108,11 @@
"Metric.accumulate", "Metric.accumulate",
"Metric.name", "Metric.name",
"Metric.add_metric_op", "Metric.add_metric_op",
"Accuracy.reset",
"Accuracy.update",
"Accuracy.accumulate",
"Accuracy.name",
"Accuracy.add_metric_op",
"Callback.set_params", "Callback.set_params",
"Callback.on_train_begin", "Callback.on_train_begin",
"Callback.on_train_end", "Callback.on_train_end",
...@@ -122,7 +127,8 @@ ...@@ -122,7 +127,8 @@
"Callback.on_eval_batch_begin", "Callback.on_eval_batch_begin",
"Callback.on_eval_batch_end", "Callback.on_eval_batch_end",
"Callback.on_test_batch_begin", "Callback.on_test_batch_begin",
"Callback.on_test_batch_end" "Callback.on_test_batch_end",
"Model.prepare"
], ],
"wlist_no_op_pass":[ "wlist_no_op_pass":[
"gelu", "gelu",
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册