diff --git a/hapi/loss.py b/hapi/loss.py
index 7e4843b2ced3f31aee6b20cecd7f96ec3c823a05..7abddf22f1519b6bd1a649f663b22f315366ca7a 100644
--- a/hapi/loss.py
+++ b/hapi/loss.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,15 +20,9 @@ from paddle import fluid
 from paddle.fluid.framework import in_dygraph_mode, Variable
 from paddle.fluid.dygraph.base import to_variable
 
-__all__ = ['Loss', 'CrossEntropy', 'SoftmaxWithCrossEntropy']
-
+from hapi.utils import to_list
 
-def to_list(value):
-    if value is None:
-        return value
-    if isinstance(value, (list, tuple)):
-        return list(value)
-    return [value]
+__all__ = ['Loss', 'CrossEntropy', 'SoftmaxWithCrossEntropy']
 
 
 class Loss(object):
diff --git a/hapi/model.py b/hapi/model.py
index 481d038f3868931eba0f87bf249015da79f76680..8c1c5216287b26645ca2d06178cb3b5176e7ab31 100644
--- a/hapi/model.py
+++ b/hapi/model.py
@@ -38,6 +38,7 @@ from hapi.loss import Loss
 from hapi.distributed import DistributedBatchSampler, _all_gather, prepare_distributed_context, _parallel_context_initialized
 from hapi.metrics import Metric
 from hapi.callbacks import config_callbacks
+from hapi.utils import to_list, to_numpy, flatten_list, restore_flatten_list
 
 __all__ = [
     'Model',
@@ -65,49 +66,6 @@ def set_device(device):
     return place
 
 
-def to_list(value):
-    if value is None:
-        return value
-    if isinstance(value, (list, tuple)):
-        return list(value)
-    return [value]
-
-
-def to_numpy(var):
-    assert isinstance(var, (Variable, fluid.core.VarBase)), "not a variable"
-    if isinstance(var, fluid.core.VarBase):
-        return var.numpy()
-    t = global_scope().find_var(var.name).get_tensor()
-    return np.array(t)
-
-
-def flatten_list(l):
-    assert isinstance(l, list), "not a list"
-    outl = []
-    splits = []
-    for sl in l:
-        assert isinstance(sl, list), "sub content not a list"
-        splits.append(len(sl))
-        outl += sl
-    return outl, splits
-
-
-def restore_flatten_list(l, splits):
-    outl = []
-    for split in splits:
-        assert len(l) >= split, "list length invalid"
-        sl, l = l[:split], l[split:]
-        outl.append(sl)
-    return outl
-
-
-def extract_args(func):
-    if hasattr(inspect, 'getfullargspec'):
-        return inspect.getfullargspec(func)[0]
-    else:
-        return inspect.getargspec(func)[0]
-
-
 class Input(fluid.dygraph.Layer):
     def __init__(self, shape=None, dtype=None, name=None):
         super(Input, self).__init__()
@@ -1180,7 +1138,6 @@ class Model(fluid.dygraph.Layer):
                              save_dir,
                              model_filename=None,
                              params_filename=None,
-                             export_for_deployment=True,
                              program_only=False):
         """
         Save inference model must in static mode.
@@ -1193,12 +1150,6 @@ class Model(fluid.dygraph.Layer):
             params_filename(str|None): The name of file to save all related parameters.
                 If it is set None, parameters will be saved in separate files .
-            export_for_deployment(bool): If True, programs are modified to only support
-                direct inference deployment. Otherwise,
-                more information will be stored for flexible
-                optimization and re-training. Currently, only
-                True is supported.
-                Default: True.
             program_only(bool): If True, It will save inference program only, and do not
                 save params of Program.
                 Default: False.
 
@@ -1226,7 +1177,6 @@ class Model(fluid.dygraph.Layer):
                 main_program=infer_prog,
                 model_filename=model_filename,
                 params_filename=params_filename,
-                export_for_deployment=export_for_deployment,
                 program_only=program_only)
 
     def _run_one_epoch(self,
diff --git a/hapi/utils.py b/hapi/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..de928945dc68a1800c3cd9b14aaa0659e50c9945
--- /dev/null
+++ b/hapi/utils.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import numpy as np
+
+from paddle import fluid
+from paddle.fluid.framework import Variable
+from paddle.fluid.executor import global_scope
+
+
+def to_list(value):
+    if value is None:
+        return value
+    if isinstance(value, (list, tuple)):
+        return list(value)
+    return [value]
+
+
+def to_numpy(var):
+    assert isinstance(var, (Variable, fluid.core.VarBase)), "not a variable"
+    if isinstance(var, fluid.core.VarBase):
+        return var.numpy()
+    t = global_scope().find_var(var.name).get_tensor()
+    return np.array(t)
+
+
+def flatten_list(l):
+    assert isinstance(l, list), "not a list"
+    outl = []
+    splits = []
+    for sl in l:
+        assert isinstance(sl, list), "sub content not a list"
+        splits.append(len(sl))
+        outl += sl
+    return outl, splits
+
+
+def restore_flatten_list(l, splits):
+    outl = []
+    for split in splits:
+        assert len(l) >= split, "list length invalid"
+        sl, l = l[:split], l[split:]
+        outl.append(sl)
+    return outl
+
+
+def extract_args(func):
+    if hasattr(inspect, 'getfullargspec'):
+        return inspect.getfullargspec(func)[0]
+    else:
+        return inspect.getargspec(func)[0]
\ No newline at end of file
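
For reference, a minimal sketch of how the helpers consolidated into hapi/utils.py compose, based on the function bodies above (assumes the hapi package is on the import path; the sample data is illustrative only):

    from hapi.utils import to_list, flatten_list, restore_flatten_list

    # to_list normalizes a scalar or tuple to a list and passes None through.
    assert to_list('a') == ['a']
    assert to_list((1, 2)) == [1, 2]
    assert to_list(None) is None

    # flatten_list flattens a list of lists while recording the sublist sizes,
    # so restore_flatten_list can rebuild the original nesting from the splits.
    nested = [[1, 2], [3], [4, 5, 6]]
    flat, splits = flatten_list(nested)  # flat == [1, 2, 3, 4, 5, 6], splits == [2, 1, 3]
    assert restore_flatten_list(flat, splits) == nested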