Unverified commit 2b6d0049, authored by hong, committed by GitHub

Api move 20a (#24559)

* add base name alias; test=develop

* fix embedding bug; test=develop

* fix io error; test=develop

* add metric; test=develop

* add import place; test=develop

* add setup; test=develop

* fix incubate reader; test=develop

* fix initializer error; test=develop
Parent ca29abc8
@@ -38,6 +38,8 @@ import paddle.tensor
import paddle.nn
import paddle.framework
import paddle.imperative
import paddle.optimizer
import paddle.metric
import paddle.incubate.complex as complex
# TODO: define alias in tensor and framework directory
@@ -194,20 +196,42 @@ from .tensor.search import index_select #DEFINE_ALIAS
from .tensor.search import nonzero #DEFINE_ALIAS
from .tensor.search import sort #DEFINE_ALIAS
from .framework.random import manual_seed #DEFINE_ALIAS
from .framework import append_backward #DEFINE_ALIAS
from .framework import gradients #DEFINE_ALIAS
from .framework import Executor #DEFINE_ALIAS
from .framework import global_scope #DEFINE_ALIAS
from .framework import scope_guard #DEFINE_ALIAS
from .framework import BuildStrategy #DEFINE_ALIAS
from .framework import CompiledProgram #DEFINE_ALIAS
from .framework import default_main_program #DEFINE_ALIAS
from .framework import default_startup_program #DEFINE_ALIAS
from .framework import create_global_var #DEFINE_ALIAS
from .framework import create_parameter #DEFINE_ALIAS
from .framework import Print #DEFINE_ALIAS
from .framework import py_func #DEFINE_ALIAS
from .framework import ExecutionStrategy #DEFINE_ALIAS
from .framework import name_scope #DEFINE_ALIAS
from .framework import ParallelExecutor #DEFINE_ALIAS
from .framework import ParamAttr #DEFINE_ALIAS
from .framework import Program #DEFINE_ALIAS
from .framework import program_guard #DEFINE_ALIAS
from .framework import Variable #DEFINE_ALIAS
from .framework import WeightNormParamAttr #DEFINE_ALIAS
from .framework import CPUPlace #DEFINE_ALIAS
from .framework import CUDAPlace #DEFINE_ALIAS
from .framework import CUDAPinnedPlace #DEFINE_ALIAS
from .tensor.search import index_sample #DEFINE_ALIAS
from .tensor.stat import mean #DEFINE_ALIAS
from .tensor.stat import reduce_mean #DEFINE_ALIAS
from .tensor.stat import std #DEFINE_ALIAS
from .tensor.stat import var #DEFINE_ALIAS
from .fluid.data import data
# from .tensor.tensor import Tensor #DEFINE_ALIAS
# from .tensor.tensor import LoDTensor #DEFINE_ALIAS
# from .tensor.tensor import LoDTensorArray #DEFINE_ALIAS
from . import incubate
from .incubate import hapi
from .fluid.dygraph.base import enable_dygraph #DEFINE_ALIAS
from .fluid.dygraph.base import disable_dygraph #DEFINE_ALIAS
from .fluid.framework import in_dygraph_mode #DEFINE_ALIAS
enable_imperative = enable_dygraph #DEFINE_ALIAS
disable_imperative = disable_dygraph #DEFINE_ALIAS
in_imperative_mode = in_dygraph_mode
from .fluid.dygraph.base import enable_dygraph as enable_imperative #DEFINE_ALIAS
from .fluid.dygraph.base import disable_dygraph as disable_imperative #DEFINE_ALIAS
from .fluid.framework import in_dygraph_mode as in_imperative_mode #DEFINE_ALIAS
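
The hunk above re-exports the core static-graph objects at the top-level `paddle` namespace and renames the dygraph switches to their `imperative` spellings. A minimal usage sketch, assuming a PaddlePaddle build that contains this commit:

```python
# Sketch only: every name below is one of the aliases added in the hunk above.
import paddle

place = paddle.CPUPlace()                # re-export of fluid.core.CPUPlace
main_prog = paddle.Program()             # re-export of fluid.framework.Program
startup_prog = paddle.Program()
with paddle.program_guard(main_prog, startup_prog):
    x = paddle.data(name='x', shape=[None, 4], dtype='float32')

paddle.enable_imperative()               # alias of fluid.dygraph.enable_dygraph
print(paddle.in_imperative_mode())       # True
paddle.disable_imperative()
```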
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'fc',
'batch_norm',
'embedding',
'bilinear_tensor_product',
'conv2d',
'conv2d_transpose',
'conv3d',
'conv3d_transpose',
'create_parameter',
'crf_decoding',
'data_norm',
'deformable_conv',
'group_norm',
'hsigmoid',
'instance_norm',
'layer_norm',
'multi_box_head',
'nce',
'prelu',
'row_conv',
'spectral_norm',
]
from ..fluid.layers import fc, batch_norm, bilinear_tensor_product, \
conv2d, conv2d_transpose, conv3d, conv3d_transpose, create_parameter, \
crf_decoding, data_norm, deformable_conv, group_norm, hsigmoid, instance_norm, \
layer_norm, multi_box_head, nce, prelu, row_conv, spectral_norm
from ..fluid.input import embedding
@@ -37,7 +37,7 @@ class TestImperativeContainer(unittest.TestCase):
[fluid.dygraph.Linear(2**i, 2**(i + 1)) for i in range(6)])
def paddle_imperative_list(self):
return paddle.imperative.LayerList(
return paddle.nn.LayerList(
[fluid.dygraph.Linear(2**i, 2**(i + 1)) for i in range(6)])
def layer_list(self, use_fluid_api):
@@ -35,7 +35,7 @@ class MyLayer(fluid.Layer):
shape=[2, 2], dtype='float32')] * num_stacked_param)
def paddle_imperative_ParameterList(self, num_stacked_param):
return paddle.imperative.ParameterList(
return paddle.nn.ParameterList(
[fluid.layers.create_parameter(
shape=[2, 2], dtype='float32')] * num_stacked_param)
@@ -67,7 +67,7 @@ class TestImperativeNamedParameters(unittest.TestCase):
fc1 = fluid.Linear(10, 3)
fc2 = fluid.Linear(3, 10, bias_attr=False)
custom = MyLayer(3, 10)
model = paddle.imperative.Sequential(fc1, fc2, custom)
model = paddle.nn.Sequential(fc1, fc2, custom)
named_parameters = list(model.named_parameters())
expected_named_parameters = list()
@@ -13,31 +13,24 @@
# limitations under the License.
# TODO: import framework api under this directory
# __all__ = ['append_backward',
# 'gradients',
# 'Executor',
# 'global_scope',
# 'scope_guard',
# 'BuildStrategy',
# 'CompiledProgram',
# 'default_main_program',
# 'default_startup_program',
# 'create_global_var',
# 'create_parameter',
# 'create_py_reader_by_data',
# 'Print',
# 'py_func',
# 'ExecutionStrategy',
# 'in_dygraph_mode',
# 'name_scope',
# 'ParallelExecutor',
# 'ParamAttr',
# 'Program',
# 'program_guard',
# 'Variable',
# 'WeightNormParamAttr',
# 'Model',
# 'Sequential']
__all__ = [
'append_backward', 'gradients', 'Executor', 'global_scope', 'scope_guard',
'BuildStrategy', 'CompiledProgram', 'default_main_program',
'default_startup_program', 'create_global_var', 'create_parameter', 'Print',
'py_func', 'ExecutionStrategy', 'name_scope', 'ParallelExecutor',
'ParamAttr', 'Program', 'program_guard', 'Variable', 'WeightNormParamAttr',
'CPUPlace', 'CUDAPlace', 'CUDAPinnedPlace'
]
from . import random
from .random import manual_seed
from ..fluid.executor import Executor, global_scope, scope_guard
from ..fluid.backward import append_backward, gradients
from ..fluid.compiler import BuildStrategy, CompiledProgram, ExecutionStrategy
from ..fluid.framework import default_main_program, default_startup_program, name_scope, Program, program_guard, Variable
from ..fluid.layers.control_flow import Print
from ..fluid.layers.nn import py_func
from ..fluid.parallel_executor import ParallelExecutor
from ..fluid.param_attr import ParamAttr, WeightNormParamAttr
from ..fluid.layers.tensor import create_global_var, create_parameter
from ..fluid.core import CPUPlace, CUDAPlace, CUDAPinnedPlace
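
A rough sketch of how the static-graph entry points re-exported under `paddle.framework` fit together (assumes a build with this commit; the parameter and global variable below are placeholders, not part of the original change):

```python
# Sketch: create variables inside a Program and read them back via the Executor alias.
import paddle.framework as framework

main_prog = framework.Program()
startup_prog = framework.Program()
with framework.program_guard(main_prog, startup_prog):
    w = framework.create_parameter(shape=[2, 3], dtype='float32')
    step = framework.create_global_var(shape=[1], value=0.0,
                                       dtype='float32', persistable=True)

exe = framework.Executor(framework.CPUPlace())
exe.run(startup_prog)                          # runs the initializer ops
w_val, step_val = exe.run(main_prog, fetch_list=[w, step])
print(w_val.shape, step_val)                   # (2, 3) [0.]
```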
@@ -15,16 +15,24 @@
# define api used to run in imperative mode
__all__ = [
'BackwardStrategy', 'enabled', 'grad', 'guard', 'LayerList', 'load', 'save',
'prepare_context', 'to_variable', 'TracedLayer', 'no_grad', 'ParameterList',
'Sequential'
'prepare_context', 'to_variable', 'TracedLayer', 'no_grad', 'ParallelEnv',
'ProgramTranslator', 'declarative', 'DataParallel'
]
__all__ += [
'NoamDecay', 'PiecewiseDecay', 'NaturalExpDecay', 'ExponentialDecay',
'InverseTimeDecay', 'PolynomialDecay', 'CosineDecay'
]
from paddle.fluid import core
from ..fluid.dygraph.base import enabled, guard, no_grad, to_variable, grad
from ..fluid.dygraph.container import LayerList, ParameterList, Sequential
from ..fluid.dygraph.checkpoint import load_dygraph as load
from ..fluid.dygraph.checkpoint import save_dygraph as save
from ..fluid.dygraph.parallel import prepare_context
from ..fluid.dygraph.jit import TracedLayer
from ..fluid.dygraph.parallel import prepare_context, ParallelEnv, DataParallel
from ..fluid.dygraph.jit import TracedLayer, declarative
from ..fluid.dygraph import ProgramTranslator
from ..fluid.dygraph.learning_rate_scheduler import NoamDecay, PiecewiseDecay, NaturalExpDecay, ExponentialDecay, \
InverseTimeDecay, PolynomialDecay, CosineDecay
BackwardStrategy = core.BackwardStrategy
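
A short sketch of the imperative-mode helpers exposed here (assumes this commit; `Linear` still comes from `fluid.dygraph`, exactly as in the tests above, and the "linear_demo" path prefix is arbitrary):

```python
# Sketch: eager execution plus checkpointing through the paddle.imperative aliases.
import numpy as np
import paddle.fluid as fluid
import paddle.imperative as imperative

with imperative.guard():                                   # alias of fluid.dygraph.guard
    x = imperative.to_variable(np.ones([2, 2], dtype='float32'))
    linear = fluid.dygraph.Linear(2, 4)
    print(linear(x).shape)                                 # [2, 4]

    imperative.save(linear.state_dict(), "linear_demo")    # save_dygraph alias
    state_dict, _ = imperative.load("linear_demo")         # load_dygraph alias
    linear.set_dict(state_dict)
```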
@@ -16,3 +16,6 @@ from . import hapi
__all__ = []
__all__ += hapi.__all__
__all__ += ["reader"]
from ..fluid.contrib import reader
@@ -18,22 +18,25 @@ __all__ = [
'BatchSampler',
# 'Transform',
'DataLoader',
# 'load',
# 'save',
# 'load_program_state',
# 'set_program_state',
# 'load_inference_model',
# 'save_inference_model',
# 'batch',
# 'shuffle',
# 'buffered',
# 'cache',
# 'chain',
# 'firstn',
# 'compose',
# 'map_readers',
# 'xmap_readers'
'load',
'save',
'load_program_state',
'set_program_state',
'load_inference_model',
'save_inference_model',
'batch',
'shuffle',
'buffered',
'cache',
'chain',
'firstn',
'compose',
'map_readers',
'xmap_readers'
]
from ..fluid.io import DataLoader
from ..fluid.dataloader import Dataset, BatchSampler
from ..fluid.io import load, save, load_program_state, set_program_state, \
load_inference_model, save_inference_model, batch
from ..reader import shuffle, buffered, cache, chain, firstn, compose, map_readers, xmap_readers
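
The reader decorators that previously lived in `paddle.reader` / `fluid.io` become importable from `paddle.io` as well. A small sketch, assuming this commit; `sample_reader` is just an illustrative generator:

```python
# Sketch: compose plain Python readers with the re-exported decorators.
import numpy as np
import paddle.io as io

def sample_reader():
    for i in range(10):
        yield np.array([i], dtype='float32')

batched = io.batch(sample_reader, batch_size=4)     # fluid.io.batch
shuffled = io.shuffle(sample_reader, buf_size=10)   # paddle.reader.shuffle
buffered = io.buffered(sample_reader, size=2)       # paddle.reader.buffered

for batch in batched():
    print(len(batch))                               # 4, 4, 2
```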
@@ -13,16 +13,16 @@
# limitations under the License.
# TODO: define the functions to calculate metric in this directory
# __all__ = ['Accuracy',
# 'Auc',
# 'ChunkEvaluator',
# 'CompositeMetric',
# 'DetectionMAP',
# 'EditDistance',
# 'Precesion',
# 'Recall',
# 'accuracy',
# 'auc',
# 'chunk_eval',
# 'cos_sim',
# 'mean_iou']
__all__ = [
'Accuracy', 'Auc', 'ChunkEvaluator', 'CompositeMetric', 'DetectionMAP',
'EditDistance', 'Precision', 'Recall', 'accuracy', 'auc', 'chunk_eval',
'cos_sim', 'mean_iou'
]
from ..fluid.metrics import Accuracy, Auc, ChunkEvaluator, CompositeMetric, DetectionMAP, EditDistance, \
Precision, Recall
from ..fluid.layers.metric_op import accuracy, auc
from ..fluid.layers.nn import chunk_eval, cos_sim, mean_iou
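
A sketch of the metric re-exports in use (assumes this commit; the two `paddle.data` placeholders exist only to give the ops something to consume):

```python
# Sketch: static-graph metric ops and a stateful metric class via paddle.metric.
import paddle
import paddle.metric as metric

pred = paddle.data(name='pred', shape=[None, 10], dtype='float32')
label = paddle.data(name='label', shape=[None, 1], dtype='int64')

top1 = metric.accuracy(input=pred, label=label, k=1)   # layers.metric_op.accuracy
sim = metric.cos_sim(pred, pred)                       # layers.nn.cos_sim

acc = metric.Accuracy()                                # fluid.metrics.Accuracy
acc.update(value=0.9, weight=32)
print(acc.eval())
```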
@@ -19,6 +19,8 @@ from .layer import norm
from .functional import extension
from .layer import common
from . import initializer
__all__ = []
__all__ += norm.__all__
__all__ += extension.__all__
@@ -47,13 +49,6 @@ from .decode import beam_search_decode #DEFINE_ALIAS
# from .decode import ctc_greedy_decoder #DEFINE_ALIAS
# from .decode import dynamic_decode #DEFINE_ALIAS
# from .decode import gather_tree #DEFINE_ALIAS
# from .initalizer import Bilinear #DEFINE_ALIAS
# from .initalizer import Constant #DEFINE_ALIAS
# from .initalizer import MSRA #DEFINE_ALIAS
# from .initalizer import Normal #DEFINE_ALIAS
# from .initalizer import TruncatedNormal #DEFINE_ALIAS
# from .initalizer import Uniform #DEFINE_ALIAS
# from .initalizer import Xavier #DEFINE_ALIAS
from .input import data #DEFINE_ALIAS
# from .input import Input #DEFINE_ALIAS
# from .layer.activation import PReLU #DEFINE_ALIAS
@@ -98,3 +93,4 @@ from .layer.norm import InstanceNorm #DEFINE_ALIAS
from .layer import loss #DEFINE_ALIAS
from .layer import conv #DEFINE_ALIAS
from ..fluid.dygraph.layers import Layer #DEFINE_ALIAS
from ..fluid.dygraph.container import LayerList, ParameterList, Sequential #DEFINE_ALIAS
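
With this hunk the dygraph containers become `paddle.nn.LayerList` / `ParameterList` / `Sequential`, which is exactly what the test changes earlier in this commit switch to. A minimal sketch, assuming this commit:

```python
# Sketch: the container layers used eagerly, mirroring the updated tests.
import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    model = paddle.nn.Sequential(
        fluid.dygraph.Linear(4, 8),
        fluid.dygraph.Linear(8, 2))
    x = fluid.dygraph.to_variable(np.ones([1, 4], dtype='float32'))
    print(model(x).shape)                        # [1, 2]

    layers = paddle.nn.LayerList(
        [fluid.dygraph.Linear(2**i, 2**(i + 1)) for i in range(6)])
    print(len(layers))                           # 6
```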
@@ -14,12 +14,20 @@
# TODO: define the initializers to create a Parameter in neural network
from ...fluid.initializer import Bilinear #DEFINE_ALIAS
from ...fluid.initializer import Constant #DEFINE_ALIAS
from ...fluid.initializer import MSRA #DEFINE_ALIAS
from ...fluid.initializer import Normal #DEFINE_ALIAS
from ...fluid.initializer import TruncatedNormal #DEFINE_ALIAS
from ...fluid.initializer import Uniform #DEFINE_ALIAS
from ...fluid.initializer import Xavier #DEFINE_ALIAS
__all__ = [
# 'Bilinear',
# 'Constant',
# 'MSRA',
# 'Normal',
# 'TruncatedNormal',
# 'Uniform',
# 'Xavier'
'Bilinear',
'Constant',
'MSRA',
'Normal',
'TruncatedNormal',
'Uniform',
'Xavier',
]
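
A small sketch of the initializer aliases being wired into a layer through `ParamAttr` (assumes this commit; the layer sizes and initializer values are arbitrary):

```python
# Sketch: paddle.nn.initializer classes are the fluid initializers under a new path.
import paddle.fluid as fluid
from paddle.nn.initializer import Constant, Normal

with fluid.dygraph.guard():
    linear = fluid.dygraph.Linear(
        4, 8,
        param_attr=fluid.ParamAttr(initializer=Constant(value=1.0)),
        bias_attr=fluid.ParamAttr(initializer=Normal(loc=0.0, scale=0.02)))
    print(float(linear.weight.numpy().mean()))   # 1.0
```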
@@ -12,32 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define all optimizers in this directory,
# __all__ = ['Adadelta',
# 'AdadeltaOptimizer',
# 'Adagrad',
# 'AdagradOptimizer',
# 'Adam',
# 'Adamax',
# 'AdamaxOptimizer',
# 'AdamOptimizer',
# 'DecayedAdagrad',
# 'DecayedAdagradOptimizer',
# 'DGCMomentumOptimizer',
# 'Dpsgd',
# 'DpsgdOptimizer',
# 'ExponentialMovingAverage',
# 'Ftrl',
# 'FtrlOptimizer',
# 'LambOptimizer',
# 'LarsMomentum',
# 'LarsMomentumOptimizer',
# 'LookaheadOptimizer',
# 'ModelAverage',
# 'Momentum',
# 'MomentumOptimizer',
# 'PipelineOptimizer',
# 'RecomputeOptimizer',
# 'RMSPropOptimizer',
# 'SGD',
# 'SGDOptimizer']
__all__ = [
'Adadelta', 'AdadeltaOptimizer', 'Adagrad', 'AdagradOptimizer', 'Adam',
'Adamax', 'AdamaxOptimizer', 'AdamOptimizer', 'DecayedAdagrad',
'DecayedAdagradOptimizer', 'DGCMomentumOptimizer', 'Dpsgd',
'DpsgdOptimizer', 'ExponentialMovingAverage', 'Ftrl', 'FtrlOptimizer',
'LambOptimizer', 'LarsMomentum', 'LarsMomentumOptimizer',
'LookaheadOptimizer', 'ModelAverage', 'Momentum', 'MomentumOptimizer',
'PipelineOptimizer', 'RecomputeOptimizer', 'RMSPropOptimizer', 'SGD',
'SGDOptimizer'
]
from ..fluid.optimizer import SGD, Momentum, Adagrad, Adam, Adamax, Dpsgd, DecayedAdagrad, \
Ftrl, SGDOptimizer, MomentumOptimizer, AdagradOptimizer, \
AdamOptimizer, AdamaxOptimizer, DpsgdOptimizer, \
DecayedAdagradOptimizer, RMSPropOptimizer, FtrlOptimizer, Adadelta, \
AdadeltaOptimizer, ModelAverage, LarsMomentum, \
LarsMomentumOptimizer, DGCMomentumOptimizer, LambOptimizer, \
ExponentialMovingAverage, PipelineOptimizer, LookaheadOptimizer, \
RecomputeOptimizer
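
A sketch of one of the re-exported optimizers driving a dygraph layer (assumes this commit; the toy loss exists only to have something to minimize):

```python
# Sketch: fluid optimizers reached through the new paddle.optimizer namespace.
import numpy as np
import paddle.fluid as fluid
import paddle.optimizer as optimizer

with fluid.dygraph.guard():
    linear = fluid.dygraph.Linear(2, 2)
    sgd = optimizer.SGD(learning_rate=0.01,
                        parameter_list=linear.parameters())

    x = fluid.dygraph.to_variable(np.ones([4, 2], dtype='float32'))
    loss = fluid.layers.reduce_mean(linear(x))
    loss.backward()
    sgd.minimize(loss)
    linear.clear_gradients()
```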
@@ -184,10 +184,14 @@ packages=['paddle',
'paddle.incubate.hapi.vision.models',
'paddle.incubate.hapi.vision.transforms',
'paddle.incubate.hapi.text',
'paddle.incubate',
'paddle.io',
'paddle.optimizer',
'paddle.nn',
'paddle.nn.functional',
'paddle.nn.layer',
'paddle.nn.initializer',
'paddle.metric',
'paddle.imperative',
'paddle.tensor',
]