Unverified commit 022198c5, authored by zhiboniu, committed by GitHub

update 2.0 public api in all left files (#33313)

* update 2.0 public api in all left files

* reverse device.py all list;
fix some flake8 errors
Parent 2de737eb
@@ -21,8 +21,7 @@ except ImportError:
     import paddle from the source directory; please install paddlepaddle*.whl firstly.'''
     )
-import paddle.batch
-batch = batch.batch
+from .batch import batch # noqa: F401
 from .fluid import monkey_patch_variable
 from .fluid.dygraph import monkey_patch_math_varbase
 monkey_patch_variable()
@@ -136,7 +135,6 @@ from .tensor.manipulation import squeeze # noqa: F401
 from .tensor.manipulation import squeeze_ # noqa: F401
 from .tensor.manipulation import stack # noqa: F401
 from .tensor.manipulation import strided_slice # noqa: F401
-from .tensor.manipulation import transpose # noqa: F401
 from .tensor.manipulation import unique # noqa: F401
 from .tensor.manipulation import unsqueeze # noqa: F401
 from .tensor.manipulation import unsqueeze_ # noqa: F401
@@ -192,7 +190,6 @@ from .tensor.math import floor_mod # noqa: F401
 from .tensor.math import multiply # noqa: F401
 from .tensor.math import add # noqa: F401
 from .tensor.math import subtract # noqa: F401
-from .tensor.math import atan # noqa: F401
 from .tensor.math import logsumexp # noqa: F401
 from .tensor.math import inverse # noqa: F401
 from .tensor.math import log1p # noqa: F401
@@ -247,9 +244,8 @@ from .framework import save # noqa: F401
 from .framework import load # noqa: F401
 from .framework import DataParallel # noqa: F401
-from .framework import set_default_dtype #DEFINE_ALIAS
-from .framework import get_default_dtype #DEFINE_ALIAS
-from .framework import set_grad_enabled #DEFINE_ALIAS
+from .framework import set_default_dtype # noqa: F401
+from .framework import get_default_dtype # noqa: F401
 from .tensor.search import index_sample # noqa: F401
 from .tensor.stat import mean # noqa: F401
@@ -284,7 +280,7 @@ import paddle.vision # noqa: F401
 from .tensor.random import check_shape # noqa: F401
 disable_static()
-__all__ = [ #noqa
+__all__ = [ # noqa
     'dtype',
     'uint8',
     'int8',
@@ -327,7 +323,6 @@ __all__ = [ #noqa
     'cos',
     'tan',
     'mean',
-    'XPUPlace',
     'mv',
     'in_dynamic_mode',
     'min',
@@ -364,7 +359,6 @@ __all__ = [ #noqa
     'to_tensor',
     'gather_nd',
     'isinf',
-    'set_device',
     'uniform',
     'floor_divide',
     'remainder',
@@ -388,8 +382,6 @@ __all__ = [ #noqa
     'rand',
     'less_equal',
     'triu',
-    'is_compiled_with_cuda',
-    'is_compiled_with_rocm',
     'sin',
     'dist',
     'unbind',
@@ -418,8 +410,6 @@ __all__ = [ #noqa
     'bernoulli',
     'summary',
     'sinh',
-    'is_compiled_with_xpu',
-    'is_compiled_with_npu',
     'round',
     'DataParallel',
     'argmin',
@@ -443,7 +433,6 @@ __all__ = [ #noqa
     'not_equal',
     'sum',
     'tile',
-    'get_device',
     'greater_equal',
     'isfinite',
     'create_parameter',
@@ -476,7 +465,6 @@ __all__ = [ #noqa
     'scatter_nd',
     'set_default_dtype',
     'expand_as',
-    'get_cudnn_version',
     'stack',
     'sqrt',
     'cholesky',
@@ -490,7 +478,6 @@ __all__ = [ #noqa
     'logical_not',
     'add_n',
     'minimum',
-    'ComplexTensor',
     'scatter',
     'scatter_',
     'floor',
@@ -499,5 +486,6 @@ __all__ = [ #noqa
     'log2',
     'log10',
     'concat',
-    'check_shape'
+    'check_shape',
+    'standard_normal'
 ]
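The names dropped from the top-level `__all__` above (`XPUPlace`, `set_device`, `get_device`, `get_cudnn_version`, and the `is_compiled_with_*` checks) are not deleted; they move to `paddle.device`'s own `__all__`, shown in a later hunk. A minimal sketch of the resulting call sites, assuming a CPU-only build:

```python
import paddle

# Device helpers are now documented under paddle.device rather than
# the top-level namespace.
paddle.device.set_device('cpu')
print(paddle.device.get_device())             # e.g. 'cpu'
print(paddle.device.is_compiled_with_cuda())  # False on a CPU-only build
```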
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from .auto_cast import auto_cast
-from .grad_scaler import GradScaler
+from .auto_cast import auto_cast # noqa: F401
+from .grad_scaler import GradScaler # noqa: F401
 __all__ = ['auto_cast', 'GradScaler']
@@ -14,7 +14,7 @@
 from paddle.fluid.dygraph.amp import amp_guard
-__all__ = ['auto_cast']
+__all__ = []
 def auto_cast(enable=True, custom_white_list=None, custom_black_list=None):
...
@@ -14,7 +14,7 @@
 from paddle.fluid.dygraph.amp import AmpScaler
-__all__ = ['GradScaler']
+__all__ = []
 class GradScaler(AmpScaler):
...
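Emptying the submodule `__all__` lists only changes what a wildcard import from `paddle.amp.auto_cast` or `paddle.amp.grad_scaler` would export; `paddle.amp.auto_cast` and `paddle.amp.GradScaler` remain the public entry points. A hedged mixed-precision sketch built on the signatures above (assumes a CUDA build):

```python
import paddle

model = paddle.nn.Linear(4, 4)
opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

data = paddle.randn([2, 4])
with paddle.amp.auto_cast():        # runs white-listed ops in float16
    loss = paddle.mean(model(data))
scaled = scaler.scale(loss)         # scale loss to avoid float16 underflow
scaled.backward()
scaler.minimize(opt, scaled)        # unscale gradients, then step the optimizer
```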
@@ -12,10 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from ..fluid.dygraph.base import grad #DEFINE_ALIAS
-from . import backward_mode
-from .backward_mode import backward
-from .py_layer import PyLayer, PyLayerContext
+from ..fluid.dygraph.base import grad # noqa: F401
+from . import backward_mode # noqa: F401
+from .backward_mode import backward # noqa: F401
+from .py_layer import PyLayer, PyLayerContext # noqa: F401
 __all__ = ['grad', 'backward', 'PyLayer', 'PyLayerContext']
@@ -15,7 +15,7 @@
 from paddle.fluid import core
 from paddle.fluid import framework
 import paddle
-__all__ = ['backward']
+__all__ = []
 @framework.dygraph_only
...
@@ -15,7 +15,7 @@
 import paddle
 from paddle.fluid.framework import dygraph_only
 from paddle.fluid import core
-__all__ = ['PyLayer', 'PyLayerContext']
+__all__ = []
 class PyLayerContext(object):
...
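For context on the `PyLayer`/`PyLayerContext` pair whose public home moves up to `paddle.autograd`, a minimal custom-op sketch following the documented `forward`/`backward` contract (a hedged illustration, not part of this diff):

```python
import paddle
from paddle.autograd import PyLayer

class CusTanh(PyLayer):
    @staticmethod
    def forward(ctx, x):
        y = paddle.tanh(x)
        ctx.save_for_backward(y)  # stash tensors needed by backward
        return y

    @staticmethod
    def backward(ctx, dy):
        y, = ctx.saved_tensor()
        return dy * (1 - paddle.square(y))  # d tanh(x)/dx = 1 - tanh(x)^2

x = paddle.randn([3])
x.stop_gradient = False
CusTanh.apply(x).sum().backward()
```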
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-__all__ = ['batch']
+__all__ = []
 def batch(reader, batch_size, drop_last=False):
@@ -35,11 +35,11 @@ def batch(reader, batch_size, drop_last=False):
     Examples:
         .. code-block:: python
-            import paddle.fluid as fluid
+            import paddle
             def reader():
                 for i in range(10):
                     yield i
-            batch_reader = fluid.io.batch(reader, batch_size=2)
+            batch_reader = paddle.batch(reader, batch_size=2)
             for data in batch_reader():
                 print(data)
@@ -60,7 +60,7 @@ def batch(reader, batch_size, drop_last=False):
             if len(b) == batch_size:
                 yield b
                 b = []
-        if drop_last == False and len(b) != 0:
+        if drop_last is False and len(b) != 0:
             yield b
     # Batch size check
...
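A quick sketch of the `drop_last` branch touched above (`is False` replacing the `== False` comparison flagged by flake8):

```python
import paddle

def reader():
    for i in range(10):
        yield i

# With drop_last=False (the default) the final short batch [9] is yielded;
# with drop_last=True it is discarded.
for data in paddle.batch(reader, batch_size=3, drop_last=True)():
    print(data)  # [0, 1, 2]  [3, 4, 5]  [6, 7, 8]
```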
@@ -15,18 +15,11 @@
 import six
 import math
-__all__ = [
-    'long_type',
-    'to_text',
-    'to_bytes',
-    'round',
-    'floor_division',
-    'get_exception_message',
-]
+__all__ = []
 if six.PY2:
     int_type = int
-    long_type = long
+    long_type = long # noqa: F821
 else:
     int_type = int
     long_type = int
...
@@ -18,21 +18,16 @@ import os
 from paddle.fluid import core
 from paddle.fluid import framework
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from paddle.fluid.framework import is_compiled_with_cuda #DEFINE_ALIAS
-from paddle.fluid.framework import is_compiled_with_rocm #DEFINE_ALIAS
+from paddle.fluid.framework import is_compiled_with_cuda # noqa: F401
+from paddle.fluid.framework import is_compiled_with_rocm # noqa: F401
-__all__ = [
+__all__ = [ # npqa
     'get_cudnn_version',
     'set_device',
     'get_device',
     'XPUPlace',
     'is_compiled_with_xpu',
-    # 'cpu_places',
-    # 'CPUPlace',
-    # 'cuda_pinned_places',
-    # 'cuda_places',
-    # 'CUDAPinnedPlace',
-    # 'CUDAPlace',
     'is_compiled_with_cuda',
     'is_compiled_with_rocm',
     'is_compiled_with_npu'
@@ -68,7 +63,7 @@ def is_compiled_with_xpu():
         .. code-block:: python
             import paddle
-            support_xpu = paddle.device.is_compiled_with_xpu()
+            support_xpu = paddle.is_compiled_with_xpu()
     """
     return core.is_compiled_with_xpu()
@@ -82,9 +77,10 @@ def XPUPlace(dev_id):
     Examples:
         .. code-block:: python
+            # required: xpu
             import paddle
-            place = paddle.device.XPUPlace(0)
+            place = paddle.XPUPlace(0)
     """
     return core.XPUPlace(dev_id)
@@ -127,15 +123,13 @@ def _convert_to_place(device):
         place = core.CPUPlace()
     elif lower_device == 'gpu':
         if not core.is_compiled_with_cuda():
-            raise ValueError(
-                "The device should not be 'gpu', " \
-                "since PaddlePaddle is not compiled with CUDA")
+            raise ValueError("The device should not be 'gpu', "
+                             "since PaddlePaddle is not compiled with CUDA")
         place = core.CUDAPlace(ParallelEnv().dev_id)
     elif lower_device == 'xpu':
         if not core.is_compiled_with_xpu():
-            raise ValueError(
-                "The device should not be 'xpu', " \
-                "since PaddlePaddle is not compiled with XPU")
+            raise ValueError("The device should not be 'xpu', "
+                             "since PaddlePaddle is not compiled with XPU")
         selected_xpus = os.getenv("FLAGS_selected_xpus", "0").split(",")
         device_id = int(selected_xpus[0])
         place = core.XPUPlace(device_id)
@@ -149,7 +143,7 @@ def _convert_to_place(device):
         if avaliable_gpu_device:
             if not core.is_compiled_with_cuda():
                 raise ValueError(
-                    "The device should not be {}, since PaddlePaddle is " \
+                    "The device should not be {}, since PaddlePaddle is "
                     "not compiled with CUDA".format(avaliable_gpu_device))
             device_info_list = device.split(':', 1)
             device_id = device_info_list[1]
@@ -158,7 +152,7 @@ def _convert_to_place(device):
         if avaliable_xpu_device:
             if not core.is_compiled_with_xpu():
                 raise ValueError(
-                    "The device should not be {}, since PaddlePaddle is " \
+                    "The device should not be {}, since PaddlePaddle is "
                     "not compiled with XPU".format(avaliable_xpu_device))
             device_info_list = device.split(':', 1)
             device_id = device_info_list[1]
...
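`_convert_to_place` is what backs `paddle.set_device`; a short sketch of the device strings it accepts, per the branches above:

```python
import paddle

paddle.set_device('cpu')    # plain device types: 'cpu', 'gpu', 'xpu'
print(paddle.get_device())  # -> 'cpu'

# Indexed forms like 'gpu:0' or 'xpu:1' are parsed by the code above; on a
# build without CUDA/XPU they raise the ValueError shown in this hunk:
# paddle.set_device('gpu:0')  # ValueError: The device should not be 'gpu', ...
```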
@@ -29,9 +29,7 @@ from paddle.fluid.dygraph import parallel_helper
 from paddle.fluid.dygraph.parallel import ParallelEnv
 from paddle.distributed.fleet.base.private_helper_function import wait_server_ready # noqa: F401
-__all__ = [ #noqa
-    "init_parallel_env"
-]
+__all__ = []
 ParallelStrategy = core.ParallelStrategy
@@ -152,7 +150,6 @@ def init_parallel_env():
     init_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
     if init_gloo:
         ep_rank_0 = parallel_env.trainer_endpoints[0].split(":")
-        ep_rank = parallel_env.trainer_endpoints[parallel_env.rank].split(":")
         manager = Manager()
         # glboal dict to store status
         http_server_d = manager.dict()
...
@@ -12,10 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from . import optimizer
-from . import checkpoint
-from ..fluid.layer_helper import LayerHelper
+from .optimizer import LookAhead # noqa: F401
+from .optimizer import ModelAverage # noqa: F401
+from .checkpoint import auto_checkpoint # noqa: F401
+from ..fluid.layer_helper import LayerHelper # noqa: F401
-__all__ = []
-__all__ += optimizer.__all__
-__all__ += checkpoint.__all__
+__all__ = [ # noqa
+    'LookAhead', 'ModelAverage'
+]
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from ...fluid.incubate.checkpoint import auto_checkpoint
-__all__ = ["auto_checkpoint"]
+from ...fluid.incubate.checkpoint import auto_checkpoint # noqa: F401
+__all__ = []
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from .lookahead import LookAhead
-from .modelaverage import ModelAverage
-__all__ = ['LookAhead', 'ModelAverage']
+from .lookahead import LookAhead # noqa: F401
+from .modelaverage import ModelAverage # noqa: F401
+__all__ = []
@@ -20,7 +20,7 @@ import paddle
 import numpy as np
 from paddle.fluid.dygraph import base as imperative_base
-__all__ = ["LookAhead"]
+__all__ = []
 class LookAhead(Optimizer):
@@ -99,7 +99,7 @@ class LookAhead(Optimizer):
             layer = LinearNet()
             loss_fn = nn.CrossEntropyLoss()
             optimizer = paddle.optimizer.SGD(learning_rate=0.1, parameters=layer.parameters())
-            lookahead = paddle.incubate.optimizer.LookAhead(optimizer, alpha=0.2, k=5)
+            lookahead = paddle.incubate.LookAhead(optimizer, alpha=0.2, k=5)
             # create data loader
             dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
@@ -163,7 +163,7 @@ class LookAhead(Optimizer):
             out = linear(inp)
             loss = paddle.mean(out)
             sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
-            lookahead = paddle.incubate.optimizer.LookAhead(sgd, alpha=0.2, k=5)
+            lookahead = paddle.incubate.LookAhead(sgd, alpha=0.2, k=5)
             loss.backward()
             lookahead.step()
             lookahead.clear_grad()
@@ -274,7 +274,7 @@ class LookAhead(Optimizer):
             out = linear(inp)
             loss = paddle.mean(out)
             sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
-            lookahead = paddle.incubate.optimizer.LookAhead(sgd, alpha=0.2, k=5)
+            lookahead = paddle.incubate.LookAhead(sgd, alpha=0.2, k=5)
             loss.backward()
             lookahead.minimize(loss)
             lookahead.clear_grad()
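For readers meeting the incubating API here: LookAhead (Zhang et al., 2019) keeps "fast" weights driven by the inner optimizer and "slow" weights that chase them; every `k` steps the slow copy moves a fraction `alpha` toward the fast copy and is written back. A plain-Python sketch of that synchronization step, using the `alpha`/`k` names from the examples above:

```python
def lookahead_sync(slow, fast, alpha=0.2):
    """Every k inner steps: slow += alpha * (fast - slow); fast = slow."""
    for i in range(len(slow)):
        slow[i] += alpha * (fast[i] - slow[i])
        fast[i] = slow[i]
```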
@@ -282,9 +282,6 @@ class LookAhead(Optimizer):
         """
         assert isinstance(loss, Variable), "The loss should be an Tensor."
-        parameter_list = parameters if parameters \
-            else self._parameter_list
         # Apply inner optimizer to the main_program
         optimize_ops, params_grads = self.inner_optimizer.minimize(
             loss,
...
@@ -21,7 +21,7 @@ import numpy as np
 from paddle.fluid.dygraph import base as imperative_base
 from paddle.fluid.wrapped_decorator import signature_safe_contextmanager
-__all__ = ["ModelAverage"]
+__all__ = []
 class ModelAverage(Optimizer):
@@ -129,7 +129,7 @@ class ModelAverage(Optimizer):
             layer = LinearNet()
             loss_fn = nn.CrossEntropyLoss()
             optimizer = opt.Momentum(learning_rate=0.2, momentum=0.1, parameters=layer.parameters())
-            model_average = paddle.incubate.optimizer.ModelAverage(0.15,
+            model_average = paddle.incubate.ModelAverage(0.15,
                                                         parameters=layer.parameters(),
                                                         min_average_window=2,
                                                         max_average_window=10)
@@ -313,7 +313,7 @@ class ModelAverage(Optimizer):
             sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
             sgd.minimize(loss)
-            modelaverage = paddle.incubate.optimizer.ModelAverage(0.15,
+            modelaverage = paddle.incubate.ModelAverage(0.15,
                                                       parameters=linear.parameters(),
                                                       min_average_window=2,
                                                       max_average_window=4)
@@ -345,7 +345,7 @@ class ModelAverage(Optimizer):
             out = linear(inp)
             loss = paddle.mean(out)
             sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
-            modelaverage = paddle.incubate.optimizer.ModelAverage(0.15,
+            modelaverage = paddle.incubate.ModelAverage(0.15,
                                                       parameters=linear.parameters(),
                                                       min_average_window=2,
                                                       max_average_window=4)
@@ -395,7 +395,7 @@ class ModelAverage(Optimizer):
             sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
-            modelaverage = paddle.incubate.optimizer.ModelAverage(0.15,
+            modelaverage = paddle.incubate.ModelAverage(0.15,
                                                       parameters=linear.parameters(),
                                                       min_average_window=2,
                                                       max_average_window=4)
@@ -415,7 +415,6 @@ class ModelAverage(Optimizer):
                 param)
             old_num_accumulates = self._get_accumulator(
                 'old_num_accumulates', param)
-            num_updates = self._get_accumulator('num_updates', param)
             sum_1 = self._get_accumulator('sum_1', param)
             sum_2 = self._get_accumulator('sum_2', param)
             sum_3 = self._get_accumulator('sum_3', param)
@@ -467,7 +466,7 @@ class ModelAverage(Optimizer):
             sgd = paddle.optimizer.SGD(learning_rate=0.1,parameters=linear.parameters())
-            modelaverage = paddle.incubate.optimizer.ModelAverage(0.15,
+            modelaverage = paddle.incubate.ModelAverage(0.15,
                                                       parameters=linear.parameters(),
                                                       min_average_window=2,
                                                       max_average_window=4)
@@ -506,17 +505,15 @@ class ModelAverage(Optimizer):
                 self._get_accumulator('num_accumulates', param))
             old_num_accumulates = block._clone_variable(
                 self._get_accumulator('old_num_accumulates', param))
-            num_updates = block._clone_variable(
-                self._get_accumulator('num_updates', param))
             # backup param value to grad
             layers.assign(input=param, output=grad)
             # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)
             tmp = layers.sum(x=[num_accumulates, old_num_accumulates])
             sum = layers.sum(x=[sum_1, sum_2, sum_3])
             tmp = layers.cast(
-                x=tmp, dtype='float32' if self._dtype == None else self._dtype)
+                x=tmp, dtype='float32' if self._dtype is None else self._dtype)
             sum = layers.cast(
-                x=sum, dtype='float32' if self._dtype == None else self._dtype)
+                x=sum, dtype='float32' if self._dtype is None else self._dtype)
             layers.ops._elementwise_div(x=sum, y=tmp, out=param)
     def _add_average_restore_op(self, block, param):
...
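The hunk above is the core of ModelAverage: the in-code comment `param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)` is the whole averaging rule. A toy numeric sketch of the same computation:

```python
# Stand-ins for the sum_1/sum_2/sum_3 accumulators over the average window.
sums = [0.9, 1.1, 1.0]
num_accumulates, old_num_accumulates = 2, 1
param = sum(sums) / (num_accumulates + old_num_accumulates)
print(param)  # 1.0 -- the averaged parameter restored at evaluation time
```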
@@ -12,5 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from ..fluid.inference import Config, DataType, PlaceType, PrecisionType, Tensor, \
-    Predictor, create_predictor, get_version, get_num_bytes_of_data_type, PredictorPool
+from ..fluid.inference import Config # noqa: F401
+from ..fluid.inference import DataType # noqa: F401
+from ..fluid.inference import PlaceType # noqa: F401
+from ..fluid.inference import PrecisionType # noqa: F401
+from ..fluid.inference import Tensor # noqa: F401
+from ..fluid.inference import Predictor # noqa: F401
+from ..fluid.inference import create_predictor # noqa: F401
+from ..fluid.inference import get_version # noqa: F401
+from ..fluid.inference import get_num_bytes_of_data_type # noqa: F401
+from ..fluid.inference import PredictorPool # noqa: F401
+__all__ = [ # noqa
+    'Config',
+    'DataType',
+    'PlaceType',
+    'PrecisionType',
+    'Tensor',
+    'Predictor',
+    'create_predictor',
+    'get_version',
+    'get_num_bytes_of_data_type',
+    'PredictorPool'
+]
@@ -14,19 +14,26 @@
 from __future__ import print_function
-from ..fluid.dygraph.jit import save #DEFINE_ALIAS
-from ..fluid.dygraph.jit import load #DEFINE_ALIAS
-from ..fluid.dygraph.jit import TracedLayer #DEFINE_ALIAS
-from ..fluid.dygraph.jit import set_code_level #DEFINE_ALIAS
-from ..fluid.dygraph.jit import set_verbosity #DEFINE_ALIAS
-from ..fluid.dygraph.jit import declarative as to_static #DEFINE_ALIAS
-from ..fluid.dygraph.jit import not_to_static #DEFINE_ALIAS
-from ..fluid.dygraph import ProgramTranslator #DEFINE_ALIAS
-from ..fluid.dygraph.io import TranslatedLayer #DEFINE_ALIAS
-from . import dy2static
+from ..fluid.dygraph.jit import save # noqa: F401
+from ..fluid.dygraph.jit import load # noqa: F401
+from ..fluid.dygraph.jit import TracedLayer # noqa: F401
+from ..fluid.dygraph.jit import set_code_level # noqa: F401
+from ..fluid.dygraph.jit import set_verbosity # noqa: F401
+from ..fluid.dygraph.jit import declarative as to_static # noqa: F401
+from ..fluid.dygraph.jit import not_to_static # noqa: F401
+from ..fluid.dygraph import ProgramTranslator # noqa: F401
+from ..fluid.dygraph.io import TranslatedLayer # noqa: F401
+from . import dy2static # noqa: F401
-__all__ = [
-    'save', 'load', 'TracedLayer', 'to_static', 'ProgramTranslator',
-    'TranslatedLayer', 'set_code_level', 'set_verbosity', 'not_to_static'
-]
+__all__ = [ # noqa
+    'save',
+    'load',
+    'TracedLayer',
+    'to_static',
+    'ProgramTranslator',
+    'TranslatedLayer',
+    'set_code_level',
+    'set_verbosity',
+    'not_to_static'
+]
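`to_static` (aliasing `declarative`) and `save` are the main entry points collected here. A hedged dynamic-to-static sketch using only names from this `__all__`:

```python
import paddle

class SimpleNet(paddle.nn.Layer):
    def __init__(self):
        super(SimpleNet, self).__init__()
        self.linear = paddle.nn.Linear(10, 3)

    @paddle.jit.to_static  # trace the dygraph forward into a static program
    def forward(self, x):
        return self.linear(x)

net = SimpleNet()
net(paddle.randn([4, 10]))                 # run once so the program is traced
paddle.jit.save(net, path='./simple_net')  # export model + params for inference
```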
@@ -12,18 +12,28 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import print_function
-from . import convert_operators
-from .convert_operators import *
-from . import convert_call_func
-from .convert_call_func import *
-from . import variable_trans_func
-from .variable_trans_func import *
+from .convert_call_func import convert_call # noqa: F401
+from .convert_operators import cast_bool_if_necessary # noqa: F401
+from .convert_operators import convert_assert # noqa: F401
+from .convert_operators import convert_ifelse # noqa: F401
+from .convert_operators import convert_len # noqa: F401
+from .convert_operators import convert_logical_and # noqa: F401
+from .convert_operators import convert_logical_not # noqa: F401
+from .convert_operators import convert_logical_or # noqa: F401
+from .convert_operators import convert_pop # noqa: F401
+from .convert_operators import convert_print # noqa: F401
+from .convert_operators import convert_shape_compare # noqa: F401
+from .convert_operators import convert_var_dtype # noqa: F401
+from .convert_operators import convert_var_shape # noqa: F401
+from .convert_operators import convert_var_shape_simple # noqa: F401
+from .convert_operators import eval_if_exist_else_none # noqa: F401
+from .convert_operators import choose_shape_attr_or_api # noqa: F401
+from .convert_operators import convert_while_loop # noqa: F401
+from .variable_trans_func import create_bool_as_type # noqa: F401
+from .variable_trans_func import create_fill_constant_node # noqa: F401
+from .variable_trans_func import create_static_variable_gast_node # noqa: F401
+from .variable_trans_func import data_layer_not_check # noqa: F401
+from .variable_trans_func import to_static_variable # noqa: F401
+from .variable_trans_func import to_static_variable_gast_node # noqa: F401
 __all__ = []
-__all__ += convert_operators.__all__
-__all__ += convert_call_func.__all__
-__all__ += variable_trans_func.__all__
@@ -13,6 +13,6 @@
 # limitations under the License.
 from __future__ import print_function
-from ...fluid.dygraph.dygraph_to_static.convert_call_func import convert_call #DEFINE_ALIAS
-__all__ = ['convert_call']
+from ...fluid.dygraph.dygraph_to_static.convert_call_func import convert_call # noqa: F401
+__all__ = []
@@ -13,27 +13,21 @@
 # limitations under the License.
 from __future__ import print_function
-from ...fluid.dygraph.dygraph_to_static.convert_operators import cast_bool_if_necessary #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_assert #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_ifelse #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_len #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_and #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_not #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_or #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_pop #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_print #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_shape_compare #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_dtype #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_shape #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_shape_simple #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import eval_if_exist_else_none #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import choose_shape_attr_or_api #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_while_loop #DEFINE_ALIAS
+from ...fluid.dygraph.dygraph_to_static.convert_operators import cast_bool_if_necessary # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_assert # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_ifelse # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_len # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_and # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_not # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_logical_or # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_pop # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_print # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_shape_compare # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_dtype # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_shape # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_var_shape_simple # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import eval_if_exist_else_none # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import choose_shape_attr_or_api # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.convert_operators import convert_while_loop # noqa: F401
-__all__ = [
-    'cast_bool_if_necessary', 'convert_assert', 'convert_ifelse', 'convert_len',
-    'convert_logical_and', 'convert_logical_not', 'convert_logical_or',
-    'convert_pop', 'convert_print', 'convert_shape_compare',
-    'convert_var_dtype', 'convert_var_shape', 'convert_var_shape_simple',
-    'eval_if_exist_else_none', 'choose_shape_attr_or_api', 'convert_while_loop'
-]
+__all__ = []
@@ -14,15 +14,11 @@
 from __future__ import print_function
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_bool_as_type #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_fill_constant_node #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_static_variable_gast_node #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import data_layer_not_check #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable #DEFINE_ALIAS
-from ...fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable_gast_node #DEFINE_ALIAS
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_bool_as_type # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_fill_constant_node # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import create_static_variable_gast_node # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import data_layer_not_check # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable # noqa: F401
+from ...fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable_gast_node # noqa: F401
-__all__ = [
-    'create_bool_as_type', 'create_fill_constant_node',
-    'create_static_variable_gast_node', 'data_layer_not_check',
-    'to_static_variable', 'to_static_variable_gast_node'
-]
+__all__ = []
@@ -12,7 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from .metrics import *
-from . import metrics
+from .metrics import Metric # noqa: F401
+from .metrics import Accuracy # noqa: F401
+from .metrics import Precision # noqa: F401
+from .metrics import Recall # noqa: F401
+from .metrics import Auc # noqa: F401
+from .metrics import accuracy # noqa: F401
-__all__ = metrics.__all__
+__all__ = [ #noqa
+    'Metric',
+    'Accuracy',
+    'Precision',
+    'Recall',
+    'Auc',
+    'accuracy'
+]
@@ -26,7 +26,7 @@ from ..fluid.layers.nn import topk
 from ..fluid.framework import core, _varbase_creator, in_dygraph_mode
 import paddle
-__all__ = ['Metric', 'Accuracy', 'Precision', 'Recall', 'Auc', 'accuracy']
+__all__ = []
 def _is_numpy_(var):
...
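The metric classes keep their public home at `paddle.metric`. A minimal `Accuracy` sketch against the compute/update/accumulate cycle (hedged illustration):

```python
import paddle

acc = paddle.metric.Accuracy()
preds = paddle.to_tensor([[0.1, 0.9], [0.8, 0.2]])  # two samples, two classes
labels = paddle.to_tensor([[1], [1]])
correct = acc.compute(preds, labels)  # per-sample correctness mask
acc.update(correct)
print(acc.accumulate())  # 0.5: only the first prediction matches its label
```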
@@ -287,5 +287,6 @@ __all__ = [ #noqa
     'Swish',
     'PixelShuffle',
     'ELU',
-    'ReLU6'
+    'ReLU6',
+    'LayerDict'
 ]
@@ -194,5 +194,6 @@ __all__ = [ #noqa
     'embedding',
     'gather_tree',
     'one_hot',
-    'normalize'
+    'normalize',
+    'temporal_shift'
 ]
@@ -20,7 +20,7 @@ from ..layer.conv import Conv1DTranspose, Conv2DTranspose, Conv3DTranspose
 from ..layer.common import Linear
 from .. import functional as F
-__all__ = ['spectral_norm']
+__all__ = []
 def normal_(x, mean=0., std=1.):
...
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import print_function
-from .export import export
+from .export import export # noqa: F401
 __all__ = ['export']
@@ -15,7 +15,7 @@
 import os
 from paddle.utils import try_import
-__all__ = ['export']
+__all__ = []
 def export(layer, path, input_spec=None, opset_version=9, **configs):
...
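The `export` signature above (`layer, path, input_spec=None, opset_version=9, **configs`) is the whole public surface of `paddle.onnx`. A hedged usage sketch (assumes the paddle2onnx backend is installed; the file lands at `path + '.onnx'`):

```python
import paddle
from paddle.static import InputSpec

layer = paddle.nn.Linear(4, 2)
paddle.onnx.export(
    layer, 'linear_net',  # writes linear_net.onnx
    input_spec=[InputSpec(shape=[None, 4], dtype='float32')])
```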
@@ -85,11 +85,21 @@ __all__ = [ #noqa
     'load',
     'save_inference_model',
     'load_inference_model',
+    'serialize_program',
+    'serialize_persistables',
+    'save_to_file',
+    'deserialize_program',
+    'deserialize_persistables',
+    'load_from_file',
     'normalize_program',
     'load_program_state',
     'set_program_state',
     'cpu_places',
     'cuda_places',
     'Variable',
-    'create_global_var'
+    'create_global_var',
+    'accuracy',
+    'auc',
+    'device_guard',
+    'create_parameter'
 ]
@@ -68,7 +68,6 @@ __all__ = [ #noqa
     'conv2d_transpose',
     'conv3d',
     'conv3d_transpose',
-    'create_parameter',
     'crf_decoding',
     'data_norm',
     'deform_conv2d',
...
@@ -26,7 +26,6 @@ from .creation import ones_like # noqa: F401
 from .creation import zeros # noqa: F401
 from .creation import zeros_like # noqa: F401
 from .creation import arange # noqa: F401
-from .creation import eye # noqa: F401
 from .creation import full # noqa: F401
 from .creation import full_like # noqa: F401
 from .creation import triu # noqa: F401
@@ -83,7 +82,6 @@ from .manipulation import squeeze # noqa: F401
 from .manipulation import squeeze_ # noqa: F401
 from .manipulation import stack # noqa: F401
 from .manipulation import strided_slice # noqa: F401
-from .manipulation import transpose # noqa: F401
 from .manipulation import unique # noqa: F401
 from .manipulation import unsqueeze # noqa: F401
 from .manipulation import unsqueeze_ # noqa: F401
@@ -144,7 +142,6 @@ from .math import add # noqa: F401
 from .math import add_ # noqa: F401
 from .math import subtract # noqa: F401
 from .math import subtract_ # noqa: F401
-from .math import atan # noqa: F401
 from .math import logsumexp # noqa: F401
 from .math import inverse # noqa: F401
 from .math import log2 # noqa: F401
@@ -230,7 +227,6 @@ tensor_method_func = [ #noqa
     'log2',
     'log10',
     'logsumexp',
-    'mul',
     'multiplex',
     'pow',
     'prod',
...