diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index 6cc986c61e1db1990cde9598cccd5ee307b31df5..05e8c9c6727801984d39c8acb26157fcee9eec37 100644 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -38,7 +38,6 @@ import paddle.tensor import paddle.nn import paddle.fleet import paddle.framework -import paddle.imperative import paddle.optimizer import paddle.metric import paddle.incubate.complex as complex @@ -69,8 +68,6 @@ from .tensor.creation import full_like #DEFINE_ALIAS from .tensor.creation import triu #DEFINE_ALIAS from .tensor.creation import tril #DEFINE_ALIAS from .tensor.creation import meshgrid #DEFINE_ALIAS -from .tensor.io import save #DEFINE_ALIAS -from .tensor.io import load #DEFINE_ALIAS from .tensor.linalg import matmul #DEFINE_ALIAS from .tensor.linalg import dot #DEFINE_ALIAS # from .tensor.linalg import einsum #DEFINE_ALIAS @@ -201,30 +198,34 @@ from .tensor.search import index_select #DEFINE_ALIAS from .tensor.search import nonzero #DEFINE_ALIAS from .tensor.search import sort #DEFINE_ALIAS from .framework.random import manual_seed #DEFINE_ALIAS -from .framework import append_backward #DEFINE_ALIAS -from .framework import gradients #DEFINE_ALIAS -from .framework import Executor #DEFINE_ALIAS -from .framework import global_scope #DEFINE_ALIAS -from .framework import scope_guard #DEFINE_ALIAS -from .framework import BuildStrategy #DEFINE_ALIAS -from .framework import CompiledProgram #DEFINE_ALIAS -from .framework import default_main_program #DEFINE_ALIAS -from .framework import default_startup_program #DEFINE_ALIAS +from .framework import Variable #DEFINE_ALIAS +from .framework import ParamAttr #DEFINE_ALIAS from .framework import create_global_var #DEFINE_ALIAS from .framework import create_parameter #DEFINE_ALIAS -from .framework import Print #DEFINE_ALIAS -from .framework import py_func #DEFINE_ALIAS -from .framework import ExecutionStrategy #DEFINE_ALIAS -from .framework import name_scope #DEFINE_ALIAS -from .framework import ParallelExecutor #DEFINE_ALIAS -from .framework import ParamAttr #DEFINE_ALIAS -from .framework import Program #DEFINE_ALIAS -from .framework import program_guard #DEFINE_ALIAS -from .framework import Variable #DEFINE_ALIAS -from .framework import WeightNormParamAttr #DEFINE_ALIAS from .framework import CPUPlace #DEFINE_ALIAS from .framework import CUDAPlace #DEFINE_ALIAS from .framework import CUDAPinnedPlace #DEFINE_ALIAS + +from .framework import BackwardStrategy #DEFINE_ALIAS +from .framework import to_variable #DEFINE_ALIAS +from .framework import grad #DEFINE_ALIAS +from .framework import no_grad #DEFINE_ALIAS +from .framework import save_dygraph #DEFINE_ALIAS +from .framework import load_dygraph #DEFINE_ALIAS +from .framework import save #DEFINE_ALIAS +from .framework import load #DEFINE_ALIAS +from .framework import prepare_context #DEFINE_ALIAS +from .framework import ParallelEnv #DEFINE_ALIAS +from .framework import DataParallel #DEFINE_ALIAS + +from .framework import NoamDecay #DEFINE_ALIAS +from .framework import PiecewiseDecay #DEFINE_ALIAS +from .framework import NaturalExpDecay #DEFINE_ALIAS +from .framework import ExponentialDecay #DEFINE_ALIAS +from .framework import InverseTimeDecay #DEFINE_ALIAS +from .framework import PolynomialDecay #DEFINE_ALIAS +from .framework import CosineDecay #DEFINE_ALIAS + from .tensor.search import index_sample #DEFINE_ALIAS from .tensor.stat import mean #DEFINE_ALIAS from .tensor.stat import reduce_mean #DEFINE_ALIAS @@ -237,6 +238,11 @@ from .fluid.data import data 
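The hunk above moves the dygraph entry points (to_variable, grad, no_grad, save, load, BackwardStrategy, and the learning-rate decay classes) from paddle.imperative up to the top-level paddle namespace. A minimal sketch of the renamed mode-switching API after this change (the tensor values are illustrative; disable_static, enable_static, and in_dynamic_mode are the aliases added in the next hunk):

    import numpy as np
    import paddle

    paddle.disable_static()    # was paddle.enable_imperative()
    x = paddle.to_variable(np.ones([2, 2], dtype='float32'))  # was paddle.imperative.to_variable
    print(paddle.in_dynamic_mode())   # was paddle.in_imperative_mode(); prints True
    paddle.enable_static()     # was paddle.disable_imperative()
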
from . import incubate from .incubate import hapi -from .fluid.dygraph.base import enable_dygraph as enable_imperative #DEFINE_ALIAS -from .fluid.dygraph.base import disable_dygraph as disable_imperative #DEFINE_ALIAS -from .fluid.framework import in_dygraph_mode as in_imperative_mode #DEFINE_ALIAS +from .fluid.dygraph.base import enable_dygraph #DEFINE_ALIAS +from .fluid.dygraph.base import disable_dygraph #DEFINE_ALIAS +from .fluid.dygraph.base import enable_dygraph as disable_static #DEFINE_ALIAS +from .fluid.dygraph.base import disable_dygraph as enable_static #DEFINE_ALIAS +from .fluid.framework import in_dygraph_mode as in_dynamic_mode #DEFINE_ALIAS + +from . import jit +from . import static diff --git a/python/paddle/declarative/__init__.py b/python/paddle/declarative/__init__.py deleted file mode 100644 index 0f28cc7f424d5f77f9080dae89f1ec5fa6adb760..0000000000000000000000000000000000000000 --- a/python/paddle/declarative/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -__all__ = [ - 'fc', - 'batch_norm', - 'embedding', - 'bilinear_tensor_product' - 'conv2d' - 'conv2d_transpose' - 'conv3d' - 'conv3d_transpose' - 'create_parameter' - 'crf_decoding' - 'data_norm' - 'deformable_conv' - 'group_norm' - 'hsigmoid' - 'instance_norm' - 'layer_norm' - 'multi_box_head' - 'nce' - 'prelu' - 'row_conv' - 'spectral_norm', -] - -from ..fluid.layers import fc, batch_norm, bilinear_tensor_product, \ - conv2d, conv2d_transpose, conv3d, conv3d_transpose, create_parameter, \ - crf_decoding, data_norm, deformable_conv, group_norm, hsigmoid, instance_norm, \ - layer_norm, multi_box_head, nce, prelu, row_conv, spectral_norm - -from ..fluid.input import embedding diff --git a/python/paddle/fleet/base/fleet_base.py b/python/paddle/fleet/base/fleet_base.py index 459afedf3a4e6c9c941fe1884fbc7d7213102ee1..6f4bdc166d603b7f71c106630d662a41fa6ebaea 100644 --- a/python/paddle/fleet/base/fleet_base.py +++ b/python/paddle/fleet/base/fleet_base.py @@ -286,8 +286,8 @@ class Fleet(object): context["loss"] = loss if startup_program == None: self.origin_startup_program = \ - paddle.default_startup_program().clone(for_test=False) - startup_program = paddle.default_startup_program() + paddle.static.default_startup_program().clone(for_test=False) + startup_program = paddle.static.default_startup_program() else: self.origin_startup_program = \ startup_program.clone(for_test=False) @@ -338,7 +338,7 @@ class Fleet(object): parameter_list=parameter_list, no_grad_set=no_grad_set) - default_program = paddle.default_main_program() + default_program = paddle.static.default_main_program() if id(default_program) != id(loss.block.program): paddle.fluid.framework.switch_main_program(loss.block.program) diff --git a/python/paddle/fleet/meta_optimizers/graph_execution_optimizer.py b/python/paddle/fleet/meta_optimizers/graph_execution_optimizer.py index 
0473f7c1d689fb9cc2fc856a41076d0ab68baf0d..78478b9691b2174612669a8dca3fc749f8d8a7b3 100644 --- a/python/paddle/fleet/meta_optimizers/graph_execution_optimizer.py +++ b/python/paddle/fleet/meta_optimizers/graph_execution_optimizer.py @@ -190,7 +190,7 @@ class GraphExecutionOptimizer(MetaOptimizerBase): parameter_list=None, no_grad_set=None): if startup_program == None: - startup_program = paddle.default_startup_program() + startup_program = paddle.static.default_startup_program() compiled_program = self._try_to_compile(startup_program, loss.block.program, loss) loss.block.program._graph = compiled_program diff --git a/python/paddle/fluid/dygraph/base.py b/python/paddle/fluid/dygraph/base.py index 7d972cbbd09b95e5d7476837cb3f3318526deed8..826de0588efe97de53dbe2c6530dc724a935dcf9 100644 --- a/python/paddle/fluid/dygraph/base.py +++ b/python/paddle/fluid/dygraph/base.py @@ -121,10 +121,6 @@ def enabled(): def enable_dygraph(place=None): """ - :alias_main: paddle.enable_dygraph - :alias: paddle.enable_dygraph,paddle.enable_imperative.enable_dygraph - :old_api: paddle.fluid.dygraph.base.enable_dygraph - This function enables dynamic graph mode. Parameters: @@ -155,10 +151,6 @@ def enable_dygraph(place=None): def disable_dygraph(): """ - :alias_main: paddle.disable_dygraph - :alias: paddle.disable_dygraph,paddle.disable_imperative.disable_dygraph - :old_api: paddle.fluid.dygraph.base.disable_dygraph - This function disables dynamic graph mode. return: diff --git a/python/paddle/fluid/dygraph/jit.py b/python/paddle/fluid/dygraph/jit.py index 8439b87dd9ced618ad4f0b2e6d9d321d5f8662be..337d2dfc008e8298925bb3febdc2007736e4a1b6 100644 --- a/python/paddle/fluid/dygraph/jit.py +++ b/python/paddle/fluid/dygraph/jit.py @@ -701,11 +701,11 @@ def save(layer, model_path, input_spec=None, configs=None): prog_translator = ProgramTranslator() if not prog_translator.enable: raise RuntimeError( - "The paddle.imperative.jit.save doesn't work when setting ProgramTranslator.enable=False." + "The paddle.jit.save doesn't work when setting ProgramTranslator.enable=False." ) if not isinstance(layer, Layer): raise TypeError( - "The input layer of paddle.imperative.jit.save should be 'Layer', but received layer type is %s." + "The input layer of paddle.jit.save should be 'Layer', but received layer type is %s." % type(layer)) if configs is None: diff --git a/python/paddle/fluid/dygraph/layers.py b/python/paddle/fluid/dygraph/layers.py index 72f105933dca919c8b3c2cbdf90318a5444d0866..250e2b3b3882ccccd3a6582eabd565ea9cba5cc8 100644 --- a/python/paddle/fluid/dygraph/layers.py +++ b/python/paddle/fluid/dygraph/layers.py @@ -146,7 +146,7 @@ class Layer(core.Layer): import paddle import paddle.nn as nn - paddle.enable_imperative() + paddle.disable_static() net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2)) diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py index 4afed8c174b06507f2eeb18d39579bc3df026a3e..e56f26f1b1b9493928b106bd9e5d16afad0d94ce 100644 --- a/python/paddle/fluid/dygraph/nn.py +++ b/python/paddle/fluid/dygraph/nn.py @@ -3207,12 +3207,12 @@ class Flatten(layers.Layer): .. 
code-block:: python import paddle - from paddle.imperative import to_variable + from paddle import to_variable import numpy as np inp_np = np.ones([5, 2, 3, 4]).astype('float32') - paddle.enable_imperative() + paddle.disable_static() inp_np = to_variable(inp_np) flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2) diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py index d509fcc38e771bf5a5bacb63602966a871c7c885..7b4390c7a7b4e32fcb7937d47bedd875f1236006 100644 --- a/python/paddle/fluid/dygraph/varbase_patch_methods.py +++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py @@ -226,7 +226,7 @@ def monkey_patch_varbase(): .. code-block:: python import paddle - paddle.enable_imperative() + paddle.disable_static() x = paddle.rand([1, 5]) print(x) # Variable: eager_tmp_0 @@ -235,7 +235,7 @@ # - layout: NCHW # - dtype: float # - data: [0.645307 0.597973 0.732793 0.646921 0.540328] - paddle.disable_imperative() + paddle.enable_static() """ tensor = self.value().get_tensor() if tensor._is_initialized(): diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 85d4a73b35cf98083b5cafa67546eb974d1088a8..8fe22024e6f12238e1b5bdb5adab052aff811b04 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -5175,7 +5175,7 @@ class ParamBase(core.VarBase): .. code-block:: python import paddle - paddle.enable_imperative() + paddle.disable_static() conv = paddle.nn.Conv2D(3, 3, 5) print(conv.weight) # Parameter: conv2d_0.w_0 @@ -5184,7 +5184,7 @@ # - layout: NCHW # - dtype: float # - data: [...] - paddle.disable_imperative() + paddle.enable_static() """ tensor = self.value().get_tensor() if tensor._is_initialized(): diff --git a/python/paddle/fluid/install_check.py b/python/paddle/fluid/install_check.py index 201cc61e4d479dc11b169e02481ac4ff4780c2b8..0e813e21ea3c0677fff8e9ac06af654ca52c02c4 100644 --- a/python/paddle/fluid/install_check.py +++ b/python/paddle/fluid/install_check.py @@ -13,6 +13,7 @@ # limitations under the License. import os +import paddle from .framework import Program, program_guard, unique_name, cuda_places, cpu_places from .param_attr import ParamAttr from .initializer import Constant diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index ffe8939cd7a39cd7835fd9d0ab74dd66d4f24981..6e5f7fd035acfeab975f63b0794829d57f9bb239 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -1669,9 +1669,6 @@ def _load_persistable_nodes(executor, dirname, graph): def save(program, model_path): """ :api_attr: Static Graph - :alias_main: paddle.save - :alias: paddle.save,paddle.tensor.save,paddle.tensor.io.save - :old_api: paddle.fluid.save This function saves parameters, optimizer information and the network description to model_path. @@ -1733,9 +1730,6 @@ def load(program, model_path, executor=None, var_list=None): """ :api_attr: Static Graph - :alias_main: paddle.load - :alias: paddle.load,paddle.tensor.load,paddle.tensor.io.load - :old_api: paddle.fluid.io.load This function gets parameters and optimizer information from the program, and then gets the corresponding values from the file. An exception will be thrown if the shape or dtype of the parameters does not match. 
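With the alias directives above removed, fluid.io.save and fluid.io.load are no longer exposed as paddle.save and paddle.load; those top-level names now point at the dygraph state-dict helpers imported from .framework in the first hunk, while the static-graph pair moves to paddle.static.save and paddle.static.load. A minimal sketch of the dygraph pair, modeled on the updated test_imperative_save_load test later in this diff (the Embedding shape and file path are illustrative):

    import paddle
    import paddle.fluid as fluid

    paddle.disable_static()
    emb = fluid.dygraph.Embedding([10, 10])
    paddle.save(emb.state_dict(), './emb_dy')                   # was paddle.imperative.save
    para_state_dict, opti_state_dict = paddle.load('./emb_dy')  # was paddle.imperative.load
    # opti_state_dict is None here because only parameters were saved
    paddle.enable_static()
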
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 32532f7ed008ecd72154a298bbedbc6dcefd7e9a..d23f20e1b3d4b917a7618aa36a5efe4ac734a22d 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -12073,11 +12073,11 @@ def logical_and(x, y, out=None, name=None): import paddle import numpy as np - paddle.enable_imperative() + paddle.disable_static() x_data = np.array([True, True, False, False], dtype=np.bool) y_data = np.array([True, False, True, False], dtype=np.bool) - x = paddle.imperative.to_variable(x_data) - y = paddle.imperative.to_variable(y_data) + x = paddle.to_variable(x_data) + y = paddle.to_variable(y_data) res = paddle.logical_and(x, y) print(res.numpy()) # [True False False False] """ @@ -12115,11 +12115,11 @@ def logical_or(x, y, out=None, name=None): import paddle import numpy as np - paddle.enable_imperative() + paddle.disable_static() x_data = np.array([True, True, False, False], dtype=np.bool) y_data = np.array([True, False, True, False], dtype=np.bool) - x = paddle.imperative.to_variable(x_data) - y = paddle.imperative.to_variable(y_data) + x = paddle.to_variable(x_data) + y = paddle.to_variable(y_data) res = paddle.logical_or(x, y) print(res.numpy()) # [True True True False] """ @@ -12157,11 +12157,11 @@ def logical_xor(x, y, out=None, name=None): import paddle import numpy as np - paddle.enable_imperative() + paddle.disable_static() x_data = np.array([True, True, False, False], dtype=np.bool) y_data = np.array([True, False, True, False], dtype=np.bool) - x = paddle.imperative.to_variable(x_data) - y = paddle.imperative.to_variable(y_data) + x = paddle.to_variable(x_data) + y = paddle.to_variable(y_data) res = paddle.logical_xor(x, y) print(res.numpy()) # [False True True False] """ @@ -12197,9 +12197,9 @@ def logical_not(x, out=None, name=None): import paddle import numpy as np - paddle.enable_imperative() + paddle.disable_static() x_data = np.array([True, False, True, False], dtype=np.bool) - x = paddle.imperative.to_variable(x_data) + x = paddle.to_variable(x_data) res = paddle.logical_not(x) print(res.numpy()) # [False True False True] """ diff --git a/python/paddle/fluid/tests/unittests/test_arange.py b/python/paddle/fluid/tests/unittests/test_arange.py index 1736e49f3b67b380b88e53ac9876f3ccde53104c..29003d28e441c02e040a8d6cb9888e376521bc72 100644 --- a/python/paddle/fluid/tests/unittests/test_arange.py +++ b/python/paddle/fluid/tests/unittests/test_arange.py @@ -16,7 +16,7 @@ from __future__ import print_function import paddle from paddle.fluid import core -from paddle import program_guard, Program +from paddle.static import program_guard, Program import unittest import numpy as np from op_test import OpTest @@ -82,7 +82,7 @@ class TestArangeAPI(unittest.TestCase): place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda( ) else paddle.CPUPlace() - exe = paddle.Executor(place) + exe = paddle.static.Executor(place) out = exe.run(fetch_list=[x1]) expected_data = np.arange(0, 5, 1).astype(np.float32) @@ -93,15 +93,16 @@ class TestArangeImperative(unittest.TestCase): def test_out(self): place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda( ) else paddle.CPUPlace() - with paddle.imperative.guard(place): - x1 = paddle.arange(0, 5, 1) - x2 = paddle.tensor.arange(5) - x3 = paddle.tensor.creation.arange(5) - - start = paddle.imperative.to_variable(np.array([0], 'float32')) - end = paddle.imperative.to_variable(np.array([5], 'float32')) - step = paddle.imperative.to_variable(np.array([1], 'float32')) - x4 
= paddle.arange(start, end, step, 'int64') + paddle.disable_static(place) + x1 = paddle.arange(0, 5, 1) + x2 = paddle.tensor.arange(5) + x3 = paddle.tensor.creation.arange(5) + + start = paddle.to_variable(np.array([0], 'float32')) + end = paddle.to_variable(np.array([5], 'float32')) + step = paddle.to_variable(np.array([1], 'float32')) + x4 = paddle.arange(start, end, step, 'int64') + paddle.enable_static() expected_data = np.arange(0, 5, 1).astype(np.int64) for i in [x1, x2, x3, x4]: diff --git a/python/paddle/fluid/tests/unittests/test_argsort_op.py b/python/paddle/fluid/tests/unittests/test_argsort_op.py index eb19c8fd6b45cab65e9c9bced189478098bdb66c..2a8e0e6c7f0bcf4a779b4c098cd4af816e976205 100644 --- a/python/paddle/fluid/tests/unittests/test_argsort_op.py +++ b/python/paddle/fluid/tests/unittests/test_argsort_op.py @@ -17,7 +17,6 @@ from __future__ import print_function import unittest import paddle import paddle.fluid as fluid -import paddle.imperative as imperative import paddle.fluid.layers as layers import numpy as np import six @@ -384,20 +383,21 @@ class TestArgsortDygraph(unittest.TestCase): self.place = core.CPUPlace() def test_api_0(self): - with imperative.guard(self.place): - var_x = imperative.to_variable(self.input_data) - out = paddle.argsort(var_x) - self.assertEqual((np.argsort(self.input_data) == out.numpy()).all(), - True) + paddle.disable_static(self.place) + var_x = paddle.to_variable(self.input_data) + out = paddle.argsort(var_x) + self.assertEqual((np.argsort(self.input_data) == out.numpy()).all(), + True) + paddle.enable_static() def test_api_1(self): - with imperative.guard(self.place): - var_x = imperative.to_variable(self.input_data) - out = paddle.argsort(var_x, axis=-1) - self.assertEqual( - (np.argsort( - self.input_data, axis=-1) == out.numpy()).all(), - True) + paddle.disable_static(self.place) + var_x = paddle.to_variable(self.input_data) + out = paddle.argsort(var_x, axis=-1) + self.assertEqual( + (np.argsort( + self.input_data, axis=-1) == out.numpy()).all(), True) + paddle.enable_static() if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_compare_op.py b/python/paddle/fluid/tests/unittests/test_compare_op.py index a97f54d6cac1ea91f05cb3dc68729f5b68df7c9e..99d0c77fce50ffdfae8a3ec11fce42ea7942c5e6 100644 --- a/python/paddle/fluid/tests/unittests/test_compare_op.py +++ b/python/paddle/fluid/tests/unittests/test_compare_op.py @@ -97,7 +97,7 @@ def create_paddle_case(op_type, callback): y = paddle.nn.data(name='y', shape=[1, 2, 3], dtype='int32') op = eval("paddle.%s" % (self.op_type)) out = op(x, y) - exe = paddle.Executor(self.place) + exe = paddle.static.Executor(self.place) input_x = np.arange(1, 7).reshape((1, 2, 1, 3)).astype(np.int32) input_y = np.arange(0, 6).reshape((1, 2, 3)).astype(np.int32) real_result = callback(input_x, input_y) diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py index 48b597ab282351739fcca894aa69685a13a9688f..b4dbba7eead397c46c37a8df013dabb00177f030 100644 --- a/python/paddle/fluid/tests/unittests/test_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_concat_op.py @@ -268,9 +268,9 @@ class TestConcatAPI(unittest.TestCase): out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64) out_4 = paddle.concat(x=[x_2, x_3], axis=negative_int64) - exe = paddle.Executor(place=paddle.CPUPlace()) + exe = paddle.static.Executor(place=paddle.CPUPlace()) [res_1, res_2, res_3, res_4] = exe.run( - paddle.default_main_program(), + 
paddle.static.default_main_program(), feed={"x_1": input_2, "x_2": input_2, "x_3": input_3}, @@ -284,14 +284,15 @@ class TestConcatAPI(unittest.TestCase): in1 = np.array([[1, 2, 3], [4, 5, 6]]) in2 = np.array([[11, 12, 13], [14, 15, 16]]) in3 = np.array([[21, 22], [23, 24]]) - with paddle.imperative.guard(): - x1 = paddle.imperative.to_variable(in1) - x2 = paddle.imperative.to_variable(in2) - x3 = paddle.imperative.to_variable(in3) - out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1) - out2 = paddle.concat(x=[x1, x2], axis=0) - np_out1 = np.concatenate([in1, in2, in3], axis=-1) - np_out2 = np.concatenate([in1, in2], axis=0) + paddle.disable_static() + x1 = paddle.to_variable(in1) + x2 = paddle.to_variable(in2) + x3 = paddle.to_variable(in3) + out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1) + out2 = paddle.concat(x=[x1, x2], axis=0) + np_out1 = np.concatenate([in1, in2, in3], axis=-1) + np_out2 = np.concatenate([in1, in2], axis=0) + paddle.enable_static() self.assertEqual((out1.numpy() == np_out1).all(), True) self.assertEqual((out2.numpy() == np_out2).all(), True) diff --git a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py index c3283324bdc330ab15f2edf0a911a4a8c16b3f9a..57024e8ae5cd5dd2fbb231269fda50b5ef6e7a47 100644 --- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py +++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py @@ -21,7 +21,7 @@ import paddle import paddle.fluid.core as core import paddle.fluid as fluid from paddle.fluid import compiler, Program, program_guard -from paddle.imperative import to_variable +from paddle import to_variable class TestCumsumOp(unittest.TestCase): @@ -83,16 +83,18 @@ class TestCumsumOp(unittest.TestCase): self.assertTrue(np.allclose(z, out[5])) def test_cpu(self): - with paddle.imperative.guard(paddle.fluid.CPUPlace()): - self.run_cases() + paddle.disable_static(paddle.fluid.CPUPlace()) + self.run_cases() + paddle.enable_static() self.run_static() def test_gpu(self): if not fluid.core.is_compiled_with_cuda(): return - with paddle.imperative.guard(paddle.fluid.CUDAPlace(0)): - self.run_cases() + paddle.disable_static(paddle.fluid.CUDAPlace(0)) + self.run_cases() + paddle.enable_static() self.run_static(use_gpu=True) diff --git a/python/paddle/fluid/tests/unittests/test_directory_migration.py b/python/paddle/fluid/tests/unittests/test_directory_migration.py new file mode 100644 index 0000000000000000000000000000000000000000..4dc2c92ad918c269d5277da0c13d4a96182a253d --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_directory_migration.py @@ -0,0 +1,181 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import os +import sys +import time +import subprocess +import unittest +import numpy as np +import paddle + + +class TestDirectory(unittest.TestCase): + def get_import_command(self, module): + paths = module.split('.') + if len(paths) <= 1: + return module + package = '.'.join(paths[:-1]) + func = paths[-1] + cmd = 'from {} import {}'.format(package, func) + return cmd + + def test_new_directory(self): + new_directory = [ + 'paddle.enable_static', 'paddle.disable_static', + 'paddle.in_dynamic_mode', 'paddle.to_variable', 'paddle.grad', + 'paddle.no_grad', 'paddle.save', 'paddle.load', + 'paddle.static.save', 'paddle.static.load', + 'paddle.BackwardStrategy', 'paddle.ParallelEnv', + 'paddle.prepare_context', 'paddle.DataParallel', 'paddle.jit', + 'paddle.jit.TracedLayer', 'paddle.jit.to_static', + 'paddle.jit.ProgramTranslator', 'paddle.jit.TranslatedLayer', + 'paddle.jit.save', 'paddle.jit.load', 'paddle.jit.SaveLoadConfig', + 'paddle.NoamDecay', 'paddle.PiecewiseDecay', + 'paddle.NaturalExpDecay', 'paddle.ExponentialDecay', + 'paddle.InverseTimeDecay', 'paddle.PolynomialDecay', + 'paddle.CosineDecay', 'paddle.static.Executor', + 'paddle.static.global_scope', 'paddle.static.scope_guard', + 'paddle.static.append_backward', 'paddle.static.gradients', + 'paddle.static.BuildStrategy', 'paddle.static.CompiledProgram', + 'paddle.static.ExecutionStrategy', + 'paddle.static.default_main_program', + 'paddle.static.default_startup_program', 'paddle.static.Program', + 'paddle.static.name_scope', 'paddle.static.program_guard', + 'paddle.static.Print', 'paddle.static.py_func', + 'paddle.static.ParallelExecutor', + 'paddle.static.WeightNormParamAttr', 'paddle.static.nn.fc', + 'paddle.static.nn.batch_norm', + 'paddle.static.nn.bilinear_tensor_product', + 'paddle.static.nn.conv2d', 'paddle.static.nn.conv2d_transpose', + 'paddle.static.nn.conv3d', 'paddle.static.nn.conv3d_transpose', + 'paddle.static.nn.create_parameter', + 'paddle.static.nn.crf_decoding', 'paddle.static.nn.data_norm', + 'paddle.static.nn.deformable_conv', 'paddle.static.nn.group_norm', + 'paddle.static.nn.hsigmoid', 'paddle.static.nn.instance_norm', + 'paddle.static.nn.layer_norm', 'paddle.static.nn.multi_box_head', + 'paddle.static.nn.nce', 'paddle.static.nn.prelu', + 'paddle.static.nn.row_conv', 'paddle.static.nn.spectral_norm', + 'paddle.static.nn.embedding' + ] + + import_file = 'run_import_modules.py' + + with open(import_file, "w") as wb: + for module in new_directory: + run_cmd = self.get_import_command(module) + wb.write("{}\n".format(run_cmd)) + + _python = sys.executable + + ps_cmd = "{} {}".format(_python, import_file) + ps_proc = subprocess.Popen( + ps_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = ps_proc.communicate() + + assert "Error" not in str(stderr), "Error: Can't" \ + " import Module {}".format(module) + + def test_old_directory(self): + old_directory = [ + 'paddle.enable_imperative', 'paddle.disable_imperative', + 'paddle.in_imperative_mode', 'paddle.imperative.to_variable', + 'paddle.imperative.enable', 'paddle.imperative.guard', + 'paddle.imperative.grad', 'paddle.imperative.no_grad', + 'paddle.imperative.save', 'paddle.imperative.load', + 'paddle.imperative.BackwardStrategy', + 'paddle.imperative.ParallelEnv', + 'paddle.imperative.prepare_context', + 'paddle.imperative.DataParallel', 'paddle.imperative.jit', + 'paddle.imperative.TracedLayer', 'paddle.imperative.declarative', + 'paddle.imperative.ProgramTranslator', + 
'paddle.imperative.TranslatedLayer', 'paddle.imperative.jit.save', + 'paddle.imperative.jit.load', + 'paddle.imperative.jit.SaveLoadConfig', + 'paddle.imperative.NoamDecay', + 'paddle.imperative.PiecewiseDecay', + 'paddle.imperative.NaturalExpDecay', + 'paddle.imperative.ExponentialDecay', + 'paddle.imperative.InverseTimeDecay', + 'paddle.imperative.PolynomialDecay', + 'paddle.imperative.CosineDecay', 'paddle.Executor', + 'paddle.global_scope', 'paddle.scope_guard', + 'paddle.append_backward', 'paddle.gradients', + 'paddle.BuildStrategy', 'paddle.CompiledProgram', + 'paddle.ExecutionStrategy', 'paddle.name_scope', + 'paddle.program_guard', 'paddle.Print', 'paddle.py_func', + 'paddle.ParallelExecutor', 'paddle.default_main_program', + 'paddle.default_startup_program', 'paddle.Program', + 'paddle.WeightNormParamAttr', 'paddle.declarative.fc', + 'paddle.declarative.batch_norm', + 'paddle.declarative.bilinear_tensor_product', + 'paddle.declarative.conv2d', 'paddle.declarative.conv2d_transpose', + 'paddle.declarative.conv3d', 'paddle.declarative.conv3d_transpose', + 'paddle.declarative.create_parameter', + 'paddle.declarative.crf_decoding', 'paddle.declarative.data_norm', + 'paddle.declarative.deformable_conv', + 'paddle.declarative.group_norm', 'paddle.declarative.hsigmoid', + 'paddle.declarative.instance_norm', 'paddle.declarative.layer_norm', + 'paddle.declarative.multi_box_head', 'paddle.declarative.nce', + 'paddle.declarative.prelu', 'paddle.declarative.row_conv', + 'paddle.declarative.spectral_norm', 'paddle.declarative.embedding' + ] + + import_file = 'run_old_import_modules.py' + + with open(import_file, "w") as wb: + cmd_context_count = """ +count = 0 +err_module = "" +""" + wb.write(cmd_context_count) + for module in old_directory: + run_cmd = self.get_import_command(module) + cmd_context_loop_template = """ +try: + {run_cmd} +except: + count += 1 +else: + err_module = "{module}" +""" + cmd_context_loop = cmd_context_loop_template.format( + run_cmd=run_cmd, module=module) + wb.write(cmd_context_loop) + cmd_context_print_template = """ +if count != {len_old_directory}: + print("Error: Module " + err_module + " should not be imported") +""" + cmd_context_print = cmd_context_print_template.format( + len_old_directory=str(len(old_directory))) + wb.write(cmd_context_print) + + _python = sys.executable + + ps_cmd = "{} {}".format(_python, import_file) + ps_proc = subprocess.Popen( + ps_cmd.strip().split(" "), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = ps_proc.communicate() + + assert "Error" not in str(stdout), str(stdout) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_eye_op.py b/python/paddle/fluid/tests/unittests/test_eye_op.py index 1a0a4ecb74d56910b3f92924085203f83b2c0145..9b541c323eceaa32591dbdc2ec149868ad7e8673 100644 --- a/python/paddle/fluid/tests/unittests/test_eye_op.py +++ b/python/paddle/fluid/tests/unittests/test_eye_op.py @@ -74,73 +74,70 @@ class TestEyeOp2(OpTest): class API_TestTensorEye(unittest.TestCase): def test_out(self): - with paddle.program_guard(paddle.Program()): + with paddle.static.program_guard(paddle.static.Program()): data = paddle.eye(10) place = fluid.CPUPlace() - exe = paddle.Executor(place) + exe = paddle.static.Executor(place) result, = exe.run(fetch_list=[data]) expected_result = np.eye(10, dtype="float32") self.assertEqual((result == expected_result).all(), True) - with paddle.program_guard(paddle.Program()): + with paddle.static.program_guard(paddle.static.Program()): 
data = paddle.eye(10, num_columns=7, dtype="float64") place = paddle.CPUPlace() - exe = paddle.Executor(place) + exe = paddle.static.Executor(place) result, = exe.run(fetch_list=[data]) expected_result = np.eye(10, 7, dtype="float64") self.assertEqual((result == expected_result).all(), True) - with paddle.program_guard(paddle.Program()): + with paddle.static.program_guard(paddle.static.Program()): data = paddle.eye(10, dtype="int64") place = paddle.CPUPlace() - exe = paddle.Executor(place) + exe = paddle.static.Executor(place) result, = exe.run(fetch_list=[data]) expected_result = np.eye(10, dtype="int64") self.assertEqual((result == expected_result).all(), True) - with paddle.imperative.guard(): - out = paddle.eye(10, dtype="int64") - expected_result = np.eye(10, dtype="int64") + paddle.disable_static() + out = paddle.eye(10, dtype="int64") + expected_result = np.eye(10, dtype="int64") + paddle.enable_static() self.assertEqual((out.numpy() == expected_result).all(), True) - with paddle.imperative.guard(): - batch_shape = [2] - out = fluid.layers.eye(10, - 10, - dtype="int64", - batch_shape=batch_shape) - result = np.eye(10, dtype="int64") - expected_result = [] - for index in reversed(batch_shape): - tmp_result = [] - for i in range(index): - tmp_result.append(result) - result = tmp_result - expected_result = np.stack(result, axis=0) + paddle.disable_static() + batch_shape = [2] + out = fluid.layers.eye(10, 10, dtype="int64", batch_shape=batch_shape) + result = np.eye(10, dtype="int64") + expected_result = [] + for index in reversed(batch_shape): + tmp_result = [] + for i in range(index): + tmp_result.append(result) + result = tmp_result + expected_result = np.stack(result, axis=0) + paddle.enable_static() self.assertEqual(out.numpy().shape == np.array(expected_result).shape, True) self.assertEqual((out.numpy() == expected_result).all(), True) - with paddle.imperative.guard(): - batch_shape = [3, 2] - out = fluid.layers.eye(10, - 10, - dtype="int64", - batch_shape=batch_shape) - result = np.eye(10, dtype="int64") - expected_result = [] - for index in reversed(batch_shape): - tmp_result = [] - for i in range(index): - tmp_result.append(result) - result = tmp_result - expected_result = np.stack(result, axis=0) + paddle.disable_static() + batch_shape = [3, 2] + out = fluid.layers.eye(10, 10, dtype="int64", batch_shape=batch_shape) + result = np.eye(10, dtype="int64") + expected_result = [] + for index in reversed(batch_shape): + tmp_result = [] + for i in range(index): + tmp_result.append(result) + result = tmp_result + expected_result = np.stack(result, axis=0) + paddle.enable_static() self.assertEqual(out.numpy().shape == np.array(expected_result).shape, True) self.assertEqual((out.numpy() == expected_result).all(), True) def test_errors(self): - with paddle.program_guard(paddle.Program()): + with paddle.static.program_guard(paddle.static.Program()): def test_num_rows_type_check(): paddle.eye(-1, dtype="int64") diff --git a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py index 6d67afe6cbfbb036ef54738a72d86ed798625112..4bd56802efd462ff63498f54699012776ce6f47c 100644 --- a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py +++ b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py @@ -191,8 +191,8 @@ class TestFlattenPython(unittest.TestCase): self.assertRaises(ValueError, test_InputError) def test_Negative(): - paddle.enable_imperative() - img = 
paddle.imperative.to_variable(x) + paddle.disable_static() + img = paddle.to_variable(x) out = paddle.flatten(img, start_axis=-2, stop_axis=-1) return out.numpy().shape diff --git a/python/paddle/fluid/tests/unittests/test_full_like_op.py b/python/paddle/fluid/tests/unittests/test_full_like_op.py index 21cbab193419be9413c487c8631671097016d959..ba14aeae990329915e080969ca74b8a9658632e9 100644 --- a/python/paddle/fluid/tests/unittests/test_full_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_full_like_op.py @@ -16,7 +16,7 @@ from __future__ import print_function import paddle import paddle.fluid.core as core -from paddle import Program, program_guard +from paddle.static import program_guard, Program import paddle.compat as cpt import unittest import numpy as np @@ -38,7 +38,7 @@ class TestFullOp(unittest.TestCase): place = paddle.CPUPlace() if core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) - exe = paddle.Executor(place) + exe = paddle.static.Executor(place) exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) @@ -53,12 +53,13 @@ class TestFullOp(unittest.TestCase): msg="full_like output is wrong, out = " + str(out_np)) def test_full_like_imperative(self): - with paddle.imperative.guard(): - input = paddle.arange(6, 10, dtype='float32') - out = paddle.full_like(input, fill_value=888.88, dtype='float32') - out_numpy = np.random.random((4)).astype("float32") - out_numpy.fill(888.88) - self.assertTrue((out.numpy() == out_numpy).all(), True) + paddle.disable_static() + input = paddle.arange(6, 10, dtype='float32') + out = paddle.full_like(input, fill_value=888.88, dtype='float32') + out_numpy = np.random.random((4)).astype("float32") + out_numpy.fill(888.88) + self.assertTrue((out.numpy() == out_numpy).all(), True) + paddle.enable_static() class TestFullOpError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_basic.py b/python/paddle/fluid/tests/unittests/test_imperative_basic.py index 9b6c307bbec5d272aa3c5644aeaabfe9d7f5df8f..8a88c2d673c4d1450064c84a8036e1cbe7179b66 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_basic.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_basic.py @@ -205,27 +205,28 @@ class TestImperative(unittest.TestCase): self.assertTrue(np.array_equal(dy_grad1, dy_grad2)) def test_functional_paddle_imperative_dygraph_context(self): - self.assertFalse(paddle.imperative.enabled()) - paddle.enable_imperative() - self.assertTrue(paddle.imperative.enabled()) + self.assertFalse(paddle.in_dynamic_mode()) + paddle.disable_static() + self.assertTrue(paddle.in_dynamic_mode()) np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) - var_inp = paddle.imperative.to_variable(np_inp) + var_inp = paddle.to_variable(np_inp) mlp = MLP(input_size=2) out = mlp(var_inp) dy_out1 = out.numpy() out.backward() dy_grad1 = mlp._linear1.weight.gradient() - paddle.disable_imperative() - self.assertFalse(paddle.imperative.enabled()) - with paddle.imperative.guard(): - self.assertTrue(paddle.imperative.enabled()) - var_inp = paddle.imperative.to_variable(np_inp) - mlp = MLP(input_size=2) - out = mlp(var_inp) - dy_out2 = out.numpy() - out.backward() - dy_grad2 = mlp._linear1.weight.gradient() - self.assertFalse(paddle.imperative.enabled()) + paddle.enable_static() + self.assertFalse(paddle.in_dynamic_mode()) + paddle.disable_static() + self.assertTrue(paddle.in_dynamic_mode()) + var_inp = paddle.to_variable(np_inp) + mlp = MLP(input_size=2) + out = mlp(var_inp) + dy_out2 = out.numpy() + 
out.backward() + dy_grad2 = mlp._linear1.weight.gradient() + paddle.enable_static() + self.assertFalse(paddle.in_dynamic_mode()) self.assertTrue(np.array_equal(dy_out1, dy_out2)) self.assertTrue(np.array_equal(dy_grad1, dy_grad2)) @@ -281,7 +282,7 @@ class TestImperative(unittest.TestCase): l0 = fluid.Linear(2, 2) self.assertTrue(l0.weight._grad_ivar() is None) l1 = fluid.Linear(2, 2) - with paddle.imperative.no_grad(): + with paddle.no_grad(): self.assertTrue(l1.weight.stop_gradient is False) tmp = l1.weight * 2 self.assertTrue(tmp.stop_gradient) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_data_parallel.py b/python/paddle/fluid/tests/unittests/test_imperative_data_parallel.py index a61950f2dc0775fcbad5fd970ee95ed5ebf1c558..d3f488d92ac455072b37274e2ce782bcf41e8cc7 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_data_parallel.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_data_parallel.py @@ -43,7 +43,7 @@ class MLP(fluid.Layer): class TestDataParallelStateDict(unittest.TestCase): def test_data_parallel_state_dict(self): with fluid.dygraph.guard(): - strategy = paddle.imperative.prepare_context() + strategy = paddle.prepare_context() mlp = MLP() parallel_mlp = dygraph.parallel.DataParallel(mlp, strategy) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py index a3c602646b700556cea53a9b06295e38baf705dd..1e509960c076339d2d56ccfcdd7a795fa462ca82 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py @@ -153,7 +153,7 @@ class TestImperativeMnist(unittest.TestCase): label.stop_gradient = True if batch_id % 10 == 0: - cost, traced_layer = paddle.imperative.TracedLayer.trace( + cost, traced_layer = paddle.jit.TracedLayer.trace( mnist, inputs=img) if program is not None: self.assertTrue(program, traced_layer.program) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py index 927e51b56d727f92b75930eb0915fb5da8931f01..eb9dc926c8207f4de4a6ce7e3d0dc89cc2b965fd 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py @@ -292,7 +292,7 @@ class TestDygraphPtbRnn(unittest.TestCase): np_t = v.numpy() self.model_base[k] = np_t - paddle.imperative.save(self.state_dict, "./test_dy") + paddle.save(self.state_dict, "./test_dy") def testLoadAndSetVarBase(self): seed = 90 @@ -373,8 +373,7 @@ class TestDygraphPtbRnn(unittest.TestCase): if isinstance(adam._learning_rate, LearningRateDecay): adam._learning_rate.step_num = 0 - para_state_dict, opti_state_dict = paddle.imperative.load( - "./test_dy") + para_state_dict, opti_state_dict = paddle.load("./test_dy") adam.set_dict(opti_state_dict) opti_dict = adam.state_dict() @@ -900,18 +899,17 @@ class TestDygraphPtbRnn(unittest.TestCase): with fluid.dygraph.guard(): emb = fluid.dygraph.Embedding([10, 10]) state_dict = emb.state_dict() - paddle.imperative.save(state_dict, - os.path.join('saved_dy', 'emb_dy')) + paddle.save(state_dict, os.path.join('saved_dy', 'emb_dy')) - para_state_dict, opti_state_dict = paddle.imperative.load( + para_state_dict, opti_state_dict = paddle.load( os.path.join('saved_dy', 'emb_dy')) self.assertTrue(opti_state_dict == None) - para_state_dict, opti_state_dict = paddle.imperative.load( + para_state_dict, opti_state_dict = paddle.load( 
os.path.join('saved_dy', 'emb_dy.pdparams')) - para_state_dict, opti_state_dict = paddle.imperative.load( + para_state_dict, opti_state_dict = paddle.load( os.path.join('saved_dy', 'emb_dy.pdopt')) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py index 2789174ba7a5805b86557a9a465c661a906bc0a7..9878e2f9ad772fe3d03addb4ced9f3b66a6cd58a 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py @@ -47,33 +47,34 @@ class TestSimpleNet(unittest.TestCase): for place in places: for dtype in ["float32", "float64"]: for sort_sum_gradient in [True, False]: - with paddle.imperative.guard(place): - backward_strategy = paddle.imperative.BackwardStrategy() - backward_strategy.sort_sum_gradient = sort_sum_gradient - # grad_clip = fluid.clip.GradientClipByGlobalNorm(5.0) + paddle.disable_static(place) + backward_strategy = paddle.BackwardStrategy() + backward_strategy.sort_sum_gradient = sort_sum_gradient + # grad_clip = fluid.clip.GradientClipByGlobalNorm(5.0) - input_word = np.array([[1, 2], [2, 1]]).astype('int64') - input = paddle.imperative.to_variable(input_word) + input_word = np.array([[1, 2], [2, 1]]).astype('int64') + input = paddle.to_variable(input_word) - simplenet = SimpleNet(20, 32, dtype) - adam = SGDOptimizer( - learning_rate=0.001, - parameter_list=simplenet.parameters( - )) # grad_clip=grad_clip - input_emb, emb = simplenet(input) + simplenet = SimpleNet(20, 32, dtype) + adam = SGDOptimizer( + learning_rate=0.001, + parameter_list=simplenet.parameters( + )) # grad_clip=grad_clip + input_emb, emb = simplenet(input) - self.assertTrue(emb.weight.gradient() is None) - self.assertTrue(input_emb.gradient() is None) + self.assertTrue(emb.weight.gradient() is None) + self.assertTrue(input_emb.gradient() is None) - input_emb.backward(backward_strategy) - adam.minimize(input_emb) - self.assertTrue(emb.weight.gradient() is not None) + input_emb.backward(backward_strategy) + adam.minimize(input_emb) + self.assertTrue(emb.weight.gradient() is not None) - emb.clear_gradients() - self.assertTrue(emb.weight.gradient() is None) + emb.clear_gradients() + self.assertTrue(emb.weight.gradient() is None) - input_emb.clear_gradient() - self.assertTrue(input_emb.gradient() is not None) + input_emb.clear_gradient() + self.assertTrue(input_emb.gradient() is not None) + paddle.enable_static() def test_selectedrows_gradient2(self): places = [fluid.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_linspace.py b/python/paddle/fluid/tests/unittests/test_linspace.py index c7bab1a135bc439eefa822087869e08a43de0c51..068993c4c1c5e770dd6cf7dc7a35b9ccc3f49aae 100644 --- a/python/paddle/fluid/tests/unittests/test_linspace.py +++ b/python/paddle/fluid/tests/unittests/test_linspace.py @@ -82,15 +82,16 @@ class TestLinspaceAPI(unittest.TestCase): assert np.array_equal(res_1, res_2) def test_name(self): - with paddle.program_guard(paddle.Program()): + with paddle.static.program_guard(paddle.static.Program()): out = paddle.linspace( 0, 10, 5, dtype='float32', name='linspace_res') assert 'linspace_res' in out.name def test_imperative(self): - with paddle.imperative.guard(): - out = paddle.linspace(0, 10, 5, dtype='float32') - np_out = np.linspace(0, 10, 5, dtype='float32') + paddle.disable_static() + out = paddle.linspace(0, 10, 5, dtype='float32') + np_out = np.linspace(0, 10, 5, dtype='float32') + 
paddle.enable_static() self.assertEqual((out.numpy() == np_out).all(), True) diff --git a/python/paddle/fluid/tests/unittests/test_multiply.py b/python/paddle/fluid/tests/unittests/test_multiply.py index 64421f6a1c6a018fdf82a7518f647099830972b3..f7f6e1f1aac678a00617e0693847b4346604a1ab 100644 --- a/python/paddle/fluid/tests/unittests/test_multiply.py +++ b/python/paddle/fluid/tests/unittests/test_multiply.py @@ -41,9 +41,9 @@ class TestMultiplyAPI(unittest.TestCase): return res def __run_dynamic_graph_case(self, x_data, y_data, axis=-1): - paddle.enable_imperative() - x = paddle.imperative.to_variable(x_data) - y = paddle.imperative.to_variable(y_data) + paddle.disable_static() + x = paddle.to_variable(x_data) + y = paddle.to_variable(y_data) res = paddle.multiply(x, y, axis=axis) return res.numpy() @@ -107,7 +107,7 @@ class TestMultiplyError(unittest.TestCase): def test_errors(self): """test_errors.""" # test static computation graph: dtype can not be int8 - paddle.disable_imperative() + paddle.enable_static() with program_guard(Program(), Program()): x = paddle.nn.data(name='x', shape=[100], dtype=np.int8) y = paddle.nn.data(name='y', shape=[100], dtype=np.int8) @@ -121,18 +121,18 @@ class TestMultiplyError(unittest.TestCase): np.random.seed(7) # test dynamic computation graph: dtype can not be int8 - paddle.enable_imperative() + paddle.disable_static() x_data = np.random.randn(200).astype(np.int8) y_data = np.random.randn(200).astype(np.int8) - x = paddle.imperative.to_variable(x_data) - y = paddle.imperative.to_variable(y_data) + x = paddle.to_variable(x_data) + y = paddle.to_variable(y_data) self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y) # test dynamic computation graph: inputs must be broadcastable x_data = np.random.rand(200, 5) y_data = np.random.rand(200) - x = paddle.imperative.to_variable(x_data) - y = paddle.imperative.to_variable(y_data) + x = paddle.to_variable(x_data) + y = paddle.to_variable(y_data) self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y) diff --git a/python/paddle/fluid/tests/unittests/test_ones_like.py b/python/paddle/fluid/tests/unittests/test_ones_like.py index 4e3b3f3edc9f92a2b268586f79dbcc3aafc05031..c1e6a3377710f98184e9541e287b911def89cd81 100644 --- a/python/paddle/fluid/tests/unittests/test_ones_like.py +++ b/python/paddle/fluid/tests/unittests/test_ones_like.py @@ -62,18 +62,18 @@ class TestOnesLikeImpeartive(unittest.TestCase): shape = [3, 4] place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( ) else fluid.CPUPlace() - with paddle.imperative.guard(place): - x = paddle.imperative.to_variable(np.ones(shape)) - for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]: - out = ones_like(x, dtype) - self.assertEqual((out.numpy() == np.ones(shape, dtype)).all(), - True) - - out = paddle.tensor.ones_like(x) + paddle.disable_static(place) + x = paddle.to_variable(np.ones(shape)) + for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]: + out = ones_like(x, dtype) self.assertEqual((out.numpy() == np.ones(shape, dtype)).all(), True) - out = paddle.tensor.creation.ones_like(x) - self.assertEqual((out.numpy() == np.ones(shape, dtype)).all(), True) + out = paddle.tensor.ones_like(x) + self.assertEqual((out.numpy() == np.ones(shape, dtype)).all(), True) + + out = paddle.tensor.creation.ones_like(x) + self.assertEqual((out.numpy() == np.ones(shape, dtype)).all(), True) + paddle.enable_static() if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_ones_op.py 
b/python/paddle/fluid/tests/unittests/test_ones_op.py index d50e820c6c6bc89a9346382c79f057e179f1da12..47ce37964324208a032c821360d6ab10666abcb5 100644 --- a/python/paddle/fluid/tests/unittests/test_ones_op.py +++ b/python/paddle/fluid/tests/unittests/test_ones_op.py @@ -27,35 +27,35 @@ import numpy as np class ApiOnesTest(unittest.TestCase): def test_paddle_ones(self): - with paddle.program_guard(paddle.Program()): + with paddle.static.program_guard(paddle.static.Program()): ones = paddle.ones(shape=[10]) place = paddle.CPUPlace() - exe = paddle.Executor(place) + exe = paddle.static.Executor(place) result, = exe.run(fetch_list=[ones]) expected_result = np.ones(10, dtype="float32") self.assertEqual((result == expected_result).all(), True) - with paddle.program_guard(paddle.Program()): + with paddle.static.program_guard(paddle.static.Program()): ones = paddle.ones(shape=[10], dtype="float64") place = paddle.CPUPlace() - exe = paddle.Executor(place) + exe = paddle.static.Executor(place) result, = exe.run(fetch_list=[ones]) expected_result = np.ones(10, dtype="float64") self.assertEqual((result == expected_result).all(), True) - with paddle.program_guard(paddle.Program()): + with paddle.static.program_guard(paddle.static.Program()): ones = paddle.ones(shape=[10], dtype="int64") place = paddle.CPUPlace() - exe = paddle.Executor(place) + exe = paddle.static.Executor(place) result, = exe.run(fetch_list=[ones]) expected_result = np.ones(10, dtype="int64") self.assertEqual((result == expected_result).all(), True) def test_fluid_ones(self): - with paddle.program_guard(paddle.Program()): + with paddle.static.program_guard(paddle.static.Program()): ones = fluid.layers.ones(shape=[10], dtype="int64") place = paddle.CPUPlace() - exe = paddle.Executor(place) + exe = paddle.static.Executor(place) result, = exe.run(fetch_list=[ones]) expected_result = np.ones(10, dtype="int64") self.assertEqual((result == expected_result).all(), True) @@ -64,25 +64,25 @@ class ApiOnesTest(unittest.TestCase): class ApiOnesZerosError(unittest.TestCase): def test_errors(self): def test_error1(): - with paddle.program_guard(paddle.Program()): + with paddle.static.program_guard(paddle.static.Program()): ones = paddle.ones(shape=10, dtype="int64") self.assertRaises(TypeError, test_error1) def test_error2(): - with paddle.program_guard(paddle.Program()): + with paddle.static.program_guard(paddle.static.Program()): ones = paddle.ones(shape=10) self.assertRaises(TypeError, test_error2) def test_error3(): - with paddle.program_guard(paddle.Program()): + with paddle.static.program_guard(paddle.static.Program()): ones = fluid.layers.ones(shape=10, dtype="int64") self.assertRaises(TypeError, test_error3) def test_error4(): - with paddle.program_guard(paddle.Program()): + with paddle.static.program_guard(paddle.static.Program()): ones = fluid.layers.ones(shape=[10], dtype="int8") self.assertRaises(TypeError, test_error4) diff --git a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py index 50e587478957a9e5c359d0c8a9d606859f17e994..858d56c1fc04f61c9dd281a633f7be9aceff8338 100644 --- a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py +++ b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py @@ -22,7 +22,7 @@ import paddle def _dygraph_guard_(func): def __impl__(*args, **kwargs): - if paddle.in_imperative_mode(): + if paddle.in_dynamic_mode(): return func(*args, **kwargs) else: with 
fluid.dygraph.guard(): @@ -54,7 +54,7 @@ class TestDygraphDoubleGrad(TestCase): allow_unused=False): backward_strategy = fluid.dygraph.BackwardStrategy() backward_strategy.sort_sum_gradient = self.sort_sum_gradient - return paddle.imperative.grad( + return paddle.grad( outputs=outputs, inputs=inputs, grad_outputs=grad_outputs, diff --git a/python/paddle/fluid/tests/unittests/test_randint_op.py b/python/paddle/fluid/tests/unittests/test_randint_op.py index 5b2d5be346a9b205cb44373f58a413baa6c8a2fa..715d66aa3332cef649f867052400b9769bacd979 100644 --- a/python/paddle/fluid/tests/unittests/test_randint_op.py +++ b/python/paddle/fluid/tests/unittests/test_randint_op.py @@ -19,7 +19,7 @@ import numpy as np from op_test import OpTest import paddle from paddle.fluid import core -from paddle import Program, program_guard +from paddle.static import program_guard, Program def output_hist(out): @@ -132,7 +132,7 @@ class TestRandintAPI(unittest.TestCase): place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda( ) else paddle.CPUPlace() - exe = paddle.Executor(place) + exe = paddle.static.Executor(place) outs = exe.run( feed={'var_shape': np.array([100, 100]).astype('int64')}, fetch_list=[out1, out2, out3, out4, out5]) @@ -141,13 +141,14 @@ class TestRandintAPI(unittest.TestCase): class TestRandintImperative(unittest.TestCase): def test_api(self): n = 10 - with paddle.imperative.guard(): - x1 = paddle.randint(n, shape=[10], dtype="int32") - x2 = paddle.tensor.randint(n) - x3 = paddle.tensor.random.randint(n) - for i in [x1, x2, x3]: - for j in i.numpy().tolist(): - self.assertTrue((j >= 0 and j < n)) + paddle.disable_static() + x1 = paddle.randint(n, shape=[10], dtype="int32") + x2 = paddle.tensor.randint(n) + x3 = paddle.tensor.random.randint(n) + for i in [x1, x2, x3]: + for j in i.numpy().tolist(): + self.assertTrue((j >= 0 and j < n)) + paddle.enable_static() if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_randn_op.py b/python/paddle/fluid/tests/unittests/test_randn_op.py index f65cc6dc53b7e3541016447d8510bd3d38a53b17..8b560f18f9f7bc614e38c1245f48398f808525ed 100644 --- a/python/paddle/fluid/tests/unittests/test_randn_op.py +++ b/python/paddle/fluid/tests/unittests/test_randn_op.py @@ -18,7 +18,7 @@ import unittest import numpy as np import paddle import paddle.fluid.core as core -from paddle import Program, program_guard +from paddle.static import program_guard, Program class TestRandnOp(unittest.TestCase): @@ -39,7 +39,7 @@ class TestRandnOp(unittest.TestCase): place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda( ) else paddle.CPUPlace() - exe = paddle.Executor(place) + exe = paddle.static.Executor(place) res = exe.run(train_program, feed={'X': np.array( shape, dtype='int32')}, @@ -55,20 +55,21 @@ class TestRandnOpForDygraph(unittest.TestCase): shape = [1000, 784] place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda( ) else paddle.CPUPlace() - with paddle.imperative.guard(place): - x1 = paddle.randn(shape, 'float32') - x2 = paddle.randn(shape, 'float64') + paddle.disable_static(place) + x1 = paddle.randn(shape, 'float32') + x2 = paddle.randn(shape, 'float64') - dim_1 = paddle.fill_constant([1], "int64", 20) - dim_2 = paddle.fill_constant([1], "int32", 50) - x3 = paddle.randn(shape=[dim_1, dim_2, 784]) + dim_1 = paddle.fill_constant([1], "int64", 20) + dim_2 = paddle.fill_constant([1], "int32", 50) + x3 = paddle.randn(shape=[dim_1, dim_2, 784]) - var_shape = paddle.imperative.to_variable(np.array(shape)) - x4 = paddle.randn(var_shape) + var_shape = 
paddle.to_variable(np.array(shape)) + x4 = paddle.randn(var_shape) - for out in [x1, x2, x3, x4]: - self.assertAlmostEqual(np.mean(out.numpy()), .0, delta=0.1) - self.assertAlmostEqual(np.std(out.numpy()), 1., delta=0.1) + for out in [x1, x2, x3, x4]: + self.assertAlmostEqual(np.mean(out.numpy()), .0, delta=0.1) + self.assertAlmostEqual(np.std(out.numpy()), 1., delta=0.1) + paddle.enable_static() class TestRandnOpError(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_randperm_op.py b/python/paddle/fluid/tests/unittests/test_randperm_op.py index 6938b8ef1e051777c867796062e5e7cbed6d7fa4..4361a45f1568f5f047ee03090bd3ef28a8d6654f 100644 --- a/python/paddle/fluid/tests/unittests/test_randperm_op.py +++ b/python/paddle/fluid/tests/unittests/test_randperm_op.py @@ -17,7 +17,7 @@ import numpy as np from op_test import OpTest import paddle import paddle.fluid.core as core -from paddle import Program, program_guard +from paddle.static import program_guard, Program def check_randperm_out(n, data_np): @@ -108,7 +108,7 @@ class TestRandpermAPI(unittest.TestCase): x1 = paddle.randperm(n) x2 = paddle.randperm(n, 'float32') - exe = paddle.Executor(place) + exe = paddle.static.Executor(place) res = exe.run(fetch_list=[x1, x2]) self.assertEqual(res[0].dtype, np.int64) @@ -119,13 +119,14 @@ class TestRandpermAPI(unittest.TestCase): class TestRandpermImperative(unittest.TestCase): def test_out(self): - with paddle.imperative.guard(): - n = 10 - for dtype in ['int32', np.int64, 'float32', 'float64']: - data_p = paddle.randperm(n, dtype) - data_np = data_p.numpy() - self.assertTrue( - check_randperm_out(n, data_np), msg=error_msg(data_np)) + paddle.disable_static() + n = 10 + for dtype in ['int32', np.int64, 'float32', 'float64']: + data_p = paddle.randperm(n, dtype) + data_np = data_p.numpy() + self.assertTrue( + check_randperm_out(n, data_np), msg=error_msg(data_np)) + paddle.enable_static() if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_retain_graph.py b/python/paddle/fluid/tests/unittests/test_retain_graph.py index bc50cf197f63e6082ea1d3fdbff1891f500e5b9a..53fde086dd23e681bd79ec663a7acb82759193bc 100644 --- a/python/paddle/fluid/tests/unittests/test_retain_graph.py +++ b/python/paddle/fluid/tests/unittests/test_retain_graph.py @@ -17,7 +17,7 @@ import paddle import paddle.fluid as fluid import unittest -paddle.enable_imperative() +paddle.disable_static() SEED = 2020 np.random.seed(SEED) fluid.default_main_program().random_seed = SEED @@ -73,7 +73,7 @@ class TestRetainGraph(unittest.TestCase): outs = paddle.fill_constant(disc_interpolates.shape, disc_interpolates.dtype, 1.0) - gradients = paddle.imperative.grad( + gradients = paddle.grad( outputs=disc_interpolates, inputs=fake_AB, grad_outputs=outs, @@ -103,8 +103,8 @@ class TestRetainGraph(unittest.TestCase): A = np.random.rand(2, 3, 32, 32).astype('float32') B = np.random.rand(2, 3, 32, 32).astype('float32') - realA = paddle.imperative.to_variable(A) - realB = paddle.imperative.to_variable(B) + realA = paddle.to_variable(A) + realB = paddle.to_variable(B) fakeB = g(realA) optim_d.clear_gradients() diff --git a/python/paddle/fluid/tests/unittests/test_sort_op.py b/python/paddle/fluid/tests/unittests/test_sort_op.py index 087586aa89607a58493c2d4427cbb6d30b31f0da..015b72fd1c5275f758a109451110f61b97c4a0c7 100644 --- a/python/paddle/fluid/tests/unittests/test_sort_op.py +++ b/python/paddle/fluid/tests/unittests/test_sort_op.py @@ -17,7 +17,6 @@ from __future__ import print_function import unittest 
diff --git a/python/paddle/fluid/tests/unittests/test_sort_op.py b/python/paddle/fluid/tests/unittests/test_sort_op.py
index 087586aa89607a58493c2d4427cbb6d30b31f0da..015b72fd1c5275f758a109451110f61b97c4a0c7 100644
--- a/python/paddle/fluid/tests/unittests/test_sort_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sort_op.py
@@ -17,7 +17,6 @@ from __future__ import print_function
 import unittest
 import paddle
 import paddle.fluid as fluid
-import paddle.imperative as imperative
 import paddle.fluid.layers as layers
 import numpy as np
 import six
@@ -72,16 +71,17 @@ class TestSortDygraph(unittest.TestCase):
             self.place = core.CPUPlace()

     def test_api_0(self):
-        with imperative.guard(self.place):
-            var_x = imperative.to_variable(self.input_data)
-            out = paddle.sort(var_x)
-            self.assertEqual((np.sort(self.input_data) == out.numpy()).all(),
-                             True)
+        paddle.disable_static(self.place)
+        var_x = paddle.to_variable(self.input_data)
+        out = paddle.sort(var_x)
+        self.assertEqual((np.sort(self.input_data) == out.numpy()).all(), True)
+        paddle.enable_static()

     def test_api_1(self):
-        with imperative.guard(self.place):
-            var_x = imperative.to_variable(self.input_data)
-            out = paddle.sort(var_x, axis=-1)
-            self.assertEqual(
-                (np.sort(
-                    self.input_data, axis=-1) == out.numpy()).all(), True)
+        paddle.disable_static(self.place)
+        var_x = paddle.to_variable(self.input_data)
+        out = paddle.sort(var_x, axis=-1)
+        self.assertEqual(
+            (np.sort(
+                self.input_data, axis=-1) == out.numpy()).all(), True)
+        paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py
index 448751f19dbe76fdbd856d0464e36390c69aba41..21e618a46201659fe0c4e5c67d1d9a8bafd70f1b 100644
--- a/python/paddle/fluid/tests/unittests/test_zeros_like_op.py
+++ b/python/paddle/fluid/tests/unittests/test_zeros_like_op.py
@@ -62,20 +62,19 @@ class TestZerosLikeImpeartive(unittest.TestCase):
         shape = [3, 4]
         place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
         ) else fluid.CPUPlace()
-        with paddle.imperative.guard(place):
-            x = paddle.imperative.to_variable(np.ones(shape))
-            for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]:
-                out = zeros_like(x, dtype)
-                self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(),
-                                 True)
-
-            out = paddle.tensor.zeros_like(x)
+        paddle.disable_static(place)
+        x = paddle.to_variable(np.ones(shape))
+        for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]:
+            out = zeros_like(x, dtype)
             self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(),
                              True)

-            out = paddle.tensor.creation.zeros_like(x)
-            self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(),
-                             True)
+        out = paddle.tensor.zeros_like(x)
+        self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True)
+
+        out = paddle.tensor.creation.zeros_like(x)
+        self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True)
+        paddle.enable_static()


 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_zeros_op.py b/python/paddle/fluid/tests/unittests/test_zeros_op.py
index 0cf51a87cf6b844c053ab1335e20df108d16e177..23dec935507fd977f884e952451b5ea98c935893 100644
--- a/python/paddle/fluid/tests/unittests/test_zeros_op.py
+++ b/python/paddle/fluid/tests/unittests/test_zeros_op.py
@@ -39,15 +39,15 @@ class ApiZerosTest(unittest.TestCase):
         with program_guard(Program()):
             zeros = paddle.zeros(shape=[10], dtype="float64")
             place = paddle.CPUPlace()
-            exe = paddle.Executor(place)
+            exe = paddle.static.Executor(place)
             result, = exe.run(fetch_list=[zeros])
             expected_result = np.zeros(10, dtype="float64")
         self.assertEqual((result == expected_result).all(), True)

-        with paddle.program_guard(Program()):
+        with paddle.static.program_guard(Program()):
             zeros = paddle.zeros(shape=[10], dtype="int64")
             place = paddle.CPUPlace()
-            exe = paddle.Executor(place)
+            exe = paddle.static.Executor(place)
             result, = exe.run(fetch_list=[zeros])
             expected_result = np.zeros(10, dtype="int64")
         self.assertEqual((result == expected_result).all(), True)
@@ -55,7 +55,7 @@ class ApiZerosTest(unittest.TestCase):
         with program_guard(Program()):
             zeros = paddle.zeros(shape=[10], dtype="int64")
             place = paddle.CPUPlace()
-            exe = paddle.Executor(place)
+            exe = paddle.static.Executor(place)
             result, = exe.run(fetch_list=[zeros])
             expected_result = np.zeros(10, dtype="int64")
         self.assertEqual((result == expected_result).all(), True)
@@ -64,7 +64,7 @@ class ApiZerosTest(unittest.TestCase):
         out_np = np.zeros(shape=(1), dtype='float32')
         out = paddle.zeros(shape=[1], dtype="float32")
         place = paddle.CPUPlace()
-        exe = paddle.Executor(place)
+        exe = paddle.static.Executor(place)
         result = exe.run(fetch_list=[out])
         self.assertEqual((result == out_np).all(), True)
@@ -72,7 +72,7 @@ class ApiZerosTest(unittest.TestCase):
         with program_guard(Program()):
             zeros = fluid.layers.zeros(shape=[10], dtype="int64")
             place = paddle.CPUPlace()
-            exe = paddle.Executor(place)
+            exe = paddle.static.Executor(place)
             result, = exe.run(fetch_list=[zeros])
             expected_result = np.zeros(10, dtype="int64")
         self.assertEqual((result == expected_result).all(), True)
@@ -81,13 +81,13 @@ class ApiZerosTest(unittest.TestCase):
 class ApiZerosError(unittest.TestCase):
     def test_errors(self):
         def test_error1():
-            with paddle.program_guard(fluid.Program()):
+            with paddle.static.program_guard(fluid.Program()):
                 ones = fluid.layers.zeros(shape=10, dtype="int64")

         self.assertRaises(TypeError, test_error1)

         def test_error2():
-            with paddle.program_guard(fluid.Program()):
+            with paddle.static.program_guard(fluid.Program()):
                 ones = fluid.layers.zeros(shape=[10], dtype="int8")

         self.assertRaises(TypeError, test_error2)
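Note: the static-graph side of the split is symmetrical: `Executor`, `Program`, and `program_guard` move off the `paddle` root into `paddle.static`, which these tests now exercise. A minimal sketch of the updated idiom (shape and dtype values are illustrative):

    import paddle
    from paddle.static import Executor, Program, program_guard

    with program_guard(Program()):
        zeros = paddle.zeros(shape=[10], dtype='float64')
        exe = Executor(paddle.CPUPlace())
        result, = exe.run(fetch_list=[zeros])  # ndarray of ten zeros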
diff --git a/python/paddle/framework/__init__.py b/python/paddle/framework/__init__.py
index 3078f432c3a70308c929a4fdface215fb79eebcb..0c26e4c5178883883dc9e364dff90e67c4667ce1 100644
--- a/python/paddle/framework/__init__.py
+++ b/python/paddle/framework/__init__.py
@@ -14,23 +14,49 @@

 # TODO: import framework api under this directory
 __all__ = [
-    'append_backward', 'gradients', 'Executor', 'global_scope', 'scope_guard',
-    'BuildStrategy', 'CompiledProgram', 'default_main_program',
-    'default_startup_program', 'create_global_var', 'create_parameter', 'Print',
-    'py_func', 'ExecutionStrategy', 'name_scope', 'ParallelExecutor',
-    'ParamAttr', 'Program', 'program_guard', 'Variable', 'WeightNormParamAttr',
+    'create_global_var', 'create_parameter', 'ParamAttr', 'Variable',
     'CPUPlace', 'CUDAPlace', 'CUDAPinnedPlace'
 ]

+__all__ += [
+    'BackwardStrategy', 'grad', 'LayerList', 'load', 'save', 'prepare_context',
+    'to_variable', 'no_grad', 'ParallelEnv', 'DataParallel'
+]
+
+__all__ += [
+    'NoamDecay', 'PiecewiseDecay', 'NaturalExpDecay', 'ExponentialDecay',
+    'InverseTimeDecay', 'PolynomialDecay', 'CosineDecay'
+]
+
 from . import random
 from .random import manual_seed
-from ..fluid.executor import Executor, global_scope, scope_guard
-from ..fluid.backward import append_backward, gradients
-from ..fluid.compiler import BuildStrategy, CompiledProgram, ExecutionStrategy
-from ..fluid.framework import default_main_program, default_startup_program, name_scope, Program, program_guard, Variable
-from ..fluid.layers.control_flow import Print
-from ..fluid.layers.nn import py_func
-from ..fluid.parallel_executor import ParallelExecutor
-from ..fluid.param_attr import ParamAttr, WeightNormParamAttr
-from ..fluid.layers.tensor import create_global_var, create_parameter
-from ..fluid.core import CPUPlace, CUDAPlace, CUDAPinnedPlace
+
+from ..fluid.framework import Variable #DEFINE_ALIAS
+from ..fluid.param_attr import ParamAttr #DEFINE_ALIAS
+from ..fluid.layers.tensor import create_global_var #DEFINE_ALIAS
+from ..fluid.layers.tensor import create_parameter #DEFINE_ALIAS
+from ..fluid.core import CPUPlace #DEFINE_ALIAS
+from ..fluid.core import CUDAPlace #DEFINE_ALIAS
+from ..fluid.core import CUDAPinnedPlace #DEFINE_ALIAS
+
+from paddle.fluid import core #DEFINE_ALIAS
+from ..fluid.dygraph.base import no_grad #DEFINE_ALIAS
+from ..fluid.dygraph.base import to_variable #DEFINE_ALIAS
+from ..fluid.dygraph.base import grad #DEFINE_ALIAS
+from ..fluid.dygraph.checkpoint import load_dygraph #DEFINE_ALIAS
+from ..fluid.dygraph.checkpoint import save_dygraph #DEFINE_ALIAS
+from ..fluid.dygraph.checkpoint import load_dygraph as load #DEFINE_ALIAS
+from ..fluid.dygraph.checkpoint import save_dygraph as save #DEFINE_ALIAS
+from ..fluid.dygraph.parallel import prepare_context #DEFINE_ALIAS
+from ..fluid.dygraph.parallel import ParallelEnv #DEFINE_ALIAS
+from ..fluid.dygraph.parallel import DataParallel #DEFINE_ALIAS
+
+from ..fluid.dygraph.learning_rate_scheduler import NoamDecay #DEFINE_ALIAS
+from ..fluid.dygraph.learning_rate_scheduler import PiecewiseDecay #DEFINE_ALIAS
+from ..fluid.dygraph.learning_rate_scheduler import NaturalExpDecay #DEFINE_ALIAS
+from ..fluid.dygraph.learning_rate_scheduler import ExponentialDecay #DEFINE_ALIAS
+from ..fluid.dygraph.learning_rate_scheduler import InverseTimeDecay #DEFINE_ALIAS
+from ..fluid.dygraph.learning_rate_scheduler import PolynomialDecay #DEFINE_ALIAS
+from ..fluid.dygraph.learning_rate_scheduler import CosineDecay #DEFINE_ALIAS
+
+BackwardStrategy = core.BackwardStrategy
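Note: `paddle.framework` now carries the dygraph entry points that `paddle/__init__.py` re-exports, including `save`/`load` as aliases of `save_dygraph`/`load_dygraph`. A hedged sketch of checkpointing through the new aliases (the `Linear` layer and the path prefix are illustrative; these aliases take a state dict and a path prefix, unlike `paddle.static.save`/`load`, which come from `tensor.io`):

    import paddle
    import paddle.fluid as fluid

    paddle.disable_static()
    layer = fluid.dygraph.Linear(8, 2)
    paddle.save(layer.state_dict(), "work/linear")   # alias of save_dygraph
    state_dict, _ = paddle.load("work/linear")       # alias of load_dygraph
    layer.set_dict(state_dict)
    paddle.enable_static()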
diff --git a/python/paddle/imperative/__init__.py b/python/paddle/imperative/__init__.py
deleted file mode 100644
index 489888a2fef39b2cca5b918a412d231784471ddc..0000000000000000000000000000000000000000
--- a/python/paddle/imperative/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# define api used to run in imperative mode
-__all__ = [
-    'BackwardStrategy', 'enabled', 'grad', 'guard', 'LayerList', 'load', 'save',
-    'prepare_context', 'to_variable', 'TracedLayer', 'no_grad', 'ParallelEnv',
-    'ProgramTranslator', 'declarative', 'DataParallel', 'TranslatedLayer', 'jit'
-]
-
-__all__ += [
-    'NoamDecay', 'PiecewiseDecay', 'NaturalExpDecay', 'ExponentialDecay',
-    'InverseTimeDecay', 'PolynomialDecay', 'CosineDecay'
-]
-
-from paddle.fluid import core
-from ..fluid.dygraph.base import enabled, guard, no_grad, to_variable, grad
-from ..fluid.dygraph.checkpoint import load_dygraph as load
-from ..fluid.dygraph.checkpoint import save_dygraph as save
-from ..fluid.dygraph.parallel import prepare_context, ParallelEnv, DataParallel
-from ..fluid.dygraph.jit import TracedLayer, declarative
-from ..fluid.dygraph import ProgramTranslator
-from . import jit
-
-from ..fluid.dygraph.learning_rate_scheduler import NoamDecay, PiecewiseDecay, NaturalExpDecay, ExponentialDecay, \
-    InverseTimeDecay, PolynomialDecay, CosineDecay
-
-BackwardStrategy = core.BackwardStrategy
diff --git a/python/paddle/incubate/complex/tensor/math.py b/python/paddle/incubate/complex/tensor/math.py
index 5c26d6da8d9bb002a117ee40e0ce209c3fa0db9f..52fdbcbc82be291f356067258789c876fede8f16 100644
--- a/python/paddle/incubate/complex/tensor/math.py
+++ b/python/paddle/incubate/complex/tensor/math.py
@@ -261,8 +261,8 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
             case1 = np.random.randn(3, 10, 10).astype('float64') + 1j * np.random.randn(3, 10, 10).astype('float64')

-            paddle.enable_imperative()
-            case1 = paddle.imperative.to_variable(case1)
+            paddle.disable_static()
+            case1 = paddle.to_variable(case1)
             data1 = paddle.complex.trace(case1, offset=1, axis1=1, axis2=2) # data1.shape = [3]
     """
     complex_variable_exists([x], "trace")
diff --git a/python/paddle/incubate/hapi/text/text.py b/python/paddle/incubate/hapi/text/text.py
index 1424ce0381ac22e3fc15db854e653e0c2632cf22..a2940fbe6cf483bce905c596a4b50294129fab54 100644
--- a/python/paddle/incubate/hapi/text/text.py
+++ b/python/paddle/incubate/hapi/text/text.py
@@ -1804,7 +1804,7 @@ class DynamicDecode(Layer):
             from paddle.fluid.layers import BeamSearchDecoder
             from paddle.incubate.hapi.text import StackedLSTMCell, DynamicDecode

-            paddle.enable_dygraph()
+            paddle.disable_static()

             vocab_size, d_model, = 100, 32
             encoder_output = paddle.rand((2, 4, d_model))
@@ -2278,7 +2278,7 @@ class TransformerCell(RNNCell):
             from paddle.incubate.hapi.text import TransformerBeamSearchDecoder
             from paddle.incubate.hapi.text import DynamicDecode

-            paddle.enable_dygraph()
+            paddle.disable_static()

             class Embedder(fluid.dygraph.Layer):
                 def __init__(self):
@@ -2445,7 +2445,7 @@ class TransformerBeamSearchDecoder(layers.BeamSearchDecoder):
             from paddle.incubate.hapi.text import TransformerBeamSearchDecoder
             from paddle.incubate.hapi.text import DynamicDecode

-            paddle.enable_dygraph()
+            paddle.disable_static()

             class Embedder(fluid.dygraph.Layer):
                 def __init__(self):
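Note: with `paddle.imperative` deleted, its public surface maps onto the new layout roughly as follows (a hedged summary of the moves in this diff; in later 2.0 releases `to_variable` was itself superseded by `paddle.to_tensor`):

    # paddle.imperative.guard()        -> paddle.disable_static() / paddle.enable_static()
    # paddle.imperative.to_variable    -> paddle.to_variable
    # paddle.imperative.grad / no_grad -> paddle.grad / paddle.no_grad
    # paddle.imperative.save / load    -> paddle.save / paddle.load
    # paddle.imperative.jit, TracedLayer, declarative, ProgramTranslator
    #                                  -> paddle.jit (declarative becomes paddle.jit.to_static)
    # NoamDecay, PiecewiseDecay, ...   -> re-exported via paddle.framework
    import paddle
    assert callable(paddle.grad) and callable(paddle.to_variable)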
diff --git a/python/paddle/imperative/jit/__init__.py b/python/paddle/jit/__init__.py
similarity index 50%
rename from python/paddle/imperative/jit/__init__.py
rename to python/paddle/jit/__init__.py
index 85fccf6e689ebf606092df8c3f94f561a68705ed..f098dc591cc3e58811d2db6bd170b7eef8c92366 100644
--- a/python/paddle/imperative/jit/__init__.py
+++ b/python/paddle/jit/__init__.py
@@ -12,7 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ...fluid.dygraph.jit import save, load, SaveLoadConfig
-from ...fluid.dygraph.io import TranslatedLayer
+from ..fluid.dygraph.jit import save #DEFINE_ALIAS
+from ..fluid.dygraph.jit import load #DEFINE_ALIAS
+from ..fluid.dygraph.jit import SaveLoadConfig #DEFINE_ALIAS
+from ..fluid.dygraph.jit import TracedLayer #DEFINE_ALIAS
+from ..fluid.dygraph.jit import declarative as __impl__ #DEFINE_ALIAS
+from ..fluid.dygraph.jit import declarative as to_static #DEFINE_ALIAS
+from ..fluid.dygraph import ProgramTranslator #DEFINE_ALIAS
+from ..fluid.dygraph.io import TranslatedLayer #DEFINE_ALIAS

-__all__ = ['save', 'load', 'SaveLoadConfig']
+__all__ = [
+    'save', 'load', 'SaveLoadConfig', 'TracedLayer', 'to_static',
+    'ProgramTranslator', 'TranslatedLayer'
+]
diff --git a/python/paddle/static/__init__.py b/python/paddle/static/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d31e5173f8b7d2bd1ab89267b11bdbbad9feb518
--- /dev/null
+++ b/python/paddle/static/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO: import framework api under this directory
+__all__ = [
+    'append_backward', 'gradients', 'Executor', 'global_scope', 'scope_guard',
+    'BuildStrategy', 'CompiledProgram', 'Print', 'py_func', 'ExecutionStrategy',
+    'name_scope', 'ParallelExecutor', 'program_guard', 'WeightNormParamAttr',
+    'default_main_program', 'default_startup_program', 'Program', 'save', 'load'
+]
+
+from ..fluid.executor import Executor #DEFINE_ALIAS
+from ..fluid.executor import global_scope #DEFINE_ALIAS
+from ..fluid.executor import scope_guard #DEFINE_ALIAS
+from ..fluid.backward import append_backward #DEFINE_ALIAS
+from ..fluid.backward import gradients #DEFINE_ALIAS
+from ..fluid.compiler import BuildStrategy #DEFINE_ALIAS
+from ..fluid.compiler import CompiledProgram #DEFINE_ALIAS
+from ..fluid.compiler import ExecutionStrategy #DEFINE_ALIAS
+from ..fluid.framework import default_main_program #DEFINE_ALIAS
+from ..fluid.framework import default_startup_program #DEFINE_ALIAS
+from ..fluid.framework import Program #DEFINE_ALIAS
+from ..fluid.framework import name_scope #DEFINE_ALIAS
+from ..fluid.framework import program_guard #DEFINE_ALIAS
+from ..fluid.layers.control_flow import Print #DEFINE_ALIAS
+from ..fluid.layers.nn import py_func #DEFINE_ALIAS
+from ..fluid.parallel_executor import ParallelExecutor #DEFINE_ALIAS
+from ..fluid.param_attr import WeightNormParamAttr #DEFINE_ALIAS
+from ..tensor.io import save #DEFINE_ALIAS
+from ..tensor.io import load #DEFINE_ALIAS
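Note: the rename above also gives `declarative` a clearer public name, `paddle.jit.to_static`. A hedged sketch of the renamed decorator (the function body is illustrative; `paddle.jit.save`/`load` keep their `fluid.dygraph.jit` semantics):

    import numpy as np
    import paddle
    from paddle.jit import to_static  # formerly paddle.imperative.jit's declarative

    paddle.disable_static()

    @to_static
    def double_it(x):
        # traced into a static program on the first call
        return x * 2

    out = double_it(paddle.to_variable(np.ones([2, 2], dtype='float32')))
    print(out.numpy())  # all 2.0
    paddle.enable_static()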
diff --git a/python/paddle/static/nn/__init__.py b/python/paddle/static/nn/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..91da0926b1870bb4a7999e62965c135dcf36bf25
--- /dev/null
+++ b/python/paddle/static/nn/__init__.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = [
+    'fc',
+    'batch_norm',
+    'embedding',
+    'bilinear_tensor_product',
+    'conv2d',
+    'conv2d_transpose',
+    'conv3d',
+    'conv3d_transpose',
+    'create_parameter',
+    'crf_decoding',
+    'data_norm',
+    'deformable_conv',
+    'group_norm',
+    'hsigmoid',
+    'instance_norm',
+    'layer_norm',
+    'multi_box_head',
+    'nce',
+    'prelu',
+    'row_conv',
+    'spectral_norm',
+]
+
+from ...fluid.layers import fc #DEFINE_ALIAS
+from ...fluid.layers import batch_norm #DEFINE_ALIAS
+from ...fluid.layers import bilinear_tensor_product #DEFINE_ALIAS
+from ...fluid.layers import conv2d #DEFINE_ALIAS
+from ...fluid.layers import conv2d_transpose #DEFINE_ALIAS
+from ...fluid.layers import conv3d #DEFINE_ALIAS
+from ...fluid.layers import conv3d_transpose #DEFINE_ALIAS
+from ...fluid.layers import create_parameter #DEFINE_ALIAS
+from ...fluid.layers import crf_decoding #DEFINE_ALIAS
+from ...fluid.layers import data_norm #DEFINE_ALIAS
+from ...fluid.layers import deformable_conv #DEFINE_ALIAS
+from ...fluid.layers import group_norm #DEFINE_ALIAS
+from ...fluid.layers import hsigmoid #DEFINE_ALIAS
+from ...fluid.layers import instance_norm #DEFINE_ALIAS
+from ...fluid.layers import layer_norm #DEFINE_ALIAS
+from ...fluid.layers import multi_box_head #DEFINE_ALIAS
+from ...fluid.layers import nce #DEFINE_ALIAS
+from ...fluid.layers import prelu #DEFINE_ALIAS
+from ...fluid.layers import row_conv #DEFINE_ALIAS
+from ...fluid.layers import spectral_norm #DEFINE_ALIAS
+
+from ...fluid.input import embedding #DEFINE_ALIAS
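Note: these entries are thin aliases over the existing `fluid.layers` implementations, so they are only meaningful inside a static program. A minimal sketch (layer sizes are illustrative):

    import paddle
    import paddle.static.nn as nn
    from paddle.static import Program, program_guard

    with program_guard(Program()):
        image = paddle.data(name='image', shape=[None, 784], dtype='float32')
        hidden = nn.fc(input=image, size=256, act='relu')  # alias of fluid.layers.fc
        logits = nn.fc(input=hidden, size=10)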
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 158660e7659b6f73e8b7b19483cc8913fc612d40..e625e5496c7e3dd3df2c514f764af52d0adceeca 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -83,7 +83,7 @@ def full_like(x, fill_value, dtype=None, name=None):
           import paddle
           import numpy as np

-          paddle.enable_imperative()  # Now we are in imperative mode
+          paddle.disable_static()  # Now we are in imperative mode
           input = paddle.full(shape=[2, 3], fill_value=0.0, dtype='float32', name='input')
           output = paddle.full_like(input, 2.0)
           # [[2. 2. 2.]
@@ -143,7 +143,7 @@ def ones(shape, dtype=None, name=None):
         .. code-block:: python

           import paddle
-          paddle.enable_imperative()
+          paddle.disable_static()

           # default dtype for ones OP
           data1 = paddle.ones(shape=[3, 2])
@@ -199,9 +199,9 @@ def ones_like(x, dtype=None, name=None):
            import paddle
            import numpy as np

-           paddle.enable_imperative()
+           paddle.disable_static()

-           x = paddle.imperative.to_variable(np.array([1,2,3], dtype='float32'))
+           x = paddle.to_variable(np.array([1,2,3], dtype='float32'))
           out1 = paddle.ones_like(x)  # [1., 1., 1.]
           out2 = paddle.ones_like(x, dtype='int32')  # [1, 1, 1]
@@ -236,7 +236,7 @@ def zeros(shape, dtype=None, name=None):

           import paddle

-          paddle.enable_imperative()  # Now we are in imperative mode
+          paddle.disable_static()  # Now we are in imperative mode
           data = paddle.zeros(shape=[3, 2], dtype='float32')
           # [[0. 0.]
           # [0. 0.]
@@ -289,9 +289,9 @@ def zeros_like(x, dtype=None, name=None):
            import paddle
            import numpy as np

-           paddle.enable_imperative()
+           paddle.disable_static()

-           x = paddle.imperative.to_variable(np.array([1,2,3], dtype='float32'))
+           x = paddle.to_variable(np.array([1,2,3], dtype='float32'))
           out1 = paddle.zeros_like(x)  # [0., 0., 0.]
           out2 = paddle.zeros_like(x, dtype='int32')  # [0, 0, 0]
@@ -328,7 +328,7 @@ def eye(num_rows, num_columns=None, dtype=None, name=None):

           import paddle

-          paddle.enable_imperative()  # Now we are in imperative mode
+          paddle.disable_static()  # Now we are in imperative mode
           data = paddle.eye(3, dtype='int32')
           # [[1 0 0]
           # [0 1 0]
@@ -382,7 +382,7 @@ def full(shape, fill_value, dtype=None, name=None):

          import paddle

-         paddle.enable_imperative()  # Now we are in imperative mode
+         paddle.disable_static()  # Now we are in imperative mode
          data1 = paddle.full(shape=[2,1], fill_value=0, dtype='int64')
          #[[0]
          # [0]]
@@ -459,7 +459,7 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
            import paddle
            import numpy as np

-           paddle.enable_imperative()
+           paddle.disable_static()

            out1 = paddle.arange(5)
            # [0, 1, 2, 3, 4]
@@ -471,7 +471,7 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
            out3 = paddle.arange(4.999, dtype='float32')
            # [0., 1., 2., 3., 4.]

-           start_var = paddle.imperative.to_variable(np.array([3]))
+           start_var = paddle.to_variable(np.array([3]))
            out4 = paddle.arange(start_var, 7)
            # [3, 4, 5, 6]
@@ -709,12 +709,12 @@ def meshgrid(*args, **kwargs):
           import paddle
           import numpy as np

-          paddle.enable_imperative()
+          paddle.disable_static()

           input_3 = np.random.randint(0, 100, [100, ]).astype('int32')
           input_4 = np.random.randint(0, 100, [200, ]).astype('int32')

-          tensor_3 = paddle.imperative.to_variable(input_3)
-          tensor_4 = paddle.imperative.to_variable(input_4)
+          tensor_3 = paddle.to_variable(input_3)
+          tensor_4 = paddle.to_variable(input_4)

           grid_x, grid_y = paddle.tensor.meshgrid(tensor_3, tensor_4)

           #the shape of grid_x is (100, 200)
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index fcff5585bc12a75f274bd29236648d5b201a2f2d..8744e02b9f7c56cdc7cda28886df3dc06e0aff83 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -605,10 +605,10 @@ def cross(x, y, axis=None, name=None):
     Examples:
         .. code-block:: python
             import paddle
-            from paddle.imperative import to_variable
+            from paddle import to_variable
             import numpy as np

-            paddle.enable_imperative()
+            paddle.disable_static()
             data_x = np.array([[1.0, 1.0, 1.0],
                                [2.0, 2.0, 2.0],
@@ -782,13 +782,13 @@ def histogram(input, bins=100, min=0, max=0):
        .. code-block:: python
            import paddle
            import numpy as np
-           startup_program = paddle.Program()
-           train_program = paddle.Program()
-           with paddle.program_guard(train_program, startup_program):
+           startup_program = paddle.static.Program()
+           train_program = paddle.static.Program()
+           with paddle.static.program_guard(train_program, startup_program):
               inputs = paddle.data(name='input', dtype='int32', shape=[2,3])
               output = paddle.histogram(inputs, bins=5, min=1, max=5)
               place = paddle.CPUPlace()
-              exe = paddle.Executor(place)
+              exe = paddle.static.Executor(place)
              exe.run(startup_program)
              img = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int32)
              res = exe.run(train_program,
@@ -800,11 +800,12 @@ def histogram(input, bins=100, min=0, max=0):
        .. code-block:: python
            import paddle
            import numpy as np
-           with paddle.imperative.guard(paddle.CPUPlace()):
-               inputs_np = np.array([1, 2, 1]).astype(np.float)
-               inputs = paddle.imperative.to_variable(inputs_np)
-               result = paddle.histogram(inputs, bins=4, min=0, max=3)
-               print(result) # [0, 2, 1, 0]
+           paddle.disable_static(paddle.CPUPlace())
+           inputs_np = np.array([1, 2, 1]).astype(np.float)
+           inputs = paddle.to_variable(inputs_np)
+           result = paddle.histogram(inputs, bins=4, min=0, max=3)
+           print(result) # [0, 2, 1, 0]
+           paddle.enable_static()
     """
     if in_dygraph_mode():
        return core.ops.histogram(input, "bins", bins, "min", min, "max", max)
diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py
index 936022dd73b31f2d5839cc7e8698c6757378d874..18dbeb0c46e8a3416d7d57f92ffa6064510250b3 100644
--- a/python/paddle/tensor/logic.py
+++ b/python/paddle/tensor/logic.py
@@ -71,12 +71,11 @@ def equal_all(x, y, name=None):

           import numpy as np
           import paddle
-          import paddle.imperative as imperative

-          paddle.enable_imperative()
-          x = imperative.to_variable(np.array([1, 2, 3]))
-          y = imperative.to_variable(np.array([1, 2, 3]))
-          z = imperative.to_variable(np.array([1, 4, 3]))
+          paddle.disable_static()
+          x = paddle.to_variable(np.array([1, 2, 3]))
+          y = paddle.to_variable(np.array([1, 2, 3]))
+          z = paddle.to_variable(np.array([1, 4, 3]))
           result1 = paddle.equal_all(x, y)
           print(result1.numpy()) # result1 = [True ]
           result2 = paddle.equal_all(x, z)
@@ -201,11 +200,10 @@ def equal(x, y, name=None):

          import numpy as np
          import paddle
-         import paddle.imperative as imperative

-         paddle.enable_imperative()
-         x = imperative.to_variable(np.array([1, 2, 3]))
-         y = imperative.to_variable(np.array([1, 3, 2]))
+         paddle.disable_static()
+         x = paddle.to_variable(np.array([1, 2, 3]))
+         y = paddle.to_variable(np.array([1, 3, 2]))
          result1 = paddle.equal(x, y)
          print(result1.numpy()) # result1 = [True False False]
     """
@@ -234,11 +232,10 @@ def greater_equal(x, y, name=None):
         .. code-block:: python
             import numpy as np
             import paddle
-            import paddle.imperative as imperative

-            paddle.enable_imperative()
-            x = imperative.to_variable(np.array([1, 2, 3]))
-            y = imperative.to_variable(np.array([1, 3, 2]))
+            paddle.disable_static()
+            x = paddle.to_variable(np.array([1, 2, 3]))
+            y = paddle.to_variable(np.array([1, 3, 2]))
             result1 = paddle.greater_equal(x, y)
             print(result1.numpy()) # result1 = [True False True]
     """
@@ -267,11 +264,10 @@ def greater_than(x, y, name=None):
         .. code-block:: python
             import numpy as np
             import paddle
-            import paddle.imperative as imperative

-            paddle.enable_imperative()
-            x = imperative.to_variable(np.array([1, 2, 3]))
-            y = imperative.to_variable(np.array([1, 3, 2]))
+            paddle.disable_static()
+            x = paddle.to_variable(np.array([1, 2, 3]))
+            y = paddle.to_variable(np.array([1, 3, 2]))
             result1 = paddle.greater_than(x, y)
             print(result1.numpy()) # result1 = [False False True]
     """
@@ -301,11 +297,10 @@ def less_equal(x, y, name=None):
         .. code-block:: python
             import numpy as np
             import paddle
-            import paddle.imperative as imperative

-            paddle.enable_imperative()
-            x = imperative.to_variable(np.array([1, 2, 3]))
-            y = imperative.to_variable(np.array([1, 3, 2]))
+            paddle.disable_static()
+            x = paddle.to_variable(np.array([1, 2, 3]))
+            y = paddle.to_variable(np.array([1, 3, 2]))
             result1 = paddle.less_equal(x, y)
             print(result1.numpy()) # result1 = [True True False]
     """
@@ -335,11 +330,10 @@ def less_than(x, y, name=None):
         .. code-block:: python
             import numpy as np
             import paddle
-            import paddle.imperative as imperative

-            paddle.enable_imperative()
-            x = imperative.to_variable(np.array([1, 2, 3]))
-            y = imperative.to_variable(np.array([1, 3, 2]))
+            paddle.disable_static()
+            x = paddle.to_variable(np.array([1, 2, 3]))
+            y = paddle.to_variable(np.array([1, 3, 2]))
             result1 = paddle.less_than(x, y)
             print(result1.numpy()) # result1 = [False True False]
     """
@@ -369,11 +363,10 @@ def not_equal(x, y, name=None):
         .. code-block:: python
             import numpy as np
             import paddle
-            import paddle.imperative as imperative

-            paddle.enable_imperative()
-            x = imperative.to_variable(np.array([1, 2, 3]))
-            y = imperative.to_variable(np.array([1, 3, 2]))
+            paddle.disable_static()
+            x = paddle.to_variable(np.array([1, 2, 3]))
+            y = paddle.to_variable(np.array([1, 3, 2]))
             result1 = paddle.not_equal(x, y)
             print(result1.numpy()) # result1 = [False True True]
     """
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 5b7c8c37b1b0a549f8c15af3e2d6425d5361de03..efd384df4e7c0b3c08224c60a81b5db16f40b6d4 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -103,16 +103,16 @@ def concat(x, axis=0, name=None):
             import paddle
             import numpy as np

-            paddle.enable_imperative()  # Now we are in imperative mode
+            paddle.disable_static()  # Now we are in imperative mode
             in1 = np.array([[1, 2, 3],
                             [4, 5, 6]])
             in2 = np.array([[11, 12, 13],
                             [14, 15, 16]])
             in3 = np.array([[21, 22],
                             [23, 24]])
-            x1 = paddle.imperative.to_variable(in1)
-            x2 = paddle.imperative.to_variable(in2)
-            x3 = paddle.imperative.to_variable(in3)
+            x1 = paddle.to_variable(in1)
+            x2 = paddle.to_variable(in2)
+            x3 = paddle.to_variable(in3)
             zero = paddle.full(shape=[1], dtype='int32', fill_value=0)
             # When the axis is negative, the real axis is (axis + Rank(x))
             # As follow, axis is -1, Rank(x) is 2, the real axis is 1
@@ -156,12 +156,12 @@ def flip(x, axis, name=None):
           import paddle
           import numpy as np

-          paddle.enable_imperative()
+          paddle.disable_static()

           image_shape=(3, 2, 2)
           x = np.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape)
           x = x.astype('float32')
-          img = paddle.imperative.to_variable(x)
+          img = paddle.to_variable(x)
           out = paddle.flip(img, [0,1])
           print(out) # [[[10,11][8, 9]],[[6, 7],[4, 5]] [[2, 3],[0, 1]]]
@@ -247,13 +247,13 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
             import paddle
             import numpy as np

-            paddle.enable_imperative()
+            paddle.disable_static()

             image_shape=(2, 3, 4, 4)
             x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3]).reshape(image_shape) / 100.
             x = x.astype('float32')
-            img = paddle.imperative.to_variable(x)
+            img = paddle.to_variable(x)
             out = paddle.flatten(img, start_axis=1, stop_axis=2)
             # out shape is [2, 12, 4]
     """
@@ -325,8 +325,8 @@ def roll(x, shifts, axis=None, name=None):
             data = np.array([[1.0, 2.0, 3.0],
                              [4.0, 5.0, 6.0],
                              [7.0, 8.0, 9.0]])
-            paddle.enable_imperative()
-            x = paddle.imperative.to_variable(data)
+            paddle.disable_static()
+            x = paddle.to_variable(data)
             out_z1 = paddle.roll(x, shifts=1)
             print(out_z1.numpy())
             #[[9. 1. 2.]
@@ -503,10 +503,10 @@ def split(x, num_or_sections, axis=0, name=None):
             import numpy as np
             import paddle

-            paddle.enable_imperative()
+            paddle.disable_static()
             # x is a Tensor which shape is [3, 9, 5]
             x_np = np.random.random([3, 9, 5]).astype("int32")
-            x = paddle.imperative.to_variable(x_np)
+            x = paddle.to_variable(x_np)

             out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1)
             # out0.shape [3, 3, 5]
@@ -595,7 +595,7 @@ def squeeze(x, axis=None, name=None):

             import paddle

-            paddle.enable_imperative()
+            paddle.disable_static()

             x = paddle.rand([5, 1, 10])
             output = paddle.squeeze(x, axis=1)
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index c67ac474d479c5fb5570ce6ad218ae2c82f29c9a..7ce82e7ae31e708f0c56f7c07185fb4ecc8d2037 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -487,18 +487,18 @@ Examples:
         import paddle
         import numpy as np

-        paddle.enable_imperative()
+        paddle.disable_static()
         x_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
         y_data = np.array([[5, 6], [7, 8]], dtype=np.float32)
-        x = paddle.imperative.to_variable(x_data)
-        y = paddle.imperative.to_variable(y_data)
+        x = paddle.to_variable(x_data)
+        y = paddle.to_variable(y_data)
         res = paddle.multiply(x, y)
         print(res.numpy()) # [[5, 12], [21, 32]]

         x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)
         y_data = np.array([1, 2], dtype=np.float32)
-        x = paddle.imperative.to_variable(x_data)
-        y = paddle.imperative.to_variable(y_data)
+        x = paddle.to_variable(x_data)
+        y = paddle.to_variable(y_data)
         res = paddle.multiply(x, y, axis=1)
         print(res.numpy()) # [[[1, 2, 3], [2, 4, 6]]]
@@ -1431,11 +1431,11 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
             case2 = np.random.randn(3, 10, 10).astype('float32')
             case3 = np.random.randn(3, 10, 5, 10).astype('float32')

-            paddle.enable_imperative()
+            paddle.disable_static()

-            case1 = paddle.imperative.to_variable(case1)
-            case2 = paddle.imperative.to_variable(case2)
-            case3 = paddle.imperative.to_variable(case3)
+            case1 = paddle.to_variable(case1)
+            case2 = paddle.to_variable(case2)
+            case3 = paddle.to_variable(case3)
             data1 = paddle.trace(case1) # data1.shape = [1]
             data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3]
             data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data3.shape = [3, 5]
diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index 5e9f55cd34c3e3e5cedee10352c7a5d96fbb8abc..d26003fd826cfb3f3905b6aeea3e9492fab39cea 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -81,7 +81,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
             import paddle
             import numpy as np

-            paddle.enable_imperative()
+            paddle.disable_static()

             # example 1:
             # attr shape is a list which doesn't contain Tensor.
@@ -98,7 +98,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):

             # example 3:
             # attr shape is a Tensor
-            var_shape = paddle.imperative.to_variable(np.array([3]))
+            var_shape = paddle.to_variable(np.array([3]))
             result_3 = paddle.randint(low=-5, high=5, shape=var_shape)
             # [-2, 2, 3]
@@ -187,7 +187,7 @@ def randn(shape, dtype=None, name=None):
            import paddle
            import numpy as np

-           paddle.enable_imperative()
+           paddle.disable_static()

            # example 1: attr shape is a list which doesn't contain Tensor.
            result_1 = paddle.randn(shape=[2, 3])
@@ -206,7 +206,7 @@ def randn(shape, dtype=None, name=None):
            #          [ 0.8086993 ,  0.6868893 ]]]

            # example 3: attr shape is a Tensor, the data type must be int64 or int32.
-           var_shape = paddle.imperative.to_variable(np.array([2, 3]))
+           var_shape = paddle.to_variable(np.array([2, 3]))
            result_3 = paddle.randn(var_shape)
            # [[-2.878077 , 0.17099959, 0.05111201]
            # [-0.3761474, -1.044801  ,  1.1870178 ]]
@@ -252,7 +252,7 @@ def randperm(n, dtype="int64", name=None):

            import paddle

-           paddle.enable_imperative()
+           paddle.disable_static()

            result_1 = paddle.randperm(5)
            # [4, 1, 2, 3, 0]
@@ -325,7 +325,7 @@ def rand(shape, dtype=None, name=None):
             import paddle
             import numpy as np

-            paddle.enable_imperative()
+            paddle.disable_static()
             # example 1: attr shape is a list which doesn't contain Tensor.
             result_1 = paddle.rand(shape=[2, 3])
             # [[0.451152  , 0.55825245, 0.403311  ],
@@ -343,7 +343,7 @@ def rand(shape, dtype=None, name=None):
             #           [0.870881  , 0.2984597 ]]]

             # example 3: attr shape is a Tensor, the data type must be int64 or int32.
-            var_shape = paddle.imperative.to_variable(np.array([2, 3]))
+            var_shape = paddle.to_variable(np.array([2, 3]))
             result_3 = paddle.rand(var_shape)
             # [[0.22920267, 0.841956  , 0.05981819],
             #  [0.4836288 , 0.24573246, 0.7516129 ]]
diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py
index 1cb775c9d4b73beaf0f2167fe7fc9909e91d116d..0f8381d824027670250f4b59607b4275e43b5e22 100644
--- a/python/paddle/tensor/search.py
+++ b/python/paddle/tensor/search.py
@@ -68,17 +68,16 @@ def argsort(x, axis=-1, descending=False, name=None):
     Examples:
         .. code-block:: python
             import paddle
-            import paddle.imperative as imperative
             import numpy as np

-            paddle.enable_imperative()
+            paddle.disable_static()
             input_array = np.array([[[5,8,9,5],
                                      [0,0,1,7],
                                      [6,9,2,4]],
                                     [[5,2,4,2],
                                      [4,7,7,9],
                                      [1,7,0,6]]]).astype(np.float32)
-            x = imperative.to_variable(input_array)
+            x = paddle.to_variable(input_array)
             out1 = paddle.argsort(x=x, axis=-1)
             out2 = paddle.argsort(x=x, axis=0)
             out3 = paddle.argsort(x=x, axis=1)
@@ -250,14 +249,14 @@ def index_select(x, index, axis=0, name=None):
             import paddle
             import numpy as np

-            paddle.enable_imperative()  # Now we are in imperative mode
+            paddle.disable_static()  # Now we are in imperative mode
             data = np.array([[1.0, 2.0, 3.0, 4.0],
                              [5.0, 6.0, 7.0, 8.0],
                              [9.0, 10.0, 11.0, 12.0]])
             data_index = np.array([0, 1, 1]).astype('int32')

-            x = paddle.imperative.to_variable(data)
-            index = paddle.imperative.to_variable(data_index)
+            x = paddle.to_variable(data)
+            index = paddle.to_variable(data_index)
             out_z1 = paddle.index_select(x=x, index=index)
             #[[1. 2. 3. 4.]
             # [5. 6. 7. 8.]
@@ -399,17 +398,16 @@ def sort(x, axis=-1, descending=False, name=None):
     Examples:
         .. code-block:: python
             import paddle
-            import paddle.imperative as imperative
             import numpy as np

-            paddle.enable_imperative()
+            paddle.disable_static()
             input_array = np.array([[[5,8,9,5],
                                      [0,0,1,7],
                                      [6,9,2,4]],
                                     [[5,2,4,2],
                                      [4,7,7,9],
                                      [1,7,0,6]]]).astype(np.float32)
-            x = imperative.to_variable(input_array)
+            x = paddle.to_variable(input_array)
             out1 = paddle.sort(x=x, axis=-1)
             out2 = paddle.sort(x=x, axis=0)
             out3 = paddle.sort(x=x, axis=1)
diff --git a/python/setup.py.in b/python/setup.py.in
index b2e2811dea3e6bec685c0f3c1499f1ee8d0b0d4d..a2628cac51af62c59c05484ecbf2d2b52c9bf859 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -154,6 +154,7 @@ packages=['paddle',
           'paddle.fleet.proto',
           'paddle.fleet.utils',
           'paddle.framework',
+          'paddle.jit',
           'paddle.fluid',
           'paddle.fluid.dygraph',
           'paddle.fluid.dygraph.dygraph_to_static',
@@ -200,8 +201,8 @@ packages=['paddle',
           'paddle.nn.layer',
           'paddle.nn.initializer',
           'paddle.metric',
-          'paddle.imperative',
-          'paddle.imperative.jit',
+          'paddle.static',
+          'paddle.static.nn',
           'paddle.tensor',
          ]