Unverified · Commit 2efcb481, authored by pangyoki, committed by GitHub

Paddle-2.0 API directory migration (#25898)

* Directory migration, test=develop

* Changed imperative from paddle init to paddle framework, test=develop

* Fixed jit bug, test=develop

* Defaulted to static mode, test=develop

* Fixed format; create_parameter now belongs to framework, test=develop

* Fixed import package, test=develop

* Fixed __init__ format, test=develop

* Fixed alias problem

* Fixed paddle.enable_imperative problems, test=develop

* Added unittest

* Deleted install_check comment

* Fixed unittest timeout

* Fixed unittest error

* Moved Program and default_xx_program to the static package

* Optimized unittest method

* Fixed framework __init__ format

* Fixed jit path

* Deleted alias

* Moved jit to paddle

* Fixed unittest format

* Fixed paddle.default_main_program

* Fixed save/load API in paddle __init__.py

* Fixed CI use of paddle.imperative.to_variable
Parent cd7b55a2
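Taken together, the changes below collapse the provisional `paddle.imperative` namespace into the `paddle` root and move the static-graph surface under `paddle.static`. A minimal before/after sketch of affected user code (a hypothetical snippet; the API names themselves come from the diff below):

```python
import numpy as np
import paddle

# Old (pre-#25898)                     New (this PR)
#   paddle.enable_imperative()      -> paddle.disable_static()
#   paddle.imperative.to_variable() -> paddle.to_variable()
#   paddle.disable_imperative()     -> paddle.enable_static()
#   paddle.in_imperative_mode()     -> paddle.in_dynamic_mode()
paddle.disable_static()
x = paddle.to_variable(np.ones([2, 3], dtype='float32'))
print(x.numpy().sum())  # 6.0, computed eagerly
paddle.enable_static()  # static mode is the process-wide default
```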
@@ -38,7 +38,6 @@ import paddle.tensor
 import paddle.nn
 import paddle.fleet
 import paddle.framework
-import paddle.imperative
 import paddle.optimizer
 import paddle.metric
 import paddle.incubate.complex as complex
@@ -69,8 +68,6 @@ from .tensor.creation import full_like #DEFINE_ALIAS
 from .tensor.creation import triu #DEFINE_ALIAS
 from .tensor.creation import tril #DEFINE_ALIAS
 from .tensor.creation import meshgrid #DEFINE_ALIAS
-from .tensor.io import save #DEFINE_ALIAS
-from .tensor.io import load #DEFINE_ALIAS
 from .tensor.linalg import matmul #DEFINE_ALIAS
 from .tensor.linalg import dot #DEFINE_ALIAS
 # from .tensor.linalg import einsum #DEFINE_ALIAS
@@ -201,30 +198,34 @@ from .tensor.search import index_select #DEFINE_ALIAS
 from .tensor.search import nonzero #DEFINE_ALIAS
 from .tensor.search import sort #DEFINE_ALIAS
 from .framework.random import manual_seed #DEFINE_ALIAS
-from .framework import append_backward #DEFINE_ALIAS
-from .framework import gradients #DEFINE_ALIAS
-from .framework import Executor #DEFINE_ALIAS
-from .framework import global_scope #DEFINE_ALIAS
-from .framework import scope_guard #DEFINE_ALIAS
-from .framework import BuildStrategy #DEFINE_ALIAS
-from .framework import CompiledProgram #DEFINE_ALIAS
-from .framework import default_main_program #DEFINE_ALIAS
-from .framework import default_startup_program #DEFINE_ALIAS
+from .framework import Variable #DEFINE_ALIAS
+from .framework import ParamAttr #DEFINE_ALIAS
 from .framework import create_global_var #DEFINE_ALIAS
 from .framework import create_parameter #DEFINE_ALIAS
-from .framework import Print #DEFINE_ALIAS
-from .framework import py_func #DEFINE_ALIAS
-from .framework import ExecutionStrategy #DEFINE_ALIAS
-from .framework import name_scope #DEFINE_ALIAS
-from .framework import ParallelExecutor #DEFINE_ALIAS
-from .framework import ParamAttr #DEFINE_ALIAS
-from .framework import Program #DEFINE_ALIAS
-from .framework import program_guard #DEFINE_ALIAS
-from .framework import Variable #DEFINE_ALIAS
-from .framework import WeightNormParamAttr #DEFINE_ALIAS
 from .framework import CPUPlace #DEFINE_ALIAS
 from .framework import CUDAPlace #DEFINE_ALIAS
 from .framework import CUDAPinnedPlace #DEFINE_ALIAS
+from .framework import BackwardStrategy #DEFINE_ALIAS
+from .framework import to_variable #DEFINE_ALIAS
+from .framework import grad #DEFINE_ALIAS
+from .framework import no_grad #DEFINE_ALIAS
+from .framework import save_dygraph #DEFINE_ALIAS
+from .framework import load_dygraph #DEFINE_ALIAS
+from .framework import save #DEFINE_ALIAS
+from .framework import load #DEFINE_ALIAS
+from .framework import prepare_context #DEFINE_ALIAS
+from .framework import ParallelEnv #DEFINE_ALIAS
+from .framework import DataParallel #DEFINE_ALIAS
+from .framework import NoamDecay #DEFINE_ALIAS
+from .framework import PiecewiseDecay #DEFINE_ALIAS
+from .framework import NaturalExpDecay #DEFINE_ALIAS
+from .framework import ExponentialDecay #DEFINE_ALIAS
+from .framework import InverseTimeDecay #DEFINE_ALIAS
+from .framework import PolynomialDecay #DEFINE_ALIAS
+from .framework import CosineDecay #DEFINE_ALIAS
 from .tensor.search import index_sample #DEFINE_ALIAS
 from .tensor.stat import mean #DEFINE_ALIAS
 from .tensor.stat import reduce_mean #DEFINE_ALIAS
@@ -237,6 +238,11 @@ from .fluid.data import data
 from . import incubate
 from .incubate import hapi
-from .fluid.dygraph.base import enable_dygraph as enable_imperative #DEFINE_ALIAS
-from .fluid.dygraph.base import disable_dygraph as disable_imperative #DEFINE_ALIAS
-from .fluid.framework import in_dygraph_mode as in_imperative_mode #DEFINE_ALIAS
+from .fluid.dygraph.base import enable_dygraph #DEFINE_ALIAS
+from .fluid.dygraph.base import disable_dygraph #DEFINE_ALIAS
+from .fluid.dygraph.base import enable_dygraph as disable_static #DEFINE_ALIAS
+from .fluid.dygraph.base import disable_dygraph as enable_static #DEFINE_ALIAS
+from .fluid.framework import in_dygraph_mode as in_dynamic_mode #DEFINE_ALIAS
+from . import jit
+from . import static
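Note the deliberate cross-wiring in the new aliases: `disable_static` binds to `enable_dygraph` and `enable_static` binds to `disable_dygraph`, so entering dynamic mode is expressed as leaving static mode. A quick identity check, sketched under the assumption of a build containing this commit:

```python
import paddle
from paddle.fluid import framework
from paddle.fluid.dygraph import base

# The #DEFINE_ALIAS lines above make these the same function objects.
assert paddle.disable_static is base.enable_dygraph
assert paddle.enable_static is base.disable_dygraph
assert paddle.in_dynamic_mode is framework.in_dygraph_mode
```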
@@ -286,8 +286,8 @@ class Fleet(object):
         context["loss"] = loss
         if startup_program == None:
             self.origin_startup_program = \
-                paddle.default_startup_program().clone(for_test=False)
-            startup_program = paddle.default_startup_program()
+                paddle.static.default_startup_program().clone(for_test=False)
+            startup_program = paddle.static.default_startup_program()
         else:
             self.origin_startup_program = \
                 startup_program.clone(for_test=False)
@@ -338,7 +338,7 @@ class Fleet(object):
             parameter_list=parameter_list,
             no_grad_set=no_grad_set)

-        default_program = paddle.default_main_program()
+        default_program = paddle.static.default_main_program()

         if id(default_program) != id(loss.block.program):
             paddle.fluid.framework.switch_main_program(loss.block.program)
......
@@ -190,7 +190,7 @@ class GraphExecutionOptimizer(MetaOptimizerBase):
                          parameter_list=None,
                          no_grad_set=None):
         if startup_program == None:
-            startup_program = paddle.default_startup_program()
+            startup_program = paddle.static.default_startup_program()
         compiled_program = self._try_to_compile(startup_program,
                                                 loss.block.program, loss)
         loss.block.program._graph = compiled_program
......
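Both framework call sites above get the same mechanical rewrite: program-level entry points now live under `paddle.static`. A small end-to-end sketch of that namespace (hypothetical example; the individual names are asserted importable by this PR's directory test further down):

```python
import numpy as np
import paddle

paddle.enable_static()  # explicit, although static is now the default mode

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.fluid.data(name='x', shape=[None, 3], dtype='float32')
    y = x * 2.0

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
out, = exe.run(main_prog,
               feed={'x': np.ones([4, 3], dtype='float32')},
               fetch_list=[y])
print(out.sum())  # 24.0
```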
@@ -121,10 +121,6 @@ def enabled():
 def enable_dygraph(place=None):
     """
-    :alias_main: paddle.enable_dygraph
-    :alias: paddle.enable_dygraph,paddle.enable_imperative.enable_dygraph
-    :old_api: paddle.fluid.dygraph.base.enable_dygraph
-
     This function enables dynamic graph mode.

     Parameters:
@@ -155,10 +151,6 @@ def enable_dygraph(place=None):
 def disable_dygraph():
     """
-    :alias_main: paddle.disable_dygraph
-    :alias: paddle.disable_dygraph,paddle.disable_imperative.disable_dygraph
-    :old_api: paddle.fluid.dygraph.base.disable_dygraph
-
     This function disables dynamic graph mode.

     return:
......
@@ -701,11 +701,11 @@ def save(layer, model_path, input_spec=None, configs=None):
     prog_translator = ProgramTranslator()
     if not prog_translator.enable:
         raise RuntimeError(
-            "The paddle.imperative.jit.save doesn't work when setting ProgramTranslator.enable=False."
+            "The paddle.jit.save doesn't work when setting ProgramTranslator.enable=False."
         )
     if not isinstance(layer, Layer):
         raise TypeError(
-            "The input layer of paddle.imperative.jit.save should be 'Layer', but received layer type is %s."
+            "The input layer of paddle.jit.save should be 'Layer', but received layer type is %s."
             % type(layer))
     if configs is None:
......
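`paddle.imperative.jit` becomes `paddle.jit`. A minimal save/load round trip, sketched under the assumption of this build (the `save` signature comes from the hunk above; the layer and data are hypothetical):

```python
import numpy as np
import paddle
import paddle.fluid as fluid

paddle.disable_static()

class Net(fluid.dygraph.Layer):
    def __init__(self):
        super(Net, self).__init__()
        self._fc = fluid.dygraph.Linear(4, 2)

    @paddle.jit.to_static  # formerly paddle.imperative.declarative
    def forward(self, x):
        return self._fc(x)

net = Net()
out = net(paddle.to_variable(np.random.rand(1, 4).astype('float32')))

# save(layer, model_path, input_spec=None, configs=None), per the diff above
paddle.jit.save(net, model_path='./net_infer')
loaded = paddle.jit.load('./net_infer')
```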
@@ -146,7 +146,7 @@ class Layer(core.Layer):
             import paddle
             import paddle.nn as nn

-            paddle.enable_imperative()
+            paddle.disable_static()

             net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
......
@@ -3207,12 +3207,12 @@ class Flatten(layers.Layer):
         .. code-block:: python

           import paddle
-          from paddle.imperative import to_variable
+          from paddle import to_variable
           import numpy as np

           inp_np = np.ones([5, 2, 3, 4]).astype('float32')

-          paddle.enable_imperative()
+          paddle.disable_static()
           inp_np = to_variable(inp_np)
           flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2)
......
@@ -226,7 +226,7 @@ def monkey_patch_varbase():
             .. code-block:: python

                 import paddle
-                paddle.enable_imperative()
+                paddle.disable_static()
                 x = paddle.rand([1, 5])
                 print(x)
                 # Variable: eager_tmp_0
@@ -235,7 +235,7 @@ def monkey_patch_varbase():
                 # - layout: NCHW
                 # - dtype: float
                 # - data: [0.645307 0.597973 0.732793 0.646921 0.540328]
-                paddle.disable_imperative()
+                paddle.enable_static()
         """
         tensor = self.value().get_tensor()
         if tensor._is_initialized():
......
@@ -5175,7 +5175,7 @@ class ParamBase(core.VarBase):
             .. code-block:: python

                 import paddle
-                paddle.enable_imperative()
+                paddle.disable_static()
                 conv = paddle.nn.Conv2D(3, 3, 5)
                 print(conv.weight)
                 # Parameter: conv2d_0.w_0
@@ -5184,7 +5184,7 @@ class ParamBase(core.VarBase):
                 # - layout: NCHW
                 # - dtype: float
                 # - data: [...]
-                paddle.disable_imperative()
+                paddle.enable_static()
         """
         tensor = self.value().get_tensor()
         if tensor._is_initialized():
......
@@ -13,6 +13,7 @@
 # limitations under the License.

 import os
+import paddle
 from .framework import Program, program_guard, unique_name, cuda_places, cpu_places
 from .param_attr import ParamAttr
 from .initializer import Constant
......
@@ -1669,9 +1669,6 @@ def _load_persistable_nodes(executor, dirname, graph):
 def save(program, model_path):
     """
     :api_attr: Static Graph
-    :alias_main: paddle.save
-    :alias: paddle.save,paddle.tensor.save,paddle.tensor.io.save
-    :old_api: paddle.fluid.save

     This function saves parameters, optimizer information and the network description to model_path.
@@ -1733,9 +1730,6 @@ def save(program, model_path):
 def load(program, model_path, executor=None, var_list=None):
     """
     :api_attr: Static Graph
-    :alias_main: paddle.load
-    :alias: paddle.load,paddle.tensor.load,paddle.tensor.io.load
-    :old_api: paddle.fluid.io.load

     This function gets parameters and optimizer information from the program, then fetches the corresponding values from file.
     An exception will be thrown if the shape or dtype of the parameters does not match.
......
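The static-graph `save`/`load` keep their fluid signatures and, after this PR, also surface as `paddle.static.save`/`paddle.static.load`. A hedged sketch:

```python
import paddle

paddle.enable_static()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.fluid.data(name='x', shape=[None, 4], dtype='float32')
    y = paddle.fluid.layers.fc(input=x, size=2)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)

# Signatures from the hunks above:
#   save(program, model_path)
#   load(program, model_path, executor=None, var_list=None)
paddle.static.save(main_prog, './static_model')
paddle.static.load(main_prog, './static_model', exe)
```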
@@ -12073,11 +12073,11 @@ def logical_and(x, y, out=None, name=None):
             import paddle
             import numpy as np

-            paddle.enable_imperative()
+            paddle.disable_static()
             x_data = np.array([True, True, False, False], dtype=np.bool)
             y_data = np.array([True, False, True, False], dtype=np.bool)
-            x = paddle.imperative.to_variable(x_data)
-            y = paddle.imperative.to_variable(y_data)
+            x = paddle.to_variable(x_data)
+            y = paddle.to_variable(y_data)
             res = paddle.logical_and(x, y)
             print(res.numpy())  # [True False False False]
     """
@@ -12115,11 +12115,11 @@ def logical_or(x, y, out=None, name=None):
             import paddle
             import numpy as np

-            paddle.enable_imperative()
+            paddle.disable_static()
             x_data = np.array([True, True, False, False], dtype=np.bool)
             y_data = np.array([True, False, True, False], dtype=np.bool)
-            x = paddle.imperative.to_variable(x_data)
-            y = paddle.imperative.to_variable(y_data)
+            x = paddle.to_variable(x_data)
+            y = paddle.to_variable(y_data)
             res = paddle.logical_or(x, y)
             print(res.numpy())  # [True True True False]
     """
@@ -12157,11 +12157,11 @@ def logical_xor(x, y, out=None, name=None):
             import paddle
             import numpy as np

-            paddle.enable_imperative()
+            paddle.disable_static()
             x_data = np.array([True, True, False, False], dtype=np.bool)
             y_data = np.array([True, False, True, False], dtype=np.bool)
-            x = paddle.imperative.to_variable(x_data)
-            y = paddle.imperative.to_variable(y_data)
+            x = paddle.to_variable(x_data)
+            y = paddle.to_variable(y_data)
             res = paddle.logical_xor(x, y)
             print(res.numpy())  # [False True True False]
     """
@@ -12197,9 +12197,9 @@ def logical_not(x, out=None, name=None):
             import paddle
             import numpy as np

-            paddle.enable_imperative()
+            paddle.disable_static()
             x_data = np.array([True, False, True, False], dtype=np.bool)
-            x = paddle.imperative.to_variable(x_data)
+            x = paddle.to_variable(x_data)
             res = paddle.logical_not(x)
             print(res.numpy())  # [False True False True]
     """
......
@@ -16,7 +16,7 @@ from __future__ import print_function

 import paddle
 from paddle.fluid import core
-from paddle import program_guard, Program
+from paddle.static import program_guard, Program
 import unittest
 import numpy as np
 from op_test import OpTest
@@ -82,7 +82,7 @@ class TestArangeAPI(unittest.TestCase):
         place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
         ) else paddle.CPUPlace()
-        exe = paddle.Executor(place)
+        exe = paddle.static.Executor(place)
         out = exe.run(fetch_list=[x1])

         expected_data = np.arange(0, 5, 1).astype(np.float32)
@@ -93,15 +93,16 @@ class TestArangeImperative(unittest.TestCase):
     def test_out(self):
         place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
         ) else paddle.CPUPlace()
-        with paddle.imperative.guard(place):
-            x1 = paddle.arange(0, 5, 1)
-            x2 = paddle.tensor.arange(5)
-            x3 = paddle.tensor.creation.arange(5)
-            start = paddle.imperative.to_variable(np.array([0], 'float32'))
-            end = paddle.imperative.to_variable(np.array([5], 'float32'))
-            step = paddle.imperative.to_variable(np.array([1], 'float32'))
-            x4 = paddle.arange(start, end, step, 'int64')
+        paddle.disable_static(place)
+        x1 = paddle.arange(0, 5, 1)
+        x2 = paddle.tensor.arange(5)
+        x3 = paddle.tensor.creation.arange(5)
+        start = paddle.to_variable(np.array([0], 'float32'))
+        end = paddle.to_variable(np.array([5], 'float32'))
+        step = paddle.to_variable(np.array([1], 'float32'))
+        x4 = paddle.arange(start, end, step, 'int64')
+        paddle.enable_static()

         expected_data = np.arange(0, 5, 1).astype(np.int64)
         for i in [x1, x2, x3, x4]:
......
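The test ports above replace the removed context manager `paddle.imperative.guard(place)` with an explicit `disable_static(place)`/`enable_static()` pair, restoring static mode (the new default) after each dygraph block. For code that relied on the guard, a hypothetical stand-in is straightforward:

```python
import contextlib
import paddle

@contextlib.contextmanager
def dygraph_guard(place=None):
    """Hypothetical replacement for the removed paddle.imperative.guard()."""
    paddle.disable_static(place)
    try:
        yield
    finally:
        paddle.enable_static()  # restore the default static mode

with dygraph_guard(paddle.CPUPlace()):
    x = paddle.arange(0, 5, 1)
    print(x.numpy())  # [0 1 2 3 4]
```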
@@ -17,7 +17,6 @@ from __future__ import print_function

 import unittest
 import paddle
 import paddle.fluid as fluid
-import paddle.imperative as imperative
 import paddle.fluid.layers as layers
 import numpy as np
 import six
@@ -384,20 +383,21 @@ class TestArgsortDygraph(unittest.TestCase):
         self.place = core.CPUPlace()

     def test_api_0(self):
-        with imperative.guard(self.place):
-            var_x = imperative.to_variable(self.input_data)
-            out = paddle.argsort(var_x)
-            self.assertEqual((np.argsort(self.input_data) == out.numpy()).all(),
-                             True)
+        paddle.disable_static(self.place)
+        var_x = paddle.to_variable(self.input_data)
+        out = paddle.argsort(var_x)
+        self.assertEqual((np.argsort(self.input_data) == out.numpy()).all(),
+                         True)
+        paddle.enable_static()

     def test_api_1(self):
-        with imperative.guard(self.place):
-            var_x = imperative.to_variable(self.input_data)
-            out = paddle.argsort(var_x, axis=-1)
-            self.assertEqual(
-                (np.argsort(
-                    self.input_data, axis=-1) == out.numpy()).all(),
-                True)
+        paddle.disable_static(self.place)
+        var_x = paddle.to_variable(self.input_data)
+        out = paddle.argsort(var_x, axis=-1)
+        self.assertEqual(
+            (np.argsort(
+                self.input_data, axis=-1) == out.numpy()).all(), True)
+        paddle.enable_static()

 if __name__ == "__main__":
......
@@ -97,7 +97,7 @@ def create_paddle_case(op_type, callback):
             y = paddle.nn.data(name='y', shape=[1, 2, 3], dtype='int32')
             op = eval("paddle.%s" % (self.op_type))
             out = op(x, y)
-            exe = paddle.Executor(self.place)
+            exe = paddle.static.Executor(self.place)
             input_x = np.arange(1, 7).reshape((1, 2, 1, 3)).astype(np.int32)
             input_y = np.arange(0, 6).reshape((1, 2, 3)).astype(np.int32)
             real_result = callback(input_x, input_y)
......
@@ -268,9 +268,9 @@ class TestConcatAPI(unittest.TestCase):
         out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64)
         out_4 = paddle.concat(x=[x_2, x_3], axis=negative_int64)

-        exe = paddle.Executor(place=paddle.CPUPlace())
+        exe = paddle.static.Executor(place=paddle.CPUPlace())
         [res_1, res_2, res_3, res_4] = exe.run(
-            paddle.default_main_program(),
+            paddle.static.default_main_program(),
             feed={"x_1": input_2,
                   "x_2": input_2,
                   "x_3": input_3},
@@ -284,14 +284,15 @@ class TestConcatAPI(unittest.TestCase):
         in1 = np.array([[1, 2, 3], [4, 5, 6]])
         in2 = np.array([[11, 12, 13], [14, 15, 16]])
         in3 = np.array([[21, 22], [23, 24]])
-        with paddle.imperative.guard():
-            x1 = paddle.imperative.to_variable(in1)
-            x2 = paddle.imperative.to_variable(in2)
-            x3 = paddle.imperative.to_variable(in3)
-            out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
-            out2 = paddle.concat(x=[x1, x2], axis=0)
-            np_out1 = np.concatenate([in1, in2, in3], axis=-1)
-            np_out2 = np.concatenate([in1, in2], axis=0)
+        paddle.disable_static()
+        x1 = paddle.to_variable(in1)
+        x2 = paddle.to_variable(in2)
+        x3 = paddle.to_variable(in3)
+        out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
+        out2 = paddle.concat(x=[x1, x2], axis=0)
+        np_out1 = np.concatenate([in1, in2, in3], axis=-1)
+        np_out2 = np.concatenate([in1, in2], axis=0)
+        paddle.enable_static()
         self.assertEqual((out1.numpy() == np_out1).all(), True)
         self.assertEqual((out2.numpy() == np_out2).all(), True)
......
@@ -21,7 +21,7 @@ import paddle
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
-from paddle.imperative import to_variable
+from paddle import to_variable

 class TestCumsumOp(unittest.TestCase):
@@ -83,16 +83,18 @@ class TestCumsumOp(unittest.TestCase):
         self.assertTrue(np.allclose(z, out[5]))

     def test_cpu(self):
-        with paddle.imperative.guard(paddle.fluid.CPUPlace()):
-            self.run_cases()
+        paddle.disable_static(paddle.fluid.CPUPlace())
+        self.run_cases()
+        paddle.enable_static()

         self.run_static()

     def test_gpu(self):
         if not fluid.core.is_compiled_with_cuda():
             return
-        with paddle.imperative.guard(paddle.fluid.CUDAPlace(0)):
-            self.run_cases()
+        paddle.disable_static(paddle.fluid.CUDAPlace(0))
+        self.run_cases()
+        paddle.enable_static()

         self.run_static(use_gpu=True)
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import os
import sys
import time
import subprocess
import unittest
import numpy as np
import paddle


class TestDirectory(unittest.TestCase):
    def get_import_command(self, module):
        paths = module.split('.')
        if len(paths) <= 1:
            return module
        package = '.'.join(paths[:-1])
        func = paths[-1]
        cmd = 'from {} import {}'.format(package, func)
        return cmd
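    # For example, get_import_command('paddle.jit.save') returns
    # "from paddle.jit import save"; a single-component name such as
    # 'paddle' is returned unchanged.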
    def test_new_directory(self):
        new_directory = [
            'paddle.enable_static', 'paddle.disable_static',
            'paddle.in_dynamic_mode', 'paddle.to_variable', 'paddle.grad',
            'paddle.no_grad', 'paddle.save', 'paddle.load',
            'paddle.static.save', 'paddle.static.load',
            'paddle.BackwardStrategy', 'paddle.ParallelEnv',
            'paddle.prepare_context', 'paddle.DataParallel', 'paddle.jit',
            'paddle.jit.TracedLayer', 'paddle.jit.to_static',
            'paddle.jit.ProgramTranslator', 'paddle.jit.TranslatedLayer',
            'paddle.jit.save', 'paddle.jit.load', 'paddle.jit.SaveLoadConfig',
            'paddle.NoamDecay', 'paddle.PiecewiseDecay',
            'paddle.NaturalExpDecay', 'paddle.ExponentialDecay',
            'paddle.InverseTimeDecay', 'paddle.PolynomialDecay',
            'paddle.CosineDecay', 'paddle.static.Executor',
            'paddle.static.global_scope', 'paddle.static.scope_guard',
            'paddle.static.append_backward', 'paddle.static.gradients',
            'paddle.static.BuildStrategy', 'paddle.static.CompiledProgram',
            'paddle.static.ExecutionStrategy',
            'paddle.static.default_main_program',
            'paddle.static.default_startup_program', 'paddle.static.Program',
            'paddle.static.name_scope', 'paddle.static.program_guard',
            'paddle.static.Print', 'paddle.static.py_func',
            'paddle.static.ParallelExecutor',
            'paddle.static.WeightNormParamAttr', 'paddle.static.nn.fc',
            'paddle.static.nn.batch_norm',
            'paddle.static.nn.bilinear_tensor_product',
            'paddle.static.nn.conv2d', 'paddle.static.nn.conv2d_transpose',
            'paddle.static.nn.conv3d', 'paddle.static.nn.conv3d_transpose',
            'paddle.static.nn.create_parameter',
            'paddle.static.nn.crf_decoding', 'paddle.static.nn.data_norm',
            'paddle.static.nn.deformable_conv', 'paddle.static.nn.group_norm',
            'paddle.static.nn.hsigmoid', 'paddle.static.nn.instance_norm',
            'paddle.static.nn.layer_norm', 'paddle.static.nn.multi_box_head',
            'paddle.static.nn.nce', 'paddle.static.nn.prelu',
            'paddle.static.nn.row_conv', 'paddle.static.nn.spectral_norm',
            'paddle.static.nn.embedding'
        ]

        import_file = 'run_import_modules.py'

        with open(import_file, "w") as wb:
            for module in new_directory:
                run_cmd = self.get_import_command(module)
                wb.write("{}\n".format(run_cmd))

        _python = sys.executable
        ps_cmd = "{} {}".format(_python, import_file)
        ps_proc = subprocess.Popen(
            ps_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        stdout, stderr = ps_proc.communicate()

        assert "Error" not in str(stderr), "Error: Can't" \
            " import Module {}".format(module)

    def test_old_directory(self):
        old_directory = [
            'paddle.enable_imperative', 'paddle.disable_imperative',
            'paddle.in_imperative_mode', 'paddle.imperative.to_variable',
            'paddle.imperative.enable', 'paddle.imperative.guard',
            'paddle.imperative.grad', 'paddle.imperative.no_grad',
            'paddle.imperative.save', 'paddle.imperative.load',
            'paddle.imperative.BackwardStrategy',
            'paddle.imperative.ParallelEnv',
            'paddle.imperative.prepare_context',
            'paddle.imperative.DataParallel', 'paddle.imperative.jit',
            'paddle.imperative.TracedLayer', 'paddle.imperative.declarative',
            'paddle.imperative.ProgramTranslator',
            'paddle.imperative.TranslatedLayer', 'paddle.imperative.jit.save',
            'paddle.imperative.jit.load',
            'paddle.imperative.jit.SaveLoadConfig',
            'paddle.imperative.NoamDecay',
            'paddle.imperative.PiecewiseDecay',
            'paddle.imperative.NaturalExpDecay',
            'paddle.imperative.ExponentialDecay',
            'paddle.imperative.InverseTimeDecay',
            'paddle.imperative.PolynomialDecay',
            'paddle.imperative.CosineDecay', 'paddle.Executor',
            'paddle.global_scope', 'paddle.scope_guard',
            'paddle.append_backward', 'paddle.gradients',
            'paddle.BuildStrategy', 'paddle.CompiledProgram',
            'paddle.ExecutionStrategy', 'paddle.name_scope',
            'paddle.program_guard', 'paddle.Print', 'paddle.py_func',
            'paddle.ParallelExecutor', 'paddle.default_main_program',
            'paddle.default_startup_program', 'paddle.Program',
            'paddle.WeightNormParamAttr', 'paddle.declarative.fc',
            'paddle.declarative.batch_norm',
            'paddle.declarative.bilinear_tensor_product',
            'paddle.declarative.conv2d', 'paddle.declarative.conv2d_transpose',
            'paddle.declarative.conv3d', 'paddle.declarative.conv3d_transpose',
            'paddle.declarative.create_parameter',
            'paddle.declarative.crf_decoding', 'paddle.declarative.data_norm',
            'paddle.declarative.deformable_conv',
            'paddle.declarative.group_norm', 'paddle.declarative.hsigmoid',
            'paddle.declarative.instance_norm', 'paddle.declarative.layer_norm',
            'paddle.declarative.multi_box_head', 'paddle.declarative.nce',
            'paddle.declarative.prelu', 'paddle.declarative.row_conv',
            'paddle.declarative.spectral_norm', 'paddle.declarative.embedding'
        ]

        import_file = 'run_old_import_modules.py'

        with open(import_file, "w") as wb:
            cmd_context_count = """
count = 0
err_module = ""
"""
            wb.write(cmd_context_count)
            for module in old_directory:
                run_cmd = self.get_import_command(module)
                cmd_context_loop_template = """
try:
    {run_cmd}
except:
    count += 1
else:
    err_module = "{module}"
"""
                cmd_context_loop = cmd_context_loop_template.format(
                    run_cmd=run_cmd, module=module)
                wb.write(cmd_context_loop)
            cmd_context_print_template = """
if count != {len_old_directory}:
    print("Error: Module " + err_module + " should not be imported")
"""
            cmd_context_print = cmd_context_print_template.format(
                len_old_directory=str(len(old_directory)))
            wb.write(cmd_context_print)

        _python = sys.executable
        ps_cmd = "{} {}".format(_python, import_file)
        ps_proc = subprocess.Popen(
            ps_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        stdout, stderr = ps_proc.communicate()

        assert "Error" not in str(stdout), str(stdout)


if __name__ == '__main__':
    unittest.main()
@@ -74,41 +74,39 @@ class TestEyeOp2(OpTest):

 class API_TestTensorEye(unittest.TestCase):
     def test_out(self):
-        with paddle.program_guard(paddle.Program()):
+        with paddle.static.program_guard(paddle.static.Program()):
             data = paddle.eye(10)
             place = fluid.CPUPlace()
-            exe = paddle.Executor(place)
+            exe = paddle.static.Executor(place)
             result, = exe.run(fetch_list=[data])
             expected_result = np.eye(10, dtype="float32")
         self.assertEqual((result == expected_result).all(), True)

-        with paddle.program_guard(paddle.Program()):
+        with paddle.static.program_guard(paddle.static.Program()):
             data = paddle.eye(10, num_columns=7, dtype="float64")
             place = paddle.CPUPlace()
-            exe = paddle.Executor(place)
+            exe = paddle.static.Executor(place)
             result, = exe.run(fetch_list=[data])
             expected_result = np.eye(10, 7, dtype="float64")
         self.assertEqual((result == expected_result).all(), True)

-        with paddle.program_guard(paddle.Program()):
+        with paddle.static.program_guard(paddle.static.Program()):
             data = paddle.eye(10, dtype="int64")
             place = paddle.CPUPlace()
-            exe = paddle.Executor(place)
+            exe = paddle.static.Executor(place)
             result, = exe.run(fetch_list=[data])
             expected_result = np.eye(10, dtype="int64")
         self.assertEqual((result == expected_result).all(), True)

-        with paddle.imperative.guard():
-            out = paddle.eye(10, dtype="int64")
-            expected_result = np.eye(10, dtype="int64")
+        paddle.disable_static()
+        out = paddle.eye(10, dtype="int64")
+        expected_result = np.eye(10, dtype="int64")
+        paddle.enable_static()
         self.assertEqual((out.numpy() == expected_result).all(), True)

-        with paddle.imperative.guard():
-            batch_shape = [2]
-            out = fluid.layers.eye(10,
-                                   10,
-                                   dtype="int64",
-                                   batch_shape=batch_shape)
-            result = np.eye(10, dtype="int64")
-            expected_result = []
-            for index in reversed(batch_shape):
+        paddle.disable_static()
+        batch_shape = [2]
+        out = fluid.layers.eye(10, 10, dtype="int64", batch_shape=batch_shape)
+        result = np.eye(10, dtype="int64")
+        expected_result = []
+        for index in reversed(batch_shape):
@@ -117,16 +115,14 @@ class API_TestTensorEye(unittest.TestCase):
             tmp_result.append(result)
             result = tmp_result
         expected_result = np.stack(result, axis=0)
+        paddle.enable_static()
         self.assertEqual(out.numpy().shape == np.array(expected_result).shape,
                          True)
         self.assertEqual((out.numpy() == expected_result).all(), True)

-        with paddle.imperative.guard():
-            batch_shape = [3, 2]
-            out = fluid.layers.eye(10,
-                                   10,
-                                   dtype="int64",
-                                   batch_shape=batch_shape)
-            result = np.eye(10, dtype="int64")
-            expected_result = []
-            for index in reversed(batch_shape):
+        paddle.disable_static()
+        batch_shape = [3, 2]
+        out = fluid.layers.eye(10, 10, dtype="int64", batch_shape=batch_shape)
+        result = np.eye(10, dtype="int64")
+        expected_result = []
+        for index in reversed(batch_shape):
@@ -135,12 +131,13 @@ class API_TestTensorEye(unittest.TestCase):
             tmp_result.append(result)
             result = tmp_result
         expected_result = np.stack(result, axis=0)
+        paddle.enable_static()
         self.assertEqual(out.numpy().shape == np.array(expected_result).shape,
                          True)
         self.assertEqual((out.numpy() == expected_result).all(), True)

     def test_errors(self):
-        with paddle.program_guard(paddle.Program()):
+        with paddle.static.program_guard(paddle.static.Program()):

             def test_num_rows_type_check():
                 paddle.eye(-1, dtype="int64")
......
@@ -191,8 +191,8 @@ class TestFlattenPython(unittest.TestCase):
         self.assertRaises(ValueError, test_InputError)

         def test_Negative():
-            paddle.enable_imperative()
-            img = paddle.imperative.to_variable(x)
+            paddle.disable_static()
+            img = paddle.to_variable(x)
             out = paddle.flatten(img, start_axis=-2, stop_axis=-1)
             return out.numpy().shape
......
@@ -16,7 +16,7 @@ from __future__ import print_function

 import paddle
 import paddle.fluid.core as core
-from paddle import Program, program_guard
+from paddle.static import program_guard, Program
 import paddle.compat as cpt
 import unittest
 import numpy as np
@@ -38,7 +38,7 @@ class TestFullOp(unittest.TestCase):
         place = paddle.CPUPlace()
         if core.is_compiled_with_cuda():
             place = paddle.CUDAPlace(0)
-        exe = paddle.Executor(place)
+        exe = paddle.static.Executor(place)
         exe.run(startup_program)

         img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
@@ -53,12 +53,13 @@ class TestFullOp(unittest.TestCase):
             msg="full_like output is wrong, out = " + str(out_np))

     def test_full_like_imperative(self):
-        with paddle.imperative.guard():
-            input = paddle.arange(6, 10, dtype='float32')
-            out = paddle.full_like(input, fill_value=888.88, dtype='float32')
-            out_numpy = np.random.random((4)).astype("float32")
-            out_numpy.fill(888.88)
-            self.assertTrue((out.numpy() == out_numpy).all(), True)
+        paddle.disable_static()
+        input = paddle.arange(6, 10, dtype='float32')
+        out = paddle.full_like(input, fill_value=888.88, dtype='float32')
+        out_numpy = np.random.random((4)).astype("float32")
+        out_numpy.fill(888.88)
+        self.assertTrue((out.numpy() == out_numpy).all(), True)
+        paddle.enable_static()

 class TestFullOpError(unittest.TestCase):
......
@@ -205,27 +205,28 @@ class TestImperative(unittest.TestCase):
         self.assertTrue(np.array_equal(dy_grad1, dy_grad2))

     def test_functional_paddle_imperative_dygraph_context(self):
-        self.assertFalse(paddle.imperative.enabled())
-        paddle.enable_imperative()
-        self.assertTrue(paddle.imperative.enabled())
+        self.assertFalse(paddle.in_dynamic_mode())
+        paddle.disable_static()
+        self.assertTrue(paddle.in_dynamic_mode())
         np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
-        var_inp = paddle.imperative.to_variable(np_inp)
+        var_inp = paddle.to_variable(np_inp)
         mlp = MLP(input_size=2)
         out = mlp(var_inp)
         dy_out1 = out.numpy()
         out.backward()
         dy_grad1 = mlp._linear1.weight.gradient()
-        paddle.disable_imperative()
-        self.assertFalse(paddle.imperative.enabled())
-        with paddle.imperative.guard():
-            self.assertTrue(paddle.imperative.enabled())
-            var_inp = paddle.imperative.to_variable(np_inp)
-            mlp = MLP(input_size=2)
-            out = mlp(var_inp)
-            dy_out2 = out.numpy()
-            out.backward()
-            dy_grad2 = mlp._linear1.weight.gradient()
-        self.assertFalse(paddle.imperative.enabled())
+        paddle.enable_static()
+        self.assertFalse(paddle.in_dynamic_mode())
+        paddle.disable_static()
+        self.assertTrue(paddle.in_dynamic_mode())
+        var_inp = paddle.to_variable(np_inp)
+        mlp = MLP(input_size=2)
+        out = mlp(var_inp)
+        dy_out2 = out.numpy()
+        out.backward()
+        dy_grad2 = mlp._linear1.weight.gradient()
+        paddle.enable_static()
+        self.assertFalse(paddle.in_dynamic_mode())
         self.assertTrue(np.array_equal(dy_out1, dy_out2))
         self.assertTrue(np.array_equal(dy_grad1, dy_grad2))
@@ -281,7 +282,7 @@ class TestImperative(unittest.TestCase):
         l0 = fluid.Linear(2, 2)
         self.assertTrue(l0.weight._grad_ivar() is None)
         l1 = fluid.Linear(2, 2)
-        with paddle.imperative.no_grad():
+        with paddle.no_grad():
             self.assertTrue(l1.weight.stop_gradient is False)
             tmp = l1.weight * 2
             self.assertTrue(tmp.stop_gradient)
......
@@ -43,7 +43,7 @@ class MLP(fluid.Layer):
 class TestDataParallelStateDict(unittest.TestCase):
     def test_data_parallel_state_dict(self):
         with fluid.dygraph.guard():
-            strategy = paddle.imperative.prepare_context()
+            strategy = paddle.prepare_context()
             mlp = MLP()
             parallel_mlp = dygraph.parallel.DataParallel(mlp, strategy)
......
@@ -153,7 +153,7 @@ class TestImperativeMnist(unittest.TestCase):
                 label.stop_gradient = True

                 if batch_id % 10 == 0:
-                    cost, traced_layer = paddle.imperative.TracedLayer.trace(
+                    cost, traced_layer = paddle.jit.TracedLayer.trace(
                         mnist, inputs=img)
                     if program is not None:
                         self.assertTrue(program, traced_layer.program)
......
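`TracedLayer` moves from `paddle.imperative` to `paddle.jit`. A hedged sketch of the relocated tracing entry point (hypothetical layer and input):

```python
import numpy as np
import paddle
import paddle.fluid as fluid

paddle.disable_static()
layer = fluid.dygraph.Linear(4, 2)
x = paddle.to_variable(np.random.rand(1, 4).astype('float32'))

# Formerly paddle.imperative.TracedLayer; trace returns (output, traced_layer).
out, traced = paddle.jit.TracedLayer.trace(layer, inputs=[x])
traced.save_inference_model('./traced_linear')
paddle.enable_static()
```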
@@ -292,7 +292,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
                 np_t = v.numpy()
                 self.model_base[k] = np_t

-            paddle.imperative.save(self.state_dict, "./test_dy")
+            paddle.save(self.state_dict, "./test_dy")

     def testLoadAndSetVarBase(self):
         seed = 90
@@ -373,8 +373,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
             if isinstance(adam._learning_rate, LearningRateDecay):
                 adam._learning_rate.step_num = 0

-            para_state_dict, opti_state_dict = paddle.imperative.load(
-                "./test_dy")
+            para_state_dict, opti_state_dict = paddle.load("./test_dy")
             adam.set_dict(opti_state_dict)

             opti_dict = adam.state_dict()
@@ -900,18 +899,17 @@ class TestDygraphPtbRnn(unittest.TestCase):
         with fluid.dygraph.guard():
             emb = fluid.dygraph.Embedding([10, 10])

             state_dict = emb.state_dict()
-            paddle.imperative.save(state_dict,
-                                   os.path.join('saved_dy', 'emb_dy'))
+            paddle.save(state_dict, os.path.join('saved_dy', 'emb_dy'))

-            para_state_dict, opti_state_dict = paddle.imperative.load(
+            para_state_dict, opti_state_dict = paddle.load(
                 os.path.join('saved_dy', 'emb_dy'))

             self.assertTrue(opti_state_dict == None)

-            para_state_dict, opti_state_dict = paddle.imperative.load(
+            para_state_dict, opti_state_dict = paddle.load(
                 os.path.join('saved_dy', 'emb_dy.pdparams'))

-            para_state_dict, opti_state_dict = paddle.imperative.load(
+            para_state_dict, opti_state_dict = paddle.load(
                 os.path.join('saved_dy', 'emb_dy.pdopt'))
......
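`paddle.imperative.save`/`load` become top-level `paddle.save`/`paddle.load`, keeping the state-dict/path convention used in this test. A condensed round trip, assuming this build:

```python
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    emb = fluid.dygraph.Embedding([10, 10])

    # paddle.save writes <path>.pdparams; paddle.load returns a
    # (param_state_dict, optimizer_state_dict) pair, the latter None
    # when no optimizer state was saved alongside the parameters.
    paddle.save(emb.state_dict(), './emb_dy')
    para_state_dict, opti_state_dict = paddle.load('./emb_dy')
    emb.set_dict(para_state_dict)
```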
@@ -47,13 +47,13 @@ class TestSimpleNet(unittest.TestCase):
         for place in places:
             for dtype in ["float32", "float64"]:
                 for sort_sum_gradient in [True, False]:
-                    with paddle.imperative.guard(place):
-                        backward_strategy = paddle.imperative.BackwardStrategy()
+                    paddle.disable_static(place)
+                    backward_strategy = paddle.BackwardStrategy()
                     backward_strategy.sort_sum_gradient = sort_sum_gradient
                     # grad_clip = fluid.clip.GradientClipByGlobalNorm(5.0)

                     input_word = np.array([[1, 2], [2, 1]]).astype('int64')
-                    input = paddle.imperative.to_variable(input_word)
+                    input = paddle.to_variable(input_word)
                     simplenet = SimpleNet(20, 32, dtype)
                     adam = SGDOptimizer(
@@ -74,6 +74,7 @@ class TestSimpleNet(unittest.TestCase):
                     input_emb.clear_gradient()
                     self.assertTrue(input_emb.gradient() is not None)
+                    paddle.enable_static()

     def test_selectedrows_gradient2(self):
         places = [fluid.CPUPlace()]
......
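`BackwardStrategy` likewise moves to the root namespace. A small sketch of the relocated class (assuming this build; `sort_sum_gradient` forces a deterministic gradient-summation order):

```python
import numpy as np
import paddle

paddle.disable_static()
bs = paddle.BackwardStrategy()  # formerly paddle.imperative.BackwardStrategy
bs.sort_sum_gradient = True

x = paddle.to_variable(np.ones([2, 2], dtype='float32'))
x.stop_gradient = False
loss = paddle.fluid.layers.reduce_sum(x * x)
loss.backward(bs)               # the strategy is handed to backward()
print(x.gradient())             # 2 * x
paddle.enable_static()
```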
@@ -82,15 +82,16 @@ class TestLinspaceAPI(unittest.TestCase):
         assert np.array_equal(res_1, res_2)

     def test_name(self):
-        with paddle.program_guard(paddle.Program()):
+        with paddle.static.program_guard(paddle.static.Program()):
             out = paddle.linspace(
                 0, 10, 5, dtype='float32', name='linspace_res')
             assert 'linspace_res' in out.name

     def test_imperative(self):
-        with paddle.imperative.guard():
-            out = paddle.linspace(0, 10, 5, dtype='float32')
-            np_out = np.linspace(0, 10, 5, dtype='float32')
+        paddle.disable_static()
+        out = paddle.linspace(0, 10, 5, dtype='float32')
+        np_out = np.linspace(0, 10, 5, dtype='float32')
+        paddle.enable_static()
         self.assertEqual((out.numpy() == np_out).all(), True)
......
@@ -41,9 +41,9 @@ class TestMultiplyAPI(unittest.TestCase):
         return res

     def __run_dynamic_graph_case(self, x_data, y_data, axis=-1):
-        paddle.enable_imperative()
-        x = paddle.imperative.to_variable(x_data)
-        y = paddle.imperative.to_variable(y_data)
+        paddle.disable_static()
+        x = paddle.to_variable(x_data)
+        y = paddle.to_variable(y_data)
         res = paddle.multiply(x, y, axis=axis)
         return res.numpy()
@@ -107,7 +107,7 @@ class TestMultiplyError(unittest.TestCase):
     def test_errors(self):
         """test_errors."""
         # test static computation graph: dtype can not be int8
-        paddle.disable_imperative()
+        paddle.enable_static()
         with program_guard(Program(), Program()):
             x = paddle.nn.data(name='x', shape=[100], dtype=np.int8)
             y = paddle.nn.data(name='y', shape=[100], dtype=np.int8)
@@ -121,18 +121,18 @@ class TestMultiplyError(unittest.TestCase):
         np.random.seed(7)

         # test dynamic computation graph: dtype can not be int8
-        paddle.enable_imperative()
+        paddle.disable_static()
         x_data = np.random.randn(200).astype(np.int8)
         y_data = np.random.randn(200).astype(np.int8)
-        x = paddle.imperative.to_variable(x_data)
-        y = paddle.imperative.to_variable(y_data)
+        x = paddle.to_variable(x_data)
+        y = paddle.to_variable(y_data)
         self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y)

         # test dynamic computation graph: inputs must be broadcastable
         x_data = np.random.rand(200, 5)
         y_data = np.random.rand(200)
-        x = paddle.imperative.to_variable(x_data)
-        y = paddle.imperative.to_variable(y_data)
+        x = paddle.to_variable(x_data)
+        y = paddle.to_variable(y_data)
         self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y)
......
@@ -62,18 +62,18 @@ class TestOnesLikeImpeartive(unittest.TestCase):
         shape = [3, 4]
         place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
         ) else fluid.CPUPlace()
-        with paddle.imperative.guard(place):
-            x = paddle.imperative.to_variable(np.ones(shape))
+        paddle.disable_static(place)
+        x = paddle.to_variable(np.ones(shape))
         for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]:
             out = ones_like(x, dtype)
-            self.assertEqual((out.numpy() == np.ones(shape, dtype)).all(),
-                             True)
+            self.assertEqual((out.numpy() == np.ones(shape, dtype)).all(), True)

             out = paddle.tensor.ones_like(x)
             self.assertEqual((out.numpy() == np.ones(shape, dtype)).all(), True)

             out = paddle.tensor.creation.ones_like(x)
             self.assertEqual((out.numpy() == np.ones(shape, dtype)).all(), True)
+        paddle.enable_static()

 if __name__ == "__main__":
......
@@ -27,35 +27,35 @@ import numpy as np

 class ApiOnesTest(unittest.TestCase):
     def test_paddle_ones(self):
-        with paddle.program_guard(paddle.Program()):
+        with paddle.static.program_guard(paddle.static.Program()):
             ones = paddle.ones(shape=[10])
             place = paddle.CPUPlace()
-            exe = paddle.Executor(place)
+            exe = paddle.static.Executor(place)
             result, = exe.run(fetch_list=[ones])
             expected_result = np.ones(10, dtype="float32")
         self.assertEqual((result == expected_result).all(), True)

-        with paddle.program_guard(paddle.Program()):
+        with paddle.static.program_guard(paddle.static.Program()):
             ones = paddle.ones(shape=[10], dtype="float64")
             place = paddle.CPUPlace()
-            exe = paddle.Executor(place)
+            exe = paddle.static.Executor(place)
             result, = exe.run(fetch_list=[ones])
             expected_result = np.ones(10, dtype="float64")
         self.assertEqual((result == expected_result).all(), True)

-        with paddle.program_guard(paddle.Program()):
+        with paddle.static.program_guard(paddle.static.Program()):
             ones = paddle.ones(shape=[10], dtype="int64")
             place = paddle.CPUPlace()
-            exe = paddle.Executor(place)
+            exe = paddle.static.Executor(place)
             result, = exe.run(fetch_list=[ones])
             expected_result = np.ones(10, dtype="int64")
         self.assertEqual((result == expected_result).all(), True)

     def test_fluid_ones(self):
-        with paddle.program_guard(paddle.Program()):
+        with paddle.static.program_guard(paddle.static.Program()):
             ones = fluid.layers.ones(shape=[10], dtype="int64")
             place = paddle.CPUPlace()
-            exe = paddle.Executor(place)
+            exe = paddle.static.Executor(place)
             result, = exe.run(fetch_list=[ones])
             expected_result = np.ones(10, dtype="int64")
             self.assertEqual((result == expected_result).all(), True)
@@ -64,25 +64,25 @@ class ApiOnesTest(unittest.TestCase):

 class ApiOnesZerosError(unittest.TestCase):
     def test_errors(self):
         def test_error1():
-            with paddle.program_guard(paddle.Program()):
+            with paddle.static.program_guard(paddle.static.Program()):
                 ones = paddle.ones(shape=10, dtype="int64")

         self.assertRaises(TypeError, test_error1)

         def test_error2():
-            with paddle.program_guard(paddle.Program()):
+            with paddle.static.program_guard(paddle.static.Program()):
                 ones = paddle.ones(shape=10)

         self.assertRaises(TypeError, test_error2)

         def test_error3():
-            with paddle.program_guard(paddle.Program()):
+            with paddle.static.program_guard(paddle.static.Program()):
                 ones = fluid.layers.ones(shape=10, dtype="int64")

         self.assertRaises(TypeError, test_error3)

         def test_error4():
-            with paddle.program_guard(paddle.Program()):
+            with paddle.static.program_guard(paddle.static.Program()):
                 ones = fluid.layers.ones(shape=[10], dtype="int8")

         self.assertRaises(TypeError, test_error4)
......
@@ -22,7 +22,7 @@ import paddle

 def _dygraph_guard_(func):
     def __impl__(*args, **kwargs):
-        if paddle.in_imperative_mode():
+        if paddle.in_dynamic_mode():
             return func(*args, **kwargs)
         else:
             with fluid.dygraph.guard():
@@ -54,7 +54,7 @@ class TestDygraphDoubleGrad(TestCase):
                   allow_unused=False):
         backward_strategy = fluid.dygraph.BackwardStrategy()
         backward_strategy.sort_sum_gradient = self.sort_sum_gradient
-        return paddle.imperative.grad(
+        return paddle.grad(
             outputs=outputs,
             inputs=inputs,
             grad_outputs=grad_outputs,
......
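`paddle.imperative.grad` becomes top-level `paddle.grad`; the keyword names in the call above carry over. A minimal first-order example (a sketch, assuming this build):

```python
import numpy as np
import paddle

paddle.disable_static()
x = paddle.to_variable(np.array([[1., 2.], [3., 4.]], dtype='float32'))
x.stop_gradient = False
y = x * x

# grad_outputs=None defaults to ones, as in the wrapped call above
dx, = paddle.grad(outputs=[y], inputs=[x], grad_outputs=None)
print(dx.numpy())  # == 2 * x
paddle.enable_static()
```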
@@ -19,7 +19,7 @@ import numpy as np
 from op_test import OpTest
 import paddle
 from paddle.fluid import core
-from paddle import Program, program_guard
+from paddle.static import program_guard, Program

 def output_hist(out):
@@ -132,7 +132,7 @@ class TestRandintAPI(unittest.TestCase):
         place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
         ) else paddle.CPUPlace()
-        exe = paddle.Executor(place)
+        exe = paddle.static.Executor(place)
         outs = exe.run(
             feed={'var_shape': np.array([100, 100]).astype('int64')},
             fetch_list=[out1, out2, out3, out4, out5])
@@ -141,13 +141,14 @@ class TestRandintAPI(unittest.TestCase):
 class TestRandintImperative(unittest.TestCase):
     def test_api(self):
         n = 10
-        with paddle.imperative.guard():
-            x1 = paddle.randint(n, shape=[10], dtype="int32")
-            x2 = paddle.tensor.randint(n)
-            x3 = paddle.tensor.random.randint(n)
-            for i in [x1, x2, x3]:
-                for j in i.numpy().tolist():
-                    self.assertTrue((j >= 0 and j < n))
+        paddle.disable_static()
+        x1 = paddle.randint(n, shape=[10], dtype="int32")
+        x2 = paddle.tensor.randint(n)
+        x3 = paddle.tensor.random.randint(n)
+        for i in [x1, x2, x3]:
+            for j in i.numpy().tolist():
+                self.assertTrue((j >= 0 and j < n))
+        paddle.enable_static()

 if __name__ == "__main__":
......
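The with paddle.imperative.guard() context manager is replaced by an explicit paddle.disable_static() / paddle.enable_static() bracket, since static mode is now the default. The pattern in isolation:

import paddle

paddle.disable_static()   # enter dynamic-graph (imperative) mode
x = paddle.randint(10, shape=[4], dtype="int32")
assert all(0 <= v < 10 for v in x.numpy().tolist())
paddle.enable_static()    # restore the default static mode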
...@@ -18,7 +18,7 @@ import unittest ...@@ -18,7 +18,7 @@ import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
from paddle import Program, program_guard from paddle.static import program_guard, Program
class TestRandnOp(unittest.TestCase): class TestRandnOp(unittest.TestCase):
...@@ -39,7 +39,7 @@ class TestRandnOp(unittest.TestCase): ...@@ -39,7 +39,7 @@ class TestRandnOp(unittest.TestCase):
place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda( place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace() ) else paddle.CPUPlace()
exe = paddle.Executor(place) exe = paddle.static.Executor(place)
res = exe.run(train_program, res = exe.run(train_program,
feed={'X': np.array( feed={'X': np.array(
shape, dtype='int32')}, shape, dtype='int32')},
...@@ -55,7 +55,7 @@ class TestRandnOpForDygraph(unittest.TestCase): ...@@ -55,7 +55,7 @@ class TestRandnOpForDygraph(unittest.TestCase):
shape = [1000, 784] shape = [1000, 784]
place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda( place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace() ) else paddle.CPUPlace()
with paddle.imperative.guard(place): paddle.disable_static(place)
x1 = paddle.randn(shape, 'float32') x1 = paddle.randn(shape, 'float32')
x2 = paddle.randn(shape, 'float64') x2 = paddle.randn(shape, 'float64')
...@@ -63,12 +63,13 @@ class TestRandnOpForDygraph(unittest.TestCase): ...@@ -63,12 +63,13 @@ class TestRandnOpForDygraph(unittest.TestCase):
dim_2 = paddle.fill_constant([1], "int32", 50) dim_2 = paddle.fill_constant([1], "int32", 50)
x3 = paddle.randn(shape=[dim_1, dim_2, 784]) x3 = paddle.randn(shape=[dim_1, dim_2, 784])
var_shape = paddle.imperative.to_variable(np.array(shape)) var_shape = paddle.to_variable(np.array(shape))
x4 = paddle.randn(var_shape) x4 = paddle.randn(var_shape)
for out in [x1, x2, x3, x4]: for out in [x1, x2, x3, x4]:
self.assertAlmostEqual(np.mean(out.numpy()), .0, delta=0.1) self.assertAlmostEqual(np.mean(out.numpy()), .0, delta=0.1)
self.assertAlmostEqual(np.std(out.numpy()), 1., delta=0.1) self.assertAlmostEqual(np.std(out.numpy()), 1., delta=0.1)
paddle.enable_static()
class TestRandnOpError(unittest.TestCase): class TestRandnOpError(unittest.TestCase):
......
...@@ -17,7 +17,7 @@ import numpy as np ...@@ -17,7 +17,7 @@ import numpy as np
from op_test import OpTest from op_test import OpTest
import paddle import paddle
import paddle.fluid.core as core import paddle.fluid.core as core
from paddle import Program, program_guard from paddle.static import program_guard, Program
def check_randperm_out(n, data_np): def check_randperm_out(n, data_np):
...@@ -108,7 +108,7 @@ class TestRandpermAPI(unittest.TestCase): ...@@ -108,7 +108,7 @@ class TestRandpermAPI(unittest.TestCase):
x1 = paddle.randperm(n) x1 = paddle.randperm(n)
x2 = paddle.randperm(n, 'float32') x2 = paddle.randperm(n, 'float32')
exe = paddle.Executor(place) exe = paddle.static.Executor(place)
res = exe.run(fetch_list=[x1, x2]) res = exe.run(fetch_list=[x1, x2])
self.assertEqual(res[0].dtype, np.int64) self.assertEqual(res[0].dtype, np.int64)
...@@ -119,13 +119,14 @@ class TestRandpermAPI(unittest.TestCase): ...@@ -119,13 +119,14 @@ class TestRandpermAPI(unittest.TestCase):
class TestRandpermImperative(unittest.TestCase): class TestRandpermImperative(unittest.TestCase):
def test_out(self): def test_out(self):
with paddle.imperative.guard(): paddle.disable_static()
n = 10 n = 10
for dtype in ['int32', np.int64, 'float32', 'float64']: for dtype in ['int32', np.int64, 'float32', 'float64']:
data_p = paddle.randperm(n, dtype) data_p = paddle.randperm(n, dtype)
data_np = data_p.numpy() data_np = data_p.numpy()
self.assertTrue( self.assertTrue(
check_randperm_out(n, data_np), msg=error_msg(data_np)) check_randperm_out(n, data_np), msg=error_msg(data_np))
paddle.enable_static()
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -17,7 +17,7 @@ import paddle ...@@ -17,7 +17,7 @@ import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import unittest import unittest
paddle.enable_imperative() paddle.disable_static()
SEED = 2020 SEED = 2020
np.random.seed(SEED) np.random.seed(SEED)
fluid.default_main_program().random_seed = SEED fluid.default_main_program().random_seed = SEED
...@@ -73,7 +73,7 @@ class TestRetainGraph(unittest.TestCase): ...@@ -73,7 +73,7 @@ class TestRetainGraph(unittest.TestCase):
outs = paddle.fill_constant(disc_interpolates.shape, outs = paddle.fill_constant(disc_interpolates.shape,
disc_interpolates.dtype, 1.0) disc_interpolates.dtype, 1.0)
gradients = paddle.imperative.grad( gradients = paddle.grad(
outputs=disc_interpolates, outputs=disc_interpolates,
inputs=fake_AB, inputs=fake_AB,
grad_outputs=outs, grad_outputs=outs,
...@@ -103,8 +103,8 @@ class TestRetainGraph(unittest.TestCase): ...@@ -103,8 +103,8 @@ class TestRetainGraph(unittest.TestCase):
A = np.random.rand(2, 3, 32, 32).astype('float32') A = np.random.rand(2, 3, 32, 32).astype('float32')
B = np.random.rand(2, 3, 32, 32).astype('float32') B = np.random.rand(2, 3, 32, 32).astype('float32')
realA = paddle.imperative.to_variable(A) realA = paddle.to_variable(A)
realB = paddle.imperative.to_variable(B) realB = paddle.to_variable(B)
fakeB = g(realA) fakeB = g(realA)
optim_d.clear_gradients() optim_d.clear_gradients()
......
...@@ -17,7 +17,6 @@ from __future__ import print_function ...@@ -17,7 +17,6 @@ from __future__ import print_function
import unittest import unittest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.imperative as imperative
import paddle.fluid.layers as layers import paddle.fluid.layers as layers
import numpy as np import numpy as np
import six import six
...@@ -72,16 +71,17 @@ class TestSortDygraph(unittest.TestCase): ...@@ -72,16 +71,17 @@ class TestSortDygraph(unittest.TestCase):
self.place = core.CPUPlace() self.place = core.CPUPlace()
def test_api_0(self): def test_api_0(self):
with imperative.guard(self.place): paddle.disable_static(self.place)
var_x = imperative.to_variable(self.input_data) var_x = paddle.to_variable(self.input_data)
out = paddle.sort(var_x) out = paddle.sort(var_x)
self.assertEqual((np.sort(self.input_data) == out.numpy()).all(), self.assertEqual((np.sort(self.input_data) == out.numpy()).all(), True)
True) paddle.enable_static()
def test_api_1(self): def test_api_1(self):
with imperative.guard(self.place): paddle.disable_static(self.place)
var_x = imperative.to_variable(self.input_data) var_x = paddle.to_variable(self.input_data)
out = paddle.sort(var_x, axis=-1) out = paddle.sort(var_x, axis=-1)
self.assertEqual( self.assertEqual(
(np.sort( (np.sort(
self.input_data, axis=-1) == out.numpy()).all(), True) self.input_data, axis=-1) == out.numpy()).all(), True)
paddle.enable_static()
...@@ -62,20 +62,19 @@ class TestZerosLikeImpeartive(unittest.TestCase): ...@@ -62,20 +62,19 @@ class TestZerosLikeImpeartive(unittest.TestCase):
shape = [3, 4] shape = [3, 4]
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace() ) else fluid.CPUPlace()
with paddle.imperative.guard(place): paddle.disable_static(place)
x = paddle.imperative.to_variable(np.ones(shape)) x = paddle.to_variable(np.ones(shape))
for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]: for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]:
out = zeros_like(x, dtype) out = zeros_like(x, dtype)
self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(),
True) True)
out = paddle.tensor.zeros_like(x) out = paddle.tensor.zeros_like(x)
self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True)
True)
out = paddle.tensor.creation.zeros_like(x) out = paddle.tensor.creation.zeros_like(x)
self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(), True)
True) paddle.enable_static()
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -39,15 +39,15 @@ class ApiZerosTest(unittest.TestCase): ...@@ -39,15 +39,15 @@ class ApiZerosTest(unittest.TestCase):
with program_guard(Program()): with program_guard(Program()):
zeros = paddle.zeros(shape=[10], dtype="float64") zeros = paddle.zeros(shape=[10], dtype="float64")
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.Executor(place) exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[zeros]) result, = exe.run(fetch_list=[zeros])
expected_result = np.zeros(10, dtype="float64") expected_result = np.zeros(10, dtype="float64")
self.assertEqual((result == expected_result).all(), True) self.assertEqual((result == expected_result).all(), True)
with paddle.program_guard(Program()): with paddle.static.program_guard(Program()):
zeros = paddle.zeros(shape=[10], dtype="int64") zeros = paddle.zeros(shape=[10], dtype="int64")
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.Executor(place) exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[zeros]) result, = exe.run(fetch_list=[zeros])
expected_result = np.zeros(10, dtype="int64") expected_result = np.zeros(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True) self.assertEqual((result == expected_result).all(), True)
...@@ -55,7 +55,7 @@ class ApiZerosTest(unittest.TestCase): ...@@ -55,7 +55,7 @@ class ApiZerosTest(unittest.TestCase):
with program_guard(Program()): with program_guard(Program()):
zeros = paddle.zeros(shape=[10], dtype="int64") zeros = paddle.zeros(shape=[10], dtype="int64")
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.Executor(place) exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[zeros]) result, = exe.run(fetch_list=[zeros])
expected_result = np.zeros(10, dtype="int64") expected_result = np.zeros(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True) self.assertEqual((result == expected_result).all(), True)
...@@ -64,7 +64,7 @@ class ApiZerosTest(unittest.TestCase): ...@@ -64,7 +64,7 @@ class ApiZerosTest(unittest.TestCase):
out_np = np.zeros(shape=(1), dtype='float32') out_np = np.zeros(shape=(1), dtype='float32')
out = paddle.zeros(shape=[1], dtype="float32") out = paddle.zeros(shape=[1], dtype="float32")
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.Executor(place) exe = paddle.static.Executor(place)
result = exe.run(fetch_list=[out]) result = exe.run(fetch_list=[out])
self.assertEqual((result == out_np).all(), True) self.assertEqual((result == out_np).all(), True)
...@@ -72,7 +72,7 @@ class ApiZerosTest(unittest.TestCase): ...@@ -72,7 +72,7 @@ class ApiZerosTest(unittest.TestCase):
with program_guard(Program()): with program_guard(Program()):
zeros = fluid.layers.zeros(shape=[10], dtype="int64") zeros = fluid.layers.zeros(shape=[10], dtype="int64")
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.Executor(place) exe = paddle.static.Executor(place)
result, = exe.run(fetch_list=[zeros]) result, = exe.run(fetch_list=[zeros])
expected_result = np.zeros(10, dtype="int64") expected_result = np.zeros(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True) self.assertEqual((result == expected_result).all(), True)
...@@ -81,13 +81,13 @@ class ApiZerosTest(unittest.TestCase): ...@@ -81,13 +81,13 @@ class ApiZerosTest(unittest.TestCase):
class ApiZerosError(unittest.TestCase): class ApiZerosError(unittest.TestCase):
def test_errors(self): def test_errors(self):
def test_error1(): def test_error1():
with paddle.program_guard(fluid.Program()): with paddle.static.program_guard(fluid.Program()):
ones = fluid.layers.zeros(shape=10, dtype="int64") ones = fluid.layers.zeros(shape=10, dtype="int64")
self.assertRaises(TypeError, test_error1) self.assertRaises(TypeError, test_error1)
def test_error2(): def test_error2():
with paddle.program_guard(fluid.Program()): with paddle.static.program_guard(fluid.Program()):
ones = fluid.layers.zeros(shape=[10], dtype="int8") ones = fluid.layers.zeros(shape=[10], dtype="int8")
self.assertRaises(TypeError, test_error2) self.assertRaises(TypeError, test_error2)
......
...@@ -14,23 +14,49 @@ ...@@ -14,23 +14,49 @@
# TODO: import framework api under this directory # TODO: import framework api under this directory
__all__ = [ __all__ = [
'append_backward', 'gradients', 'Executor', 'global_scope', 'scope_guard', 'create_global_var', 'create_parameter', 'ParamAttr', 'Variable',
'BuildStrategy', 'CompiledProgram', 'default_main_program',
'default_startup_program', 'create_global_var', 'create_parameter', 'Print',
'py_func', 'ExecutionStrategy', 'name_scope', 'ParallelExecutor',
'ParamAttr', 'Program', 'program_guard', 'Variable', 'WeightNormParamAttr',
'CPUPlace', 'CUDAPlace', 'CUDAPinnedPlace' 'CPUPlace', 'CUDAPlace', 'CUDAPinnedPlace'
] ]
__all__ += [
'BackwardStrategy', 'grad', 'LayerList', 'load', 'save', 'prepare_context',
'to_variable', 'no_grad', 'ParallelEnv', 'DataParallel'
]
__all__ += [
'NoamDecay', 'PiecewiseDecay', 'NaturalExpDecay', 'ExponentialDecay',
'InverseTimeDecay', 'PolynomialDecay', 'CosineDecay'
]
from . import random from . import random
from .random import manual_seed from .random import manual_seed
from ..fluid.executor import Executor, global_scope, scope_guard
from ..fluid.backward import append_backward, gradients from ..fluid.framework import Variable #DEFINE_ALIAS
from ..fluid.compiler import BuildStrategy, CompiledProgram, ExecutionStrategy from ..fluid.param_attr import ParamAttr #DEFINE_ALIAS
from ..fluid.framework import default_main_program, default_startup_program, name_scope, Program, program_guard, Variable from ..fluid.layers.tensor import create_global_var #DEFINE_ALIAS
from ..fluid.layers.control_flow import Print from ..fluid.layers.tensor import create_parameter #DEFINE_ALIAS
from ..fluid.layers.nn import py_func from ..fluid.core import CPUPlace #DEFINE_ALIAS
from ..fluid.parallel_executor import ParallelExecutor from ..fluid.core import CUDAPlace #DEFINE_ALIAS
from ..fluid.param_attr import ParamAttr, WeightNormParamAttr from ..fluid.core import CUDAPinnedPlace #DEFINE_ALIAS
from ..fluid.layers.tensor import create_global_var, create_parameter
from ..fluid.core import CPUPlace, CUDAPlace, CUDAPinnedPlace from paddle.fluid import core #DEFINE_ALIAS
from ..fluid.dygraph.base import no_grad #DEFINE_ALIAS
from ..fluid.dygraph.base import to_variable #DEFINE_ALIAS
from ..fluid.dygraph.base import grad #DEFINE_ALIAS
from ..fluid.dygraph.checkpoint import load_dygraph #DEFINE_ALIAS
from ..fluid.dygraph.checkpoint import save_dygraph #DEFINE_ALIAS
from ..fluid.dygraph.checkpoint import load_dygraph as load #DEFINE_ALIAS
from ..fluid.dygraph.checkpoint import save_dygraph as save #DEFINE_ALIAS
from ..fluid.dygraph.parallel import prepare_context #DEFINE_ALIAS
from ..fluid.dygraph.parallel import ParallelEnv #DEFINE_ALIAS
from ..fluid.dygraph.parallel import DataParallel #DEFINE_ALIAS
from ..fluid.dygraph.learning_rate_scheduler import NoamDecay #DEFINE_ALIAS
from ..fluid.dygraph.learning_rate_scheduler import PiecewiseDecay #DEFINE_ALIAS
from ..fluid.dygraph.learning_rate_scheduler import NaturalExpDecay #DEFINE_ALIAS
from ..fluid.dygraph.learning_rate_scheduler import ExponentialDecay #DEFINE_ALIAS
from ..fluid.dygraph.learning_rate_scheduler import InverseTimeDecay #DEFINE_ALIAS
from ..fluid.dygraph.learning_rate_scheduler import PolynomialDecay #DEFINE_ALIAS
from ..fluid.dygraph.learning_rate_scheduler import CosineDecay #DEFINE_ALIAS
BackwardStrategy = core.BackwardStrategy
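The framework package now re-exports the dygraph checkpoint helpers, with save/load as short names for save_dygraph/load_dygraph. A hedged round-trip sketch, assuming paddle.nn.Linear is available in this tree:

import paddle
import paddle.framework as framework

paddle.disable_static()
linear = paddle.nn.Linear(2, 2)  # small illustrative layer
framework.save(linear.state_dict(), "linear_ckpt")  # writes linear_ckpt.pdparams
state_dict, _ = framework.load("linear_ckpt")       # (parameter dict, optimizer dict)
linear.set_dict(state_dict)
paddle.enable_static()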
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# define api used to run in imperative mode
__all__ = [
'BackwardStrategy', 'enabled', 'grad', 'guard', 'LayerList', 'load', 'save',
'prepare_context', 'to_variable', 'TracedLayer', 'no_grad', 'ParallelEnv',
'ProgramTranslator', 'declarative', 'DataParallel', 'TranslatedLayer', 'jit'
]
__all__ += [
'NoamDecay', 'PiecewiseDecay', 'NaturalExpDecay', 'ExponentialDecay',
'InverseTimeDecay', 'PolynomialDecay', 'CosineDecay'
]
from paddle.fluid import core
from ..fluid.dygraph.base import enabled, guard, no_grad, to_variable, grad
from ..fluid.dygraph.checkpoint import load_dygraph as load
from ..fluid.dygraph.checkpoint import save_dygraph as save
from ..fluid.dygraph.parallel import prepare_context, ParallelEnv, DataParallel
from ..fluid.dygraph.jit import TracedLayer, declarative
from ..fluid.dygraph import ProgramTranslator
from . import jit
from ..fluid.dygraph.learning_rate_scheduler import NoamDecay, PiecewiseDecay, NaturalExpDecay, ExponentialDecay, \
InverseTimeDecay, PolynomialDecay, CosineDecay
BackwardStrategy = core.BackwardStrategy
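BackwardStrategy stays reachable through core, which is what the double-grad test earlier in this diff relies on:

import paddle.fluid as fluid

# Same flag TestDygraphDoubleGrad sets before calling paddle.grad.
strategy = fluid.dygraph.BackwardStrategy()
strategy.sort_sum_gradient = True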
...@@ -261,8 +261,8 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None): ...@@ -261,8 +261,8 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
case1 = np.random.randn(3, 10, 10).astype('float64') + 1j * np.random.randn(3, 10, 10).astype('float64') case1 = np.random.randn(3, 10, 10).astype('float64') + 1j * np.random.randn(3, 10, 10).astype('float64')
paddle.enable_imperative() paddle.disable_static()
case1 = paddle.imperative.to_variable(case1) case1 = paddle.to_variable(case1)
data1 = paddle.complex.trace(case1, offset=1, axis1=1, axis2=2) # data1.shape = [3] data1 = paddle.complex.trace(case1, offset=1, axis1=1, axis2=2) # data1.shape = [3]
""" """
complex_variable_exists([x], "trace") complex_variable_exists([x], "trace")
......
...@@ -1804,7 +1804,7 @@ class DynamicDecode(Layer): ...@@ -1804,7 +1804,7 @@ class DynamicDecode(Layer):
from paddle.fluid.layers import BeamSearchDecoder from paddle.fluid.layers import BeamSearchDecoder
from paddle.incubate.hapi.text import StackedLSTMCell, DynamicDecode from paddle.incubate.hapi.text import StackedLSTMCell, DynamicDecode
paddle.enable_dygraph() paddle.disable_static()
vocab_size, d_model, = 100, 32 vocab_size, d_model, = 100, 32
encoder_output = paddle.rand((2, 4, d_model)) encoder_output = paddle.rand((2, 4, d_model))
...@@ -2278,7 +2278,7 @@ class TransformerCell(RNNCell): ...@@ -2278,7 +2278,7 @@ class TransformerCell(RNNCell):
from paddle.incubate.hapi.text import TransformerBeamSearchDecoder from paddle.incubate.hapi.text import TransformerBeamSearchDecoder
from paddle.incubate.hapi.text import DynamicDecode from paddle.incubate.hapi.text import DynamicDecode
paddle.enable_dygraph() paddle.disable_static()
class Embedder(fluid.dygraph.Layer): class Embedder(fluid.dygraph.Layer):
def __init__(self): def __init__(self):
...@@ -2445,7 +2445,7 @@ class TransformerBeamSearchDecoder(layers.BeamSearchDecoder): ...@@ -2445,7 +2445,7 @@ class TransformerBeamSearchDecoder(layers.BeamSearchDecoder):
from paddle.incubate.hapi.text import TransformerBeamSearchDecoder from paddle.incubate.hapi.text import TransformerBeamSearchDecoder
from paddle.incubate.hapi.text import DynamicDecode from paddle.incubate.hapi.text import DynamicDecode
paddle.enable_dygraph() paddle.disable_static()
class Embedder(fluid.dygraph.Layer): class Embedder(fluid.dygraph.Layer):
def __init__(self): def __init__(self):
......
...@@ -12,7 +12,16 @@ ...@@ -12,7 +12,16 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from ...fluid.dygraph.jit import save, load, SaveLoadConfig from ..fluid.dygraph.jit import save #DEFINE_ALIAS
from ...fluid.dygraph.io import TranslatedLayer from ..fluid.dygraph.jit import load #DEFINE_ALIAS
from ..fluid.dygraph.jit import SaveLoadConfig #DEFINE_ALIAS
from ..fluid.dygraph.jit import TracedLayer #DEFINE_ALIAS
from ..fluid.dygraph.jit import declarative as __impl__ #DEFINE_ALIAS
from ..fluid.dygraph.jit import declarative as to_static #DEFINE_ALIAS
from ..fluid.dygraph import ProgramTranslator #DEFINE_ALIAS
from ..fluid.dygraph.io import TranslatedLayer #DEFINE_ALIAS
__all__ = ['save', 'load', 'SaveLoadConfig'] __all__ = [
'save', 'load', 'SaveLoadConfig', 'TracedLayer', 'to_static',
'ProgramTranslator', 'TranslatedLayer'
]
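paddle.jit re-exports declarative under the friendlier name to_static. A minimal sketch of the decorator under the aliases above (the function body is illustrative):

import numpy as np
import paddle
import paddle.jit

paddle.disable_static()

@paddle.jit.to_static
def double(x):
    # traced into a static program by ProgramTranslator on first call
    return x * 2

out = double(paddle.to_variable(np.ones([2], dtype='float32')))
print(out.numpy())  # [2. 2.]
paddle.enable_static()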
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: import framework api under this directory
__all__ = [
'append_backward', 'gradients', 'Executor', 'global_scope', 'scope_guard',
'BuildStrategy', 'CompiledProgram', 'Print', 'py_func', 'ExecutionStrategy',
'name_scope', 'ParallelExecutor', 'program_guard', 'WeightNormParamAttr',
'default_main_program', 'default_startup_program', 'Program', 'save', 'load'
]
from ..fluid.executor import Executor #DEFINE_ALIAS
from ..fluid.executor import global_scope #DEFINE_ALIAS
from ..fluid.executor import scope_guard #DEFINE_ALIAS
from ..fluid.backward import append_backward #DEFINE_ALIAS
from ..fluid.backward import gradients #DEFINE_ALIAS
from ..fluid.compiler import BuildStrategy #DEFINE_ALIAS
from ..fluid.compiler import CompiledProgram #DEFINE_ALIAS
from ..fluid.compiler import ExecutionStrategy #DEFINE_ALIAS
from ..fluid.framework import default_main_program #DEFINE_ALIAS
from ..fluid.framework import default_startup_program #DEFINE_ALIAS
from ..fluid.framework import Program #DEFINE_ALIAS
from ..fluid.framework import name_scope #DEFINE_ALIAS
from ..fluid.framework import program_guard #DEFINE_ALIAS
from ..fluid.layers.control_flow import Print #DEFINE_ALIAS
from ..fluid.layers.nn import py_func #DEFINE_ALIAS
from ..fluid.parallel_executor import ParallelExecutor #DEFINE_ALIAS
from ..fluid.param_attr import WeightNormParamAttr #DEFINE_ALIAS
from ..tensor.io import save #DEFINE_ALIAS
from ..tensor.io import load #DEFINE_ALIAS
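Tying the re-exports together, a hedged end-to-end sketch of the static workflow under the new namespace (shapes and values are illustrative):

import numpy as np
import paddle
import paddle.static as static

paddle.enable_static()
main, startup = static.Program(), static.Program()
with static.program_guard(main, startup):
    x = paddle.data(name='x', shape=[None, 1], dtype='float32')
    y = x * 2.0

exe = static.Executor(paddle.CPUPlace())
exe.run(startup)
out, = exe.run(main, feed={'x': np.ones([3, 1], np.float32)}, fetch_list=[y])
print(out)  # [[2.] [2.] [2.]]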
...@@ -16,29 +16,45 @@ __all__ = [ ...@@ -16,29 +16,45 @@ __all__ = [
'fc', 'fc',
'batch_norm', 'batch_norm',
'embedding', 'embedding',
'bilinear_tensor_product' 'bilinear_tensor_product',
'conv2d' 'conv2d',
'conv2d_transpose' 'conv2d_transpose',
'conv3d' 'conv3d',
'conv3d_transpose' 'conv3d_transpose',
'create_parameter' 'create_parameter',
'crf_decoding' 'crf_decoding',
'data_norm' 'data_norm',
'deformable_conv' 'deformable_conv',
'group_norm' 'group_norm',
'hsigmoid' 'hsigmoid',
'instance_norm' 'instance_norm',
'layer_norm' 'layer_norm',
'multi_box_head' 'multi_box_head',
'nce' 'nce',
'prelu' 'prelu',
'row_conv' 'row_conv',
'spectral_norm', 'spectral_norm',
] ]
from ..fluid.layers import fc, batch_norm, bilinear_tensor_product, \ from ...fluid.layers import fc #DEFINE_ALIAS
conv2d, conv2d_transpose, conv3d, conv3d_transpose, create_parameter, \ from ...fluid.layers import batch_norm #DEFINE_ALIAS
crf_decoding, data_norm, deformable_conv, group_norm, hsigmoid, instance_norm, \ from ...fluid.layers import bilinear_tensor_product #DEFINE_ALIAS
layer_norm, multi_box_head, nce, prelu, row_conv, spectral_norm from ...fluid.layers import conv2d #DEFINE_ALIAS
from ...fluid.layers import conv2d_transpose #DEFINE_ALIAS
from ...fluid.layers import conv3d #DEFINE_ALIAS
from ...fluid.layers import conv3d_transpose #DEFINE_ALIAS
from ...fluid.layers import create_parameter #DEFINE_ALIAS
from ...fluid.layers import crf_decoding #DEFINE_ALIAS
from ...fluid.layers import data_norm #DEFINE_ALIAS
from ...fluid.layers import deformable_conv #DEFINE_ALIAS
from ...fluid.layers import group_norm #DEFINE_ALIAS
from ...fluid.layers import hsigmoid #DEFINE_ALIAS
from ...fluid.layers import instance_norm #DEFINE_ALIAS
from ...fluid.layers import layer_norm #DEFINE_ALIAS
from ...fluid.layers import multi_box_head #DEFINE_ALIAS
from ...fluid.layers import nce #DEFINE_ALIAS
from ...fluid.layers import prelu #DEFINE_ALIAS
from ...fluid.layers import row_conv #DEFINE_ALIAS
from ...fluid.layers import spectral_norm #DEFINE_ALIAS
from ..fluid.input import embedding from ...fluid.input import embedding #DEFINE_ALIAS
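The bulk import of static layers is unrolled into one aliased import per symbol; call signatures are untouched. A sketch with fc, assuming the usual fluid.layers.fc signature:

import paddle
import paddle.static as static

paddle.enable_static()
with static.program_guard(static.Program(), static.Program()):
    x = paddle.data(name='x', shape=[None, 4], dtype='float32')
    hidden = static.nn.fc(input=x, size=8, act='relu')  # fluid.layers.fc under its new name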
...@@ -83,7 +83,7 @@ def full_like(x, fill_value, dtype=None, name=None): ...@@ -83,7 +83,7 @@ def full_like(x, fill_value, dtype=None, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.enable_imperative() # Now we are in imperative mode paddle.disable_static() # Now we are in imperative mode
input = paddle.full(shape=[2, 3], fill_value=0.0, dtype='float32', name='input') input = paddle.full(shape=[2, 3], fill_value=0.0, dtype='float32', name='input')
output = paddle.full_like(input, 2.0) output = paddle.full_like(input, 2.0)
# [[2. 2. 2.] # [[2. 2. 2.]
...@@ -143,7 +143,7 @@ def ones(shape, dtype=None, name=None): ...@@ -143,7 +143,7 @@ def ones(shape, dtype=None, name=None):
.. code-block:: python .. code-block:: python
import paddle import paddle
paddle.enable_imperative() paddle.disable_static()
# default dtype for ones OP # default dtype for ones OP
data1 = paddle.ones(shape=[3, 2]) data1 = paddle.ones(shape=[3, 2])
...@@ -199,9 +199,9 @@ def ones_like(x, dtype=None, name=None): ...@@ -199,9 +199,9 @@ def ones_like(x, dtype=None, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.enable_imperative() paddle.disable_static()
x = paddle.imperative.to_variable(np.array([1,2,3], dtype='float32')) x = paddle.to_variable(np.array([1,2,3], dtype='float32'))
out1 = paddle.ones_like(x) # [1., 1., 1.] out1 = paddle.ones_like(x) # [1., 1., 1.]
out2 = paddle.ones_like(x, dtype='int32') # [1, 1, 1] out2 = paddle.ones_like(x, dtype='int32') # [1, 1, 1]
...@@ -236,7 +236,7 @@ def zeros(shape, dtype=None, name=None): ...@@ -236,7 +236,7 @@ def zeros(shape, dtype=None, name=None):
import paddle import paddle
paddle.enable_imperative() # Now we are in imperative mode paddle.disable_static() # Now we are in imperative mode
data = paddle.zeros(shape=[3, 2], dtype='float32') data = paddle.zeros(shape=[3, 2], dtype='float32')
# [[0. 0.] # [[0. 0.]
# [0. 0.] # [0. 0.]
...@@ -289,9 +289,9 @@ def zeros_like(x, dtype=None, name=None): ...@@ -289,9 +289,9 @@ def zeros_like(x, dtype=None, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.enable_imperative() paddle.disable_static()
x = paddle.imperative.to_variable(np.array([1,2,3], dtype='float32')) x = paddle.to_variable(np.array([1,2,3], dtype='float32'))
out1 = paddle.zeros_like(x) # [0., 0., 0.] out1 = paddle.zeros_like(x) # [0., 0., 0.]
out2 = paddle.zeros_like(x, dtype='int32') # [0, 0, 0] out2 = paddle.zeros_like(x, dtype='int32') # [0, 0, 0]
...@@ -328,7 +328,7 @@ def eye(num_rows, num_columns=None, dtype=None, name=None): ...@@ -328,7 +328,7 @@ def eye(num_rows, num_columns=None, dtype=None, name=None):
import paddle import paddle
paddle.enable_imperative() # Now we are in imperative mode paddle.disable_static() # Now we are in imperative mode
data = paddle.eye(3, dtype='int32') data = paddle.eye(3, dtype='int32')
# [[1 0 0] # [[1 0 0]
# [0 1 0] # [0 1 0]
...@@ -382,7 +382,7 @@ def full(shape, fill_value, dtype=None, name=None): ...@@ -382,7 +382,7 @@ def full(shape, fill_value, dtype=None, name=None):
import paddle import paddle
paddle.enable_imperative() # Now we are in imperative mode paddle.disable_static() # Now we are in imperative mode
data1 = paddle.full(shape=[2,1], fill_value=0, dtype='int64') data1 = paddle.full(shape=[2,1], fill_value=0, dtype='int64')
#[[0] #[[0]
# [0]] # [0]]
...@@ -459,7 +459,7 @@ def arange(start=0, end=None, step=1, dtype=None, name=None): ...@@ -459,7 +459,7 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.enable_imperative() paddle.disable_static()
out1 = paddle.arange(5) out1 = paddle.arange(5)
# [0, 1, 2, 3, 4] # [0, 1, 2, 3, 4]
...@@ -471,7 +471,7 @@ def arange(start=0, end=None, step=1, dtype=None, name=None): ...@@ -471,7 +471,7 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
out3 = paddle.arange(4.999, dtype='float32') out3 = paddle.arange(4.999, dtype='float32')
# [0., 1., 2., 3., 4.] # [0., 1., 2., 3., 4.]
start_var = paddle.imperative.to_variable(np.array([3])) start_var = paddle.to_variable(np.array([3]))
out4 = paddle.arange(start_var, 7) out4 = paddle.arange(start_var, 7)
# [3, 4, 5, 6] # [3, 4, 5, 6]
...@@ -709,12 +709,12 @@ def meshgrid(*args, **kwargs): ...@@ -709,12 +709,12 @@ def meshgrid(*args, **kwargs):
import paddle import paddle
import numpy as np import numpy as np
paddle.enable_imperative() paddle.disable_static()
input_3 = np.random.randint(0, 100, [100, ]).astype('int32') input_3 = np.random.randint(0, 100, [100, ]).astype('int32')
input_4 = np.random.randint(0, 100, [200, ]).astype('int32') input_4 = np.random.randint(0, 100, [200, ]).astype('int32')
tensor_3 = paddle.imperative.to_variable(input_3) tensor_3 = paddle.to_variable(input_3)
tensor_4 = paddle.imperative.to_variable(input_4) tensor_4 = paddle.to_variable(input_4)
grid_x, grid_y = paddle.tensor.meshgrid(tensor_3, tensor_4) grid_x, grid_y = paddle.tensor.meshgrid(tensor_3, tensor_4)
#the shape of grid_x is (100, 200) #the shape of grid_x is (100, 200)
......
...@@ -605,10 +605,10 @@ def cross(x, y, axis=None, name=None): ...@@ -605,10 +605,10 @@ def cross(x, y, axis=None, name=None):
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
from paddle.imperative import to_variable from paddle import to_variable
import numpy as np import numpy as np
paddle.enable_imperative() paddle.disable_static()
data_x = np.array([[1.0, 1.0, 1.0], data_x = np.array([[1.0, 1.0, 1.0],
[2.0, 2.0, 2.0], [2.0, 2.0, 2.0],
...@@ -782,13 +782,13 @@ def histogram(input, bins=100, min=0, max=0): ...@@ -782,13 +782,13 @@ def histogram(input, bins=100, min=0, max=0):
.. code-block:: python .. code-block:: python
import paddle import paddle
import numpy as np import numpy as np
startup_program = paddle.Program() startup_program = paddle.static.Program()
train_program = paddle.Program() train_program = paddle.static.Program()
with paddle.program_guard(train_program, startup_program): with paddle.static.program_guard(train_program, startup_program):
inputs = paddle.data(name='input', dtype='int32', shape=[2,3]) inputs = paddle.data(name='input', dtype='int32', shape=[2,3])
output = paddle.histogram(inputs, bins=5, min=1, max=5) output = paddle.histogram(inputs, bins=5, min=1, max=5)
place = paddle.CPUPlace() place = paddle.CPUPlace()
exe = paddle.Executor(place) exe = paddle.static.Executor(place)
exe.run(startup_program) exe.run(startup_program)
img = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int32) img = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int32)
res = exe.run(train_program, res = exe.run(train_program,
...@@ -800,11 +800,12 @@ def histogram(input, bins=100, min=0, max=0): ...@@ -800,11 +800,12 @@ def histogram(input, bins=100, min=0, max=0):
.. code-block:: python .. code-block:: python
import paddle import paddle
import numpy as np import numpy as np
with paddle.imperative.guard(paddle.CPUPlace()): paddle.disable_static(paddle.CPUPlace())
inputs_np = np.array([1, 2, 1]).astype(np.float) inputs_np = np.array([1, 2, 1]).astype(np.float)
inputs = paddle.imperative.to_variable(inputs_np) inputs = paddle.to_variable(inputs_np)
result = paddle.histogram(inputs, bins=4, min=0, max=3) result = paddle.histogram(inputs, bins=4, min=0, max=3)
print(result) # [0, 2, 1, 0] print(result) # [0, 2, 1, 0]
paddle.enable_static()
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return core.ops.histogram(input, "bins", bins, "min", min, "max", max) return core.ops.histogram(input, "bins", bins, "min", min, "max", max)
......
...@@ -71,12 +71,11 @@ def equal_all(x, y, name=None): ...@@ -71,12 +71,11 @@ def equal_all(x, y, name=None):
import numpy as np import numpy as np
import paddle import paddle
import paddle.imperative as imperative
paddle.enable_imperative() paddle.disable_static()
x = imperative.to_variable(np.array([1, 2, 3])) x = paddle.to_variable(np.array([1, 2, 3]))
y = imperative.to_variable(np.array([1, 2, 3])) y = paddle.to_variable(np.array([1, 2, 3]))
z = imperative.to_variable(np.array([1, 4, 3])) z = paddle.to_variable(np.array([1, 4, 3]))
result1 = paddle.equal_all(x, y) result1 = paddle.equal_all(x, y)
print(result1.numpy()) # result1 = [True ] print(result1.numpy()) # result1 = [True ]
result2 = paddle.equal_all(x, z) result2 = paddle.equal_all(x, z)
...@@ -201,11 +200,10 @@ def equal(x, y, name=None): ...@@ -201,11 +200,10 @@ def equal(x, y, name=None):
import numpy as np import numpy as np
import paddle import paddle
import paddle.imperative as imperative
paddle.enable_imperative() paddle.disable_static()
x = imperative.to_variable(np.array([1, 2, 3])) x = paddle.to_variable(np.array([1, 2, 3]))
y = imperative.to_variable(np.array([1, 3, 2])) y = paddle.to_variable(np.array([1, 3, 2]))
result1 = paddle.equal(x, y) result1 = paddle.equal(x, y)
print(result1.numpy()) # result1 = [True False False] print(result1.numpy()) # result1 = [True False False]
""" """
...@@ -234,11 +232,10 @@ def greater_equal(x, y, name=None): ...@@ -234,11 +232,10 @@ def greater_equal(x, y, name=None):
.. code-block:: python .. code-block:: python
import numpy as np import numpy as np
import paddle import paddle
import paddle.imperative as imperative
paddle.enable_imperative() paddle.disable_static()
x = imperative.to_variable(np.array([1, 2, 3])) x = paddle.to_variable(np.array([1, 2, 3]))
y = imperative.to_variable(np.array([1, 3, 2])) y = paddle.to_variable(np.array([1, 3, 2]))
result1 = paddle.greater_equal(x, y) result1 = paddle.greater_equal(x, y)
print(result1.numpy()) # result1 = [True False True] print(result1.numpy()) # result1 = [True False True]
""" """
...@@ -267,11 +264,10 @@ def greater_than(x, y, name=None): ...@@ -267,11 +264,10 @@ def greater_than(x, y, name=None):
.. code-block:: python .. code-block:: python
import numpy as np import numpy as np
import paddle import paddle
import paddle.imperative as imperative
paddle.enable_imperative() paddle.disable_static()
x = imperative.to_variable(np.array([1, 2, 3])) x = paddle.to_variable(np.array([1, 2, 3]))
y = imperative.to_variable(np.array([1, 3, 2])) y = paddle.to_variable(np.array([1, 3, 2]))
result1 = paddle.greater_than(x, y) result1 = paddle.greater_than(x, y)
print(result1.numpy()) # result1 = [False False True] print(result1.numpy()) # result1 = [False False True]
""" """
...@@ -301,11 +297,10 @@ def less_equal(x, y, name=None): ...@@ -301,11 +297,10 @@ def less_equal(x, y, name=None):
.. code-block:: python .. code-block:: python
import numpy as np import numpy as np
import paddle import paddle
import paddle.imperative as imperative
paddle.enable_imperative() paddle.disable_static()
x = imperative.to_variable(np.array([1, 2, 3])) x = paddle.to_variable(np.array([1, 2, 3]))
y = imperative.to_variable(np.array([1, 3, 2])) y = paddle.to_variable(np.array([1, 3, 2]))
result1 = paddle.less_equal(x, y) result1 = paddle.less_equal(x, y)
print(result1.numpy()) # result1 = [True True False] print(result1.numpy()) # result1 = [True True False]
""" """
...@@ -335,11 +330,10 @@ def less_than(x, y, name=None): ...@@ -335,11 +330,10 @@ def less_than(x, y, name=None):
.. code-block:: python .. code-block:: python
import numpy as np import numpy as np
import paddle import paddle
import paddle.imperative as imperative
paddle.enable_imperative() paddle.disable_static()
x = imperative.to_variable(np.array([1, 2, 3])) x = paddle.to_variable(np.array([1, 2, 3]))
y = imperative.to_variable(np.array([1, 3, 2])) y = paddle.to_variable(np.array([1, 3, 2]))
result1 = paddle.less_than(x, y) result1 = paddle.less_than(x, y)
print(result1.numpy()) # result1 = [False True False] print(result1.numpy()) # result1 = [False True False]
""" """
...@@ -369,11 +363,10 @@ def not_equal(x, y, name=None): ...@@ -369,11 +363,10 @@ def not_equal(x, y, name=None):
.. code-block:: python .. code-block:: python
import numpy as np import numpy as np
import paddle import paddle
import paddle.imperative as imperative
paddle.enable_imperative() paddle.disable_static()
x = imperative.to_variable(np.array([1, 2, 3])) x = paddle.to_variable(np.array([1, 2, 3]))
y = imperative.to_variable(np.array([1, 3, 2])) y = paddle.to_variable(np.array([1, 3, 2]))
result1 = paddle.not_equal(x, y) result1 = paddle.not_equal(x, y)
print(result1.numpy()) # result1 = [False True True] print(result1.numpy()) # result1 = [False True True]
""" """
......
...@@ -103,16 +103,16 @@ def concat(x, axis=0, name=None): ...@@ -103,16 +103,16 @@ def concat(x, axis=0, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.enable_imperative() # Now we are in imperative mode paddle.disable_static() # Now we are in imperative mode
in1 = np.array([[1, 2, 3], in1 = np.array([[1, 2, 3],
[4, 5, 6]]) [4, 5, 6]])
in2 = np.array([[11, 12, 13], in2 = np.array([[11, 12, 13],
[14, 15, 16]]) [14, 15, 16]])
in3 = np.array([[21, 22], in3 = np.array([[21, 22],
[23, 24]]) [23, 24]])
x1 = paddle.imperative.to_variable(in1) x1 = paddle.to_variable(in1)
x2 = paddle.imperative.to_variable(in2) x2 = paddle.to_variable(in2)
x3 = paddle.imperative.to_variable(in3) x3 = paddle.to_variable(in3)
zero = paddle.full(shape=[1], dtype='int32', fill_value=0) zero = paddle.full(shape=[1], dtype='int32', fill_value=0)
# When the axis is negative, the real axis is (axis + Rank(x)) # When the axis is negative, the real axis is (axis + Rank(x))
# As follow, axis is -1, Rank(x) is 2, the real axis is 1 # As follow, axis is -1, Rank(x) is 2, the real axis is 1
...@@ -156,12 +156,12 @@ def flip(x, axis, name=None): ...@@ -156,12 +156,12 @@ def flip(x, axis, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.enable_imperative() paddle.disable_static()
image_shape=(3, 2, 2) image_shape=(3, 2, 2)
x = np.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape) x = np.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape)
x = x.astype('float32') x = x.astype('float32')
img = paddle.imperative.to_variable(x) img = paddle.to_variable(x)
out = paddle.flip(img, [0,1]) out = paddle.flip(img, [0,1])
print(out) # [[[10,11][8, 9]],[[6, 7],[4, 5]] [[2, 3],[0, 1]]] print(out) # [[[10,11][8, 9]],[[6, 7],[4, 5]] [[2, 3],[0, 1]]]
...@@ -247,13 +247,13 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None): ...@@ -247,13 +247,13 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.enable_imperative() paddle.disable_static()
image_shape=(2, 3, 4, 4) image_shape=(2, 3, 4, 4)
x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3]).reshape(image_shape) / 100. x = np.arange(image_shape[0] * image_shape[1] * image_shape[2] * image_shape[3]).reshape(image_shape) / 100.
x = x.astype('float32') x = x.astype('float32')
img = paddle.imperative.to_variable(x) img = paddle.to_variable(x)
out = paddle.flatten(img, start_axis=1, stop_axis=2) out = paddle.flatten(img, start_axis=1, stop_axis=2)
# out shape is [2, 12, 4] # out shape is [2, 12, 4]
""" """
...@@ -325,8 +325,8 @@ def roll(x, shifts, axis=None, name=None): ...@@ -325,8 +325,8 @@ def roll(x, shifts, axis=None, name=None):
data = np.array([[1.0, 2.0, 3.0], data = np.array([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0], [4.0, 5.0, 6.0],
[7.0, 8.0, 9.0]]) [7.0, 8.0, 9.0]])
paddle.enable_imperative() paddle.disable_static()
x = paddle.imperative.to_variable(data) x = paddle.to_variable(data)
out_z1 = paddle.roll(x, shifts=1) out_z1 = paddle.roll(x, shifts=1)
print(out_z1.numpy()) print(out_z1.numpy())
#[[9. 1. 2.] #[[9. 1. 2.]
...@@ -503,10 +503,10 @@ def split(x, num_or_sections, axis=0, name=None): ...@@ -503,10 +503,10 @@ def split(x, num_or_sections, axis=0, name=None):
import numpy as np import numpy as np
import paddle import paddle
paddle.enable_imperative() paddle.disable_static()
# x is a Tensor which shape is [3, 9, 5] # x is a Tensor which shape is [3, 9, 5]
x_np = np.random.random([3, 9, 5]).astype("int32") x_np = np.random.random([3, 9, 5]).astype("int32")
x = paddle.imperative.to_variable(x_np) x = paddle.to_variable(x_np)
out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1) out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1)
# out0.shape [3, 3, 5] # out0.shape [3, 3, 5]
...@@ -595,7 +595,7 @@ def squeeze(x, axis=None, name=None): ...@@ -595,7 +595,7 @@ def squeeze(x, axis=None, name=None):
import paddle import paddle
paddle.enable_imperative() paddle.disable_static()
x = paddle.rand([5, 1, 10]) x = paddle.rand([5, 1, 10])
output = paddle.squeeze(x, axis=1) output = paddle.squeeze(x, axis=1)
......
...@@ -487,18 +487,18 @@ Examples: ...@@ -487,18 +487,18 @@ Examples:
import paddle import paddle
import numpy as np import numpy as np
paddle.enable_imperative() paddle.disable_static()
x_data = np.array([[1, 2], [3, 4]], dtype=np.float32) x_data = np.array([[1, 2], [3, 4]], dtype=np.float32)
y_data = np.array([[5, 6], [7, 8]], dtype=np.float32) y_data = np.array([[5, 6], [7, 8]], dtype=np.float32)
x = paddle.imperative.to_variable(x_data) x = paddle.to_variable(x_data)
y = paddle.imperative.to_variable(y_data) y = paddle.to_variable(y_data)
res = paddle.multiply(x, y) res = paddle.multiply(x, y)
print(res.numpy()) # [[5, 12], [21, 32]] print(res.numpy()) # [[5, 12], [21, 32]]
x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32) x_data = np.array([[[1, 2, 3], [1, 2, 3]]], dtype=np.float32)
y_data = np.array([1, 2], dtype=np.float32) y_data = np.array([1, 2], dtype=np.float32)
x = paddle.imperative.to_variable(x_data) x = paddle.to_variable(x_data)
y = paddle.imperative.to_variable(y_data) y = paddle.to_variable(y_data)
res = paddle.multiply(x, y, axis=1) res = paddle.multiply(x, y, axis=1)
print(res.numpy()) # [[[1, 2, 3], [2, 4, 6]]] print(res.numpy()) # [[[1, 2, 3], [2, 4, 6]]]
...@@ -1431,11 +1431,11 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None): ...@@ -1431,11 +1431,11 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
case2 = np.random.randn(3, 10, 10).astype('float32') case2 = np.random.randn(3, 10, 10).astype('float32')
case3 = np.random.randn(3, 10, 5, 10).astype('float32') case3 = np.random.randn(3, 10, 5, 10).astype('float32')
paddle.enable_imperative() paddle.disable_static()
case1 = paddle.imperative.to_variable(case1) case1 = paddle.to_variable(case1)
case2 = paddle.imperative.to_variable(case2) case2 = paddle.to_variable(case2)
case3 = paddle.imperative.to_variable(case3) case3 = paddle.to_variable(case3)
data1 = paddle.trace(case1) # data1.shape = [1] data1 = paddle.trace(case1) # data1.shape = [1]
data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3] data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3]
data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data2.shape = [3, 5] data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data2.shape = [3, 5]
......
...@@ -81,7 +81,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None): ...@@ -81,7 +81,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.enable_imperative() paddle.disable_static()
# example 1: # example 1:
# attr shape is a list which doesn't contain Tensor. # attr shape is a list which doesn't contain Tensor.
...@@ -98,7 +98,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None): ...@@ -98,7 +98,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
# example 3: # example 3:
# attr shape is a Tensor # attr shape is a Tensor
var_shape = paddle.imperative.to_variable(np.array([3])) var_shape = paddle.to_variable(np.array([3]))
result_3 = paddle.randint(low=-5, high=5, shape=var_shape) result_3 = paddle.randint(low=-5, high=5, shape=var_shape)
# [-2, 2, 3] # [-2, 2, 3]
...@@ -187,7 +187,7 @@ def randn(shape, dtype=None, name=None): ...@@ -187,7 +187,7 @@ def randn(shape, dtype=None, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.enable_imperative() paddle.disable_static()
# example 1: attr shape is a list which doesn't contain Tensor. # example 1: attr shape is a list which doesn't contain Tensor.
result_1 = paddle.randn(shape=[2, 3]) result_1 = paddle.randn(shape=[2, 3])
...@@ -206,7 +206,7 @@ def randn(shape, dtype=None, name=None): ...@@ -206,7 +206,7 @@ def randn(shape, dtype=None, name=None):
# [ 0.8086993 , 0.6868893 ]]] # [ 0.8086993 , 0.6868893 ]]]
# example 3: attr shape is a Tensor, the data type must be int64 or int32. # example 3: attr shape is a Tensor, the data type must be int64 or int32.
var_shape = paddle.imperative.to_variable(np.array([2, 3])) var_shape = paddle.to_variable(np.array([2, 3]))
result_3 = paddle.randn(var_shape) result_3 = paddle.randn(var_shape)
# [[-2.878077 , 0.17099959, 0.05111201] # [[-2.878077 , 0.17099959, 0.05111201]
# [-0.3761474, -1.044801 , 1.1870178 ]] # [-0.3761474, -1.044801 , 1.1870178 ]]
...@@ -252,7 +252,7 @@ def randperm(n, dtype="int64", name=None): ...@@ -252,7 +252,7 @@ def randperm(n, dtype="int64", name=None):
import paddle import paddle
paddle.enable_imperative() paddle.disable_static()
result_1 = paddle.randperm(5) result_1 = paddle.randperm(5)
# [4, 1, 2, 3, 0] # [4, 1, 2, 3, 0]
...@@ -325,7 +325,7 @@ def rand(shape, dtype=None, name=None): ...@@ -325,7 +325,7 @@ def rand(shape, dtype=None, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.enable_imperative() paddle.disable_static()
# example 1: attr shape is a list which doesn't contain Tensor. # example 1: attr shape is a list which doesn't contain Tensor.
result_1 = paddle.rand(shape=[2, 3]) result_1 = paddle.rand(shape=[2, 3])
# [[0.451152 , 0.55825245, 0.403311 ], # [[0.451152 , 0.55825245, 0.403311 ],
...@@ -343,7 +343,7 @@ def rand(shape, dtype=None, name=None): ...@@ -343,7 +343,7 @@ def rand(shape, dtype=None, name=None):
# [0.870881 , 0.2984597 ]]] # [0.870881 , 0.2984597 ]]]
# example 3: attr shape is a Tensor, the data type must be int64 or int32. # example 3: attr shape is a Tensor, the data type must be int64 or int32.
var_shape = paddle.imperative.to_variable(np.array([2, 3])) var_shape = paddle.to_variable(np.array([2, 3]))
result_3 = paddle.rand(var_shape) result_3 = paddle.rand(var_shape)
# [[0.22920267, 0.841956 , 0.05981819], # [[0.22920267, 0.841956 , 0.05981819],
# [0.4836288 , 0.24573246, 0.7516129 ]] # [0.4836288 , 0.24573246, 0.7516129 ]]
......
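All of the random ops keep their Tensor-valued shape path; only the to_variable spelling changes. The pattern in isolation:

import numpy as np
import paddle

paddle.disable_static()
var_shape = paddle.to_variable(np.array([2, 3], dtype='int64'))
x = paddle.rand(var_shape)  # shape supplied as a Tensor, as in example 3 above
print(x.numpy().shape)      # (2, 3)
paddle.enable_static()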
...@@ -68,17 +68,16 @@ def argsort(x, axis=-1, descending=False, name=None): ...@@ -68,17 +68,16 @@ def argsort(x, axis=-1, descending=False, name=None):
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
import paddle.imperative as imperative
import numpy as np import numpy as np
paddle.enable_imperative() paddle.disable_static()
input_array = np.array([[[5,8,9,5], input_array = np.array([[[5,8,9,5],
[0,0,1,7], [0,0,1,7],
[6,9,2,4]], [6,9,2,4]],
[[5,2,4,2], [[5,2,4,2],
[4,7,7,9], [4,7,7,9],
[1,7,0,6]]]).astype(np.float32) [1,7,0,6]]]).astype(np.float32)
x = imperative.to_variable(input_array) x = paddle.to_variable(input_array)
out1 = paddle.argsort(x=x, axis=-1) out1 = paddle.argsort(x=x, axis=-1)
out2 = paddle.argsort(x=x, axis=0) out2 = paddle.argsort(x=x, axis=0)
out3 = paddle.argsort(x=x, axis=1) out3 = paddle.argsort(x=x, axis=1)
...@@ -250,14 +249,14 @@ def index_select(x, index, axis=0, name=None): ...@@ -250,14 +249,14 @@ def index_select(x, index, axis=0, name=None):
import paddle import paddle
import numpy as np import numpy as np
paddle.enable_imperative() # Now we are in imperative mode paddle.disable_static() # Now we are in imperative mode
data = np.array([[1.0, 2.0, 3.0, 4.0], data = np.array([[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0], [5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0]]) [9.0, 10.0, 11.0, 12.0]])
data_index = np.array([0, 1, 1]).astype('int32') data_index = np.array([0, 1, 1]).astype('int32')
x = paddle.imperative.to_variable(data) x = paddle.to_variable(data)
index = paddle.imperative.to_variable(data_index) index = paddle.to_variable(data_index)
out_z1 = paddle.index_select(x=x, index=index) out_z1 = paddle.index_select(x=x, index=index)
#[[1. 2. 3. 4.] #[[1. 2. 3. 4.]
# [5. 6. 7. 8.] # [5. 6. 7. 8.]
...@@ -399,17 +398,16 @@ def sort(x, axis=-1, descending=False, name=None): ...@@ -399,17 +398,16 @@ def sort(x, axis=-1, descending=False, name=None):
Examples: Examples:
.. code-block:: python .. code-block:: python
import paddle import paddle
import paddle.imperative as imperative
import numpy as np import numpy as np
paddle.enable_imperative() paddle.disable_static()
input_array = np.array([[[5,8,9,5], input_array = np.array([[[5,8,9,5],
[0,0,1,7], [0,0,1,7],
[6,9,2,4]], [6,9,2,4]],
[[5,2,4,2], [[5,2,4,2],
[4,7,7,9], [4,7,7,9],
[1,7,0,6]]]).astype(np.float32) [1,7,0,6]]]).astype(np.float32)
x = imperative.to_variable(input_array) x = paddle.to_variable(input_array)
out1 = paddle.sort(x=x, axis=-1) out1 = paddle.sort(x=x, axis=-1)
out2 = paddle.sort(x=x, axis=0) out2 = paddle.sort(x=x, axis=0)
out3 = paddle.sort(x=x, axis=1) out3 = paddle.sort(x=x, axis=1)
......
...@@ -154,6 +154,7 @@ packages=['paddle', ...@@ -154,6 +154,7 @@ packages=['paddle',
'paddle.fleet.proto', 'paddle.fleet.proto',
'paddle.fleet.utils', 'paddle.fleet.utils',
'paddle.framework', 'paddle.framework',
'paddle.jit',
'paddle.fluid', 'paddle.fluid',
'paddle.fluid.dygraph', 'paddle.fluid.dygraph',
'paddle.fluid.dygraph.dygraph_to_static', 'paddle.fluid.dygraph.dygraph_to_static',
...@@ -200,8 +201,8 @@ packages=['paddle', ...@@ -200,8 +201,8 @@ packages=['paddle',
'paddle.nn.layer', 'paddle.nn.layer',
'paddle.nn.initializer', 'paddle.nn.initializer',
'paddle.metric', 'paddle.metric',
'paddle.imperative', 'paddle.static',
'paddle.imperative.jit', 'paddle.static.nn',
'paddle.tensor', 'paddle.tensor',
] ]
......