Unverified · Commit 162b4d6c authored by Zhou Wei, committed by GitHub

remove to_variable from 2.0 (#27528)

Parent 9b124014
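This commit removes the 1.x-era `paddle.to_variable` alias from the 2.0 API. The change is mechanical throughout the diff below: each `paddle.to_variable(...)` call becomes `paddle.to_tensor(...)`, `from paddle import to_variable` imports are dropped, and docstring examples are rewritten to build tensors directly. A minimal before/after sketch of the migration (illustrative only, not taken from the patch; input values are arbitrary):

    import numpy as np
    import paddle

    paddle.disable_static()  # dygraph (imperative) mode, as in the updated tests
    x_np = np.ones([2, 3]).astype('float32')

    # before this commit (1.x style): x = paddle.to_variable(x_np)
    # after: paddle.to_tensor is the 2.0 entry point for creating a Tensor
    # from Python data or a NumPy array
    x = paddle.to_tensor(x_np)
    y = paddle.tanh(x)
    print(y.numpy())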
@@ -230,7 +230,6 @@ from .framework import CPUPlace #DEFINE_ALIAS
 from .framework import CUDAPlace #DEFINE_ALIAS
 from .framework import CUDAPinnedPlace #DEFINE_ALIAS
-from .framework import to_variable #DEFINE_ALIAS
 from .framework import grad #DEFINE_ALIAS
 from .framework import no_grad #DEFINE_ALIAS
 from .framework import save #DEFINE_ALIAS
...
@@ -3230,14 +3230,11 @@ class Flatten(layers.Layer):
         .. code-block:: python

           import paddle
-          from paddle import to_variable
           import numpy as np

+          paddle.disable_static()
           inp_np = np.ones([5, 2, 3, 4]).astype('float32')
-          paddle.disable_static()
-          inp_np = to_variable(inp_np)
+          inp_np = paddle.to_tensor(inp_np)
           flatten = paddle.nn.Flatten(start_axis=1, stop_axis=2)
           flatten_res = flatten(inp_np)
...
@@ -228,7 +228,7 @@ class TestTanhAPI(unittest.TestCase):
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
-        x = paddle.to_variable(self.x_np)
+        x = paddle.to_tensor(self.x_np)
         out1 = F.tanh(x)
         out2 = paddle.tanh(x)
         th = paddle.nn.Tanh()
@@ -573,7 +573,7 @@ class TestHardShrinkAPI(unittest.TestCase):
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
-        x = paddle.to_variable(self.x_np)
+        x = paddle.to_tensor(self.x_np)
         out1 = F.hardshrink(x)
         hd = paddle.nn.Hardshrink()
         out2 = hd(x)
@@ -639,7 +639,7 @@ class TestHardtanhAPI(unittest.TestCase):
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
-        x = paddle.to_variable(self.x_np)
+        x = paddle.to_tensor(self.x_np)
         out1 = F.hardtanh(x)
         m = paddle.nn.Hardtanh()
         out2 = m(x)
@@ -1063,7 +1063,7 @@ class TestLeakyReluAPI(unittest.TestCase):
     def test_dygraph_api(self):
         paddle.disable_static(self.place)
-        x = paddle.to_variable(self.x_np)
+        x = paddle.to_tensor(self.x_np)
         out1 = F.leaky_relu(x)
         m = paddle.nn.LeakyReLU()
         out2 = m(x)
...
@@ -25,7 +25,7 @@ class TestAdamaxAPI(unittest.TestCase):
     def test_adamax_api_dygraph(self):
         paddle.disable_static()
         value = np.arange(26).reshape(2, 13).astype("float32")
-        a = paddle.to_variable(value)
+        a = paddle.to_tensor(value)
         linear = paddle.nn.Linear(13, 5)
         adam = paddle.optimizer.Adamax(
             learning_rate=0.01,
...
@@ -22,7 +22,7 @@ class TestAdamWOp(unittest.TestCase):
     def test_adamw_op_dygraph(self):
         paddle.disable_static()
         value = np.arange(26).reshape(2, 13).astype("float32")
-        a = paddle.to_variable(value)
+        a = paddle.to_tensor(value)
         linear = paddle.nn.Linear(13, 5)
         adam = paddle.optimizer.AdamW(
             learning_rate=0.01,
@@ -37,7 +37,7 @@ class TestAdamWOp(unittest.TestCase):
     def test_adamw_op_coverage(self):
         paddle.disable_static()
         value = np.arange(26).reshape(2, 13).astype("float32")
-        a = paddle.to_variable(value)
+        a = paddle.to_tensor(value)
         linear = paddle.nn.Linear(13, 5)
         adam = paddle.optimizer.AdamW(
             learning_rate=0.0,
...
@@ -147,7 +147,7 @@ class TestAdaptiveAvgPool2dAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)
             out_1 = paddle.nn.functional.adaptive_avg_pool2d(
                 x=x, output_size=[3, 3])
@@ -245,7 +245,7 @@ class TestAdaptiveAvgPool2dClassAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)
             adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[3, 3])
             out_1 = adaptive_avg_pool(x=x)
...
@@ -162,7 +162,7 @@ class TestAdaptiveAvgPool3dAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)
             out_1 = paddle.nn.functional.adaptive_avg_pool3d(
                 x=x, output_size=[3, 3, 3])
@@ -262,7 +262,7 @@ class TestAdaptiveAvgPool3dClassAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)
             adaptive_avg_pool = paddle.nn.AdaptiveAvgPool3d(
                 output_size=[3, 3, 3])
...
@@ -147,7 +147,7 @@ class TestAdaptiveMaxPool2dAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)
             out_1 = paddle.nn.functional.adaptive_max_pool2d(
                 x=x, return_indices=False, output_size=[3, 3])
@@ -240,7 +240,7 @@ class TestAdaptiveMaxPool2dClassAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)
             adaptive_max_pool = paddle.nn.AdaptiveMaxPool2d(output_size=[3, 3])
             out_1 = adaptive_max_pool(x=x)
...
@@ -162,7 +162,7 @@ class TestAdaptiveMaxPool3dAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)
             out_1 = paddle.nn.functional.adaptive_max_pool3d(
                 x=x, output_size=[3, 3, 3])
@@ -257,7 +257,7 @@ class TestAdaptiveMaxPool3dClassAPI(unittest.TestCase):
                          if core.is_compiled_with_cuda() else [False]):
             place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
             paddle.disable_static(place=place)
-            x = paddle.to_variable(self.x_np)
+            x = paddle.to_tensor(self.x_np)
             adaptive_max_pool = paddle.nn.AdaptiveMaxPool3d(
                 output_size=[3, 3, 3])
...
@@ -244,9 +244,9 @@ class TestAddMMAPI(unittest.TestCase):
         def test_error1():
             data_x_wrong = np.ones((2, 3)).astype(np.float32)
-            x = paddle.to_variable(data_x_wrong)
-            y = paddle.to_variable(data_y)
-            input = paddle.to_variable(data_input)
+            x = paddle.to_tensor(data_x_wrong)
+            y = paddle.to_tensor(data_y)
+            input = paddle.to_tensor(data_input)
             out = paddle.tensor.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 )
         self.assertRaises(ValueError, test_error1)
         '''
...
@@ -98,9 +98,9 @@ class TestArangeImperative(unittest.TestCase):
         x2 = paddle.tensor.arange(5)
         x3 = paddle.tensor.creation.arange(5)

-        start = paddle.to_variable(np.array([0], 'float32'))
-        end = paddle.to_variable(np.array([5], 'float32'))
-        step = paddle.to_variable(np.array([1], 'float32'))
+        start = paddle.to_tensor(np.array([0], 'float32'))
+        end = paddle.to_tensor(np.array([5], 'float32'))
+        step = paddle.to_tensor(np.array([1], 'float32'))
         x4 = paddle.arange(start, end, step, 'int64')
         paddle.enable_static()
...
@@ -96,7 +96,7 @@ class TestDygraph(unittest.TestCase):
         a = np.random.rand(3, 3)
         a_t = np.transpose(a, [1, 0])
         x_data = np.matmul(a, a_t) + 1e-03
-        x = paddle.to_variable(x_data)
+        x = paddle.to_tensor(x_data)
         out = paddle.cholesky(x, upper=False)
...
@@ -168,9 +168,9 @@ class TestClipAPI(unittest.TestCase):
         paddle.disable_static(place)
         data_shape = [1, 9, 9, 4]
         data = np.random.random(data_shape).astype('float32')
-        images = paddle.to_variable(data, dtype='float32')
-        v_min = paddle.to_variable(np.array([0.2], dtype=np.float32))
-        v_max = paddle.to_variable(np.array([0.8], dtype=np.float32))
+        images = paddle.to_tensor(data, dtype='float32')
+        v_min = paddle.to_tensor(np.array([0.2], dtype=np.float32))
+        v_max = paddle.to_tensor(np.array([0.8], dtype=np.float32))
         out_1 = paddle.clip(images, min=0.2, max=0.8)
         out_2 = paddle.clip(images, min=0.2, max=0.9)
...
@@ -285,9 +285,9 @@ class TestConcatAPI(unittest.TestCase):
         in2 = np.array([[11, 12, 13], [14, 15, 16]])
         in3 = np.array([[21, 22], [23, 24]])
         paddle.disable_static()
-        x1 = paddle.to_variable(in1)
-        x2 = paddle.to_variable(in2)
-        x3 = paddle.to_variable(in3)
+        x1 = paddle.to_tensor(in1)
+        x2 = paddle.to_tensor(in2)
+        x3 = paddle.to_tensor(in3)
         out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
         out2 = paddle.concat(x=[x1, x2], axis=0)
         np_out1 = np.concatenate([in1, in2, in3], axis=-1)
...
@@ -75,8 +75,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
         np_x2 = np.random.rand(*shape).astype(np.float32)
         np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)

-        tesnor_x1 = paddle.to_variable(np_x1)
-        tesnor_x2 = paddle.to_variable(np_x2)
+        tesnor_x1 = paddle.to_tensor(np_x1)
+        tesnor_x2 = paddle.to_tensor(np_x2)
         y = F.cosine_similarity(tesnor_x1, tesnor_x2, axis=axis, eps=eps)
         self.assertTrue(np.allclose(y.numpy(), np_out))
@@ -92,8 +92,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
         np_x2 = np.random.rand(*shape).astype(np.float32)
         np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)

-        tesnor_x1 = paddle.to_variable(np_x1)
-        tesnor_x2 = paddle.to_variable(np_x2)
+        tesnor_x1 = paddle.to_tensor(np_x1)
+        tesnor_x2 = paddle.to_tensor(np_x2)
         y = F.cosine_similarity(tesnor_x1, tesnor_x2, axis=axis, eps=eps)
         self.assertTrue(np.allclose(y.numpy(), np_out))
@@ -110,8 +110,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
         np_x2 = np.random.rand(*shape2).astype(np.float32)
         np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)

-        tesnor_x1 = paddle.to_variable(np_x1)
-        tesnor_x2 = paddle.to_variable(np_x2)
+        tesnor_x1 = paddle.to_tensor(np_x1)
+        tesnor_x2 = paddle.to_tensor(np_x2)
         y = F.cosine_similarity(tesnor_x1, tesnor_x2, axis=axis, eps=eps)
         self.assertTrue(np.allclose(y.numpy(), np_out))
@@ -129,8 +129,8 @@ class TestCosineSimilarityAPI(unittest.TestCase):
         np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)
         cos_sim_func = nn.CosineSimilarity(axis=axis, eps=eps)

-        tesnor_x1 = paddle.to_variable(np_x1)
-        tesnor_x2 = paddle.to_variable(np_x2)
+        tesnor_x1 = paddle.to_tensor(np_x1)
+        tesnor_x2 = paddle.to_tensor(np_x2)
         y = cos_sim_func(tesnor_x1, tesnor_x2)
         self.assertTrue(np.allclose(y.numpy(), np_out))
...
@@ -21,13 +21,12 @@ import paddle
 import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
-from paddle import to_variable


 class TestCumsumOp(unittest.TestCase):
     def run_cases(self):
         data_np = np.arange(12).reshape(3, 4)
-        data = to_variable(data_np)
+        data = paddle.to_tensor(data_np)
         y = paddle.cumsum(data)
         z = np.cumsum(data_np)
...
@@ -20,7 +20,6 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import Linear
 import paddle.fluid.core as core
-from paddle import to_variable


 class TestDefaultType(unittest.TestCase):
...
@@ -36,7 +36,7 @@ class TestDirectory(unittest.TestCase):
     def test_new_directory(self):
         new_directory = [
             'paddle.enable_static', 'paddle.disable_static',
-            'paddle.in_dynamic_mode', 'paddle.to_variable', 'paddle.grad',
+            'paddle.in_dynamic_mode', 'paddle.to_tensor', 'paddle.grad',
             'paddle.no_grad', 'paddle.save', 'paddle.load',
             'paddle.static.save', 'paddle.static.load',
             'paddle.distributed.ParallelEnv',
...
@@ -195,7 +195,7 @@ class TestFlattenPython(unittest.TestCase):
         def test_Negative():
             paddle.disable_static()
-            img = paddle.to_variable(x)
+            img = paddle.to_tensor(x)
             out = paddle.flatten(img, start_axis=-2, stop_axis=-1)
             return out.numpy().shape
...
@@ -211,7 +211,7 @@ class TestImperative(unittest.TestCase):
         paddle.disable_static()
         self.assertTrue(paddle.in_dynamic_mode())
         np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
-        var_inp = paddle.to_variable(np_inp)
+        var_inp = paddle.to_tensor(np_inp)
         mlp = MLP(input_size=2)
         out = mlp(var_inp)
         dy_out1 = out.numpy()
@@ -221,7 +221,7 @@ class TestImperative(unittest.TestCase):
         self.assertFalse(paddle.in_dynamic_mode())
         paddle.disable_static()
         self.assertTrue(paddle.in_dynamic_mode())
-        var_inp = paddle.to_variable(np_inp)
+        var_inp = paddle.to_tensor(np_inp)
         mlp = MLP(input_size=2)
         out = mlp(var_inp)
         dy_out2 = out.numpy()
...
@@ -54,7 +54,7 @@ class TestSimpleNet(unittest.TestCase):
                 # grad_clip = fluid.clip.GradientClipByGlobalNorm(5.0)
                 input_word = np.array([[1, 2], [2, 1]]).astype('int64')
-                input = paddle.to_variable(input_word)
+                input = paddle.to_tensor(input_word)
                 simplenet = SimpleNet(20, 32, dtype)
                 adam = SGDOptimizer(
...
@@ -41,7 +41,7 @@ def run_dygraph(x_np, op_str, use_gpu=True):
     if use_gpu and fluid.core.is_compiled_with_cuda():
         place = paddle.CUDAPlace(0)
     paddle.disable_static(place)
-    x = paddle.to_variable(x_np)
+    x = paddle.to_tensor(x_np)
     dygraph_result = getattr(paddle.tensor, op_str)(x)
     return dygraph_result
...
@@ -543,9 +543,9 @@ class TestJitSaveMultiCases(unittest.TestCase):
         loaded_layer = paddle.jit.load(model_path)
         loaded_layer.eval()
         # inference & compare
-        x = paddle.to_variable(np.random.random((1, 784)).astype('float32'))
+        x = paddle.to_tensor(np.random.random((1, 784)).astype('float32'))
         if with_label:
-            y = paddle.to_variable(np.random.random((1, 1)).astype('int64'))
+            y = paddle.to_tensor(np.random.random((1, 1)).astype('int64'))
             pred, _ = layer(x, y)
             pred = pred.numpy()
         else:
@@ -677,7 +677,7 @@ class TestJitSaveMultiCases(unittest.TestCase):
         model_path = "test_not_prune_output_spec_name_warning"
         configs = paddle.SaveLoadConfig()
-        out = paddle.to_variable(np.random.random((1, 1)).astype('float'))
+        out = paddle.to_tensor(np.random.random((1, 1)).astype('float'))
         configs.output_spec = [out]
         paddle.jit.save(layer, model_path, configs=configs)
@@ -709,7 +709,7 @@ class TestJitSaveMultiCases(unittest.TestCase):
         model_path = "test_prune_to_static_after_train"
         configs = paddle.SaveLoadConfig()
-        out = paddle.to_variable(np.random.random((1, 1)).astype('float'))
+        out = paddle.to_tensor(np.random.random((1, 1)).astype('float'))
         configs.output_spec = [out]
         with self.assertRaises(ValueError):
             paddle.jit.save(
@@ -730,7 +730,7 @@ class TestJitSaveLoadEmptyLayer(unittest.TestCase):
     def test_save_load_empty_layer(self):
         layer = EmptyLayer()
-        x = paddle.to_variable(np.random.random((10)).astype('float32'))
+        x = paddle.to_tensor(np.random.random((10)).astype('float32'))
         out = layer(x)
         paddle.jit.save(layer, self.model_path)
         load_layer = paddle.jit.load(self.model_path)
@@ -746,8 +746,8 @@ class TestJitSaveLoadNoParamLayer(unittest.TestCase):
     def test_save_load_no_param_layer(self):
         layer = NoParamLayer()
-        x = paddle.to_variable(np.random.random((5)).astype('float32'))
-        y = paddle.to_variable(np.random.random((5)).astype('float32'))
+        x = paddle.to_tensor(np.random.random((5)).astype('float32'))
+        y = paddle.to_tensor(np.random.random((5)).astype('float32'))
         out = layer(x, y)
         paddle.jit.save(layer, self.model_path)
         load_layer = paddle.jit.load(self.model_path)
...
@@ -90,7 +90,7 @@ class TestKLDivLossDygraph(unittest.TestCase):
         with paddle.fluid.dygraph.guard():
             kldiv_criterion = paddle.nn.KLDivLoss(reduction)
             pred_loss = kldiv_criterion(
-                paddle.to_variable(x), paddle.to_variable(target))
+                paddle.to_tensor(x), paddle.to_tensor(target))
             self.assertTrue(np.allclose(pred_loss.numpy(), gt_loss))

     def test_kl_loss_batchmean(self):
...
@@ -26,8 +26,8 @@ class TestFunctionalL1Loss(unittest.TestCase):
         self.label_np = np.random.random(size=(10, 10, 5)).astype(np.float32)

     def run_imperative(self):
-        input = paddle.to_variable(self.input_np)
-        label = paddle.to_variable(self.label_np)
+        input = paddle.to_tensor(self.input_np)
+        label = paddle.to_tensor(self.label_np)
         dy_result = paddle.nn.functional.l1_loss(input, label)
         expected = np.mean(np.abs(self.input_np - self.label_np))
         self.assertTrue(np.allclose(dy_result.numpy(), expected))
@@ -106,8 +106,8 @@ class TestClassL1Loss(unittest.TestCase):
         self.label_np = np.random.random(size=(10, 10, 5)).astype(np.float32)

     def run_imperative(self):
-        input = paddle.to_variable(self.input_np)
-        label = paddle.to_variable(self.label_np)
+        input = paddle.to_tensor(self.input_np)
+        label = paddle.to_tensor(self.label_np)
         l1_loss = paddle.nn.loss.L1Loss()
         dy_result = l1_loss(input, label)
         expected = np.mean(np.abs(self.input_np - self.label_np))
...
@@ -96,7 +96,7 @@ class TestNNLogSoftmaxAPI(unittest.TestCase):
         # test dygrapg api
         paddle.disable_static()
-        x = paddle.to_variable(self.x)
+        x = paddle.to_tensor(self.x)
         y = logsoftmax(x)
         self.assertTrue(np.allclose(y.numpy(), ref_out))
         paddle.enable_static()
@@ -127,7 +127,7 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
         self.assertTrue(np.allclose(out[0], ref_out))

         paddle.disable_static()
-        x = paddle.to_variable(self.x)
+        x = paddle.to_tensor(self.x)
         y = F.log_softmax(x, axis, dtype)
         self.assertTrue(np.allclose(y.numpy(), ref_out), True)
         paddle.enable_static()
...
@@ -111,7 +111,7 @@ class TestLogsumexpAPI(unittest.TestCase):
             self.assertTrue(np.allclose(res[0], out_ref))

         paddle.disable_static(self.place)
-        x = paddle.to_variable(self.x)
+        x = paddle.to_tensor(self.x)
         out = paddle.logsumexp(x, axis, keepdim)
         self.assertTrue(np.allclose(out.numpy(), out_ref))
         paddle.enable_static()
@@ -126,7 +126,7 @@ class TestLogsumexpAPI(unittest.TestCase):
     def test_alias(self):
         paddle.disable_static(self.place)
-        x = paddle.to_variable(self.x)
+        x = paddle.to_tensor(self.x)
         out1 = paddle.logsumexp(x)
         out2 = paddle.tensor.logsumexp(x)
         out3 = paddle.tensor.math.logsumexp(x)
...
@@ -80,7 +80,7 @@ class ApiMaxTest(unittest.TestCase):
     def test_imperative_api(self):
         paddle.disable_static()
         np_x = np.array([10, 10]).astype('float64')
-        x = paddle.to_variable(np_x)
+        x = paddle.to_tensor(np_x)
         z = paddle.max(x, axis=0)
         np_z = z.numpy()
         z_expected = np.array(np.max(np_x, axis=0))
...
@@ -61,8 +61,8 @@ class ApiMaximumTest(unittest.TestCase):
     def test_dynamic_api(self):
         paddle.disable_static()
         np_x = np.array([10, 10]).astype('float64')
-        x = paddle.to_variable(self.input_x)
-        y = paddle.to_variable(self.input_y)
+        x = paddle.to_tensor(self.input_x)
+        y = paddle.to_tensor(self.input_y)
         z = paddle.maximum(x, y)
         np_z = z.numpy()
         z_expected = np.array(np.maximum(self.input_x, self.input_y))
@@ -73,8 +73,8 @@ class ApiMaximumTest(unittest.TestCase):
         np_x = np.random.rand(5, 4, 3, 2).astype("float64")
         np_y = np.random.rand(4, 3).astype("float64")
-        x = paddle.to_variable(self.input_x)
-        y = paddle.to_variable(self.input_y)
+        x = paddle.to_tensor(self.input_x)
+        y = paddle.to_tensor(self.input_y)
         result_1 = paddle.maximum(x, y, axis=1)
         result_2 = paddle.maximum(x, y, axis=-2)
         self.assertEqual((result_1.numpy() == result_2.numpy()).all(), True)
@@ -204,7 +204,7 @@ class TestMeanAPI(unittest.TestCase):
         paddle.disable_static(self.place)

         def test_case(x, axis=None, keepdim=False):
-            x_tensor = paddle.to_variable(x)
+            x_tensor = paddle.to_tensor(x)
             out = paddle.mean(x_tensor, axis, keepdim)
             if isinstance(axis, list):
                 axis = tuple(axis)
...
@@ -80,7 +80,7 @@ class ApiMinTest(unittest.TestCase):
     def test_imperative_api(self):
         paddle.disable_static()
         np_x = np.array([10, 10]).astype('float64')
-        x = paddle.to_variable(np_x)
+        x = paddle.to_tensor(np_x)
         z = paddle.min(x, axis=0)
         np_z = z.numpy()
         z_expected = np.array(np.min(np_x, axis=0))
...
@@ -63,7 +63,7 @@ class TestRandnOpForDygraph(unittest.TestCase):
             dim_2 = paddle.fill_constant([1], "int32", 50)
             x3 = paddle.randn(shape=[dim_1, dim_2, 784])

-            var_shape = paddle.to_variable(np.array(shape))
+            var_shape = paddle.to_tensor(np.array(shape))
             x4 = paddle.randn(var_shape)

             for out in [x1, x2, x3, x4]:
...
@@ -105,8 +105,8 @@ class TestRetainGraph(unittest.TestCase):
         A = np.random.rand(2, 3, 32, 32).astype('float32')
         B = np.random.rand(2, 3, 32, 32).astype('float32')

-        realA = paddle.to_variable(A)
-        realB = paddle.to_variable(B)
+        realA = paddle.to_tensor(A)
+        realB = paddle.to_tensor(B)
         fakeB = g(realA)

         optim_d.clear_gradients()
...
@@ -487,24 +487,24 @@ class TestTransformer(unittest.TestCase):
             dropout=dropout,
             weight_attr=[None],
             bias_attr=[False])
-        src = paddle.to_variable(
+        src = paddle.to_tensor(
             np.random.rand(batch_size, source_length, d_model).astype(
                 "float32"))
-        tgt = paddle.to_variable(
+        tgt = paddle.to_tensor(
             np.random.rand(batch_size, target_length, d_model).astype(
                 "float32"))
         src_mask = np.zeros((batch_size, n_head, source_length,
                              source_length)).astype("float32")
         src_mask[0][0][0][0] = -np.inf
-        src_mask = paddle.to_variable(src_mask)
+        src_mask = paddle.to_tensor(src_mask)
         tgt_mask = np.zeros((batch_size, n_head, target_length,
                              target_length)).astype("float32")
         tgt_mask[0][0][0][0] = -1e9
         memory_mask = np.zeros((batch_size, n_head, target_length,
                                 source_length)).astype("float32")
         memory_mask[0][0][0][0] = -1e9
-        tgt_mask, memory_mask = paddle.to_variable(
-            tgt_mask), paddle.to_variable(memory_mask)
+        tgt_mask, memory_mask = paddle.to_tensor(
+            tgt_mask), paddle.to_tensor(memory_mask)
         trans_output = transformer(src, tgt, src_mask, tgt_mask,
                                    memory_mask)
@@ -521,24 +521,24 @@ class TestTransformer(unittest.TestCase):
             dropout=dropout,
             weight_attr=[None, None],
             bias_attr=[False, False])
-        src = paddle.to_variable(
+        src = paddle.to_tensor(
             np.random.rand(batch_size, source_length, d_model).astype(
                 "float32"))
-        tgt = paddle.to_variable(
+        tgt = paddle.to_tensor(
             np.random.rand(batch_size, target_length, d_model).astype(
                 "float32"))
         src_mask = np.zeros((batch_size, n_head, source_length,
                              source_length)).astype("float32")
         src_mask[0][0][0][0] = -np.inf
-        src_mask = paddle.to_variable(src_mask)
+        src_mask = paddle.to_tensor(src_mask)
         tgt_mask = np.zeros((batch_size, n_head, target_length,
                              target_length)).astype("float32")
         tgt_mask[0][0][0][0] = -1e9
         memory_mask = np.zeros((batch_size, n_head, target_length,
                                 source_length)).astype("float32")
         memory_mask[0][0][0][0] = -1e9
-        tgt_mask, memory_mask = paddle.to_variable(
-            tgt_mask), paddle.to_variable(memory_mask)
+        tgt_mask, memory_mask = paddle.to_tensor(
+            tgt_mask), paddle.to_tensor(memory_mask)
         trans_output = transformer(src, tgt, src_mask, tgt_mask,
                                    memory_mask)
@@ -555,24 +555,24 @@ class TestTransformer(unittest.TestCase):
             dropout=dropout,
             weight_attr=[None, None, None],
             bias_attr=[False, False, True])
-        src = paddle.to_variable(
+        src = paddle.to_tensor(
             np.random.rand(batch_size, source_length, d_model).astype(
                 "float32"))
-        tgt = paddle.to_variable(
+        tgt = paddle.to_tensor(
             np.random.rand(batch_size, target_length, d_model).astype(
                 "float32"))
         src_mask = np.zeros((batch_size, n_head, source_length,
                              source_length)).astype("float32")
         src_mask[0][0][0][0] = -np.inf
-        src_mask = paddle.to_variable(src_mask)
+        src_mask = paddle.to_tensor(src_mask)
         tgt_mask = np.zeros((batch_size, n_head, target_length,
                              target_length)).astype("float32")
         tgt_mask[0][0][0][0] = -1e9
         memory_mask = np.zeros((batch_size, n_head, target_length,
                                 source_length)).astype("float32")
         memory_mask[0][0][0][0] = -1e9
-        tgt_mask, memory_mask = paddle.to_variable(
-            tgt_mask), paddle.to_variable(memory_mask)
+        tgt_mask, memory_mask = paddle.to_tensor(
+            tgt_mask), paddle.to_tensor(memory_mask)
         trans_output = transformer(src, tgt, src_mask, tgt_mask,
                                    memory_mask)
@@ -588,24 +588,24 @@ class TestTransformer(unittest.TestCase):
             dim_feedforward=dim_feedforward,
             dropout=dropout,
             bias_attr=False)
-        src = paddle.to_variable(
+        src = paddle.to_tensor(
             np.random.rand(batch_size, source_length, d_model).astype(
                 "float32"))
-        tgt = paddle.to_variable(
+        tgt = paddle.to_tensor(
             np.random.rand(batch_size, target_length, d_model).astype(
                 "float32"))
         src_mask = np.zeros((batch_size, n_head, source_length,
                              source_length)).astype("float32")
         src_mask[0][0][0][0] = -np.inf
-        src_mask = paddle.to_variable(src_mask)
+        src_mask = paddle.to_tensor(src_mask)
         tgt_mask = np.zeros((batch_size, n_head, target_length,
                              target_length)).astype("float32")
         tgt_mask[0][0][0][0] = -1e9
         memory_mask = np.zeros((batch_size, n_head, target_length,
                                 source_length)).astype("float32")
         memory_mask[0][0][0][0] = -1e9
-        tgt_mask, memory_mask = paddle.to_variable(
-            tgt_mask), paddle.to_variable(memory_mask)
+        tgt_mask, memory_mask = paddle.to_tensor(
+            tgt_mask), paddle.to_tensor(memory_mask)
         trans_output = transformer(src, tgt, src_mask, tgt_mask,
                                    memory_mask)
...
@@ -63,7 +63,7 @@ class TestZerosLikeImpeartive(unittest.TestCase):
         place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
         ) else fluid.CPUPlace()
         paddle.disable_static(place)
-        x = paddle.to_variable(np.ones(shape))
+        x = paddle.to_tensor(np.ones(shape))
         for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]:
             out = zeros_like(x, dtype)
             self.assertEqual((out.numpy() == np.zeros(shape, dtype)).all(),
...
@@ -707,20 +707,14 @@ def cross(x, y, axis=None, name=None):
     Examples:
         .. code-block:: python

            import paddle
-           from paddle import to_variable
-           import numpy as np

            paddle.disable_static()
-           data_x = np.array([[1.0, 1.0, 1.0],
-                              [2.0, 2.0, 2.0],
-                              [3.0, 3.0, 3.0]])
-           data_y = np.array([[1.0, 1.0, 1.0],
-                              [1.0, 1.0, 1.0],
-                              [1.0, 1.0, 1.0]])
-           x = to_variable(data_x)
-           y = to_variable(data_y)
+           x = paddle.to_tensor([[1.0, 1.0, 1.0],
+                                 [2.0, 2.0, 2.0],
+                                 [3.0, 3.0, 3.0]])
+           y = paddle.to_tensor([[1.0, 1.0, 1.0],
+                                 [1.0, 1.0, 1.0],
+                                 [1.0, 1.0, 1.0]])
            z1 = paddle.cross(x, y)
            print(z1.numpy())
            # [[-1. -1. -1.]
...
@@ -1650,12 +1650,11 @@ def cumsum(x, axis=None, dtype=None, name=None):
         .. code-block:: python

             import paddle
-            from paddle import to_variable
             import numpy as np

             paddle.disable_static()
             data_np = np.arange(12).reshape(3, 4)
-            data = to_variable(data_np)
+            data = paddle.to_tensor(data_np)
             y = paddle.cumsum(data)
             print(y.numpy())
...
@@ -251,9 +251,10 @@
         "BilinearTensorProduct",
         "GroupNorm",
         "SpectralNorm",
-        "TreeConv",
+        "TreeConv"
+    ],
+    "wlist_temp":[
         "prroi_pool",
+        "to_tensor",
         "ChunkEvaluator",
         "EditDistance",
         "ErrorClipByValue",
...