Unverified · Commit c239f15a authored by zhiboniu, committed by GitHub

tensor fluid code transfer part2 (#41096)

Parent 1e56ca8a
@@ -15,7 +15,8 @@
 from typing import Sequence
 import numpy as np
 import paddle
-from .tensor.attribute import is_complex, is_floating_point, is_integer, _real_to_complex_dtype, _complex_to_real_dtype
+from .tensor.attribute import is_complex, is_floating_point, is_integer
+from .tensor.creation import _real_to_complex_dtype, _complex_to_real_dtype
 from .fluid.framework import _non_static_mode
 from . import _C_ops
 from .fluid.data_feeder import check_variable_and_dtype
...

@@ -17,6 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle
 import paddle.fluid as fluid
@@ -225,31 +226,30 @@ class TestCropTensorException(unittest.TestCase):
         offset = fluid.data(name='offset', shape=[1], dtype='int32')

         def attr_shape_type():
-            out = fluid.layers.crop_tensor(input1, shape=3)
+            out = paddle.crop(input1, shape=3)

         def attr_shape_dtype():
-            out = fluid.layers.crop_tensor(input1, shape=[2, 2.0, 3, 3])
+            out = paddle.crop(input1, shape=[2, 2.0, 3, 3])

         def attr_shape_value1():
-            out = fluid.layers.crop_tensor(input1, shape=[2, -2, dim, 3])
+            out = paddle.crop(input1, shape=[2, -2, dim, 3])

         def attr_shape_value2():
-            out = fluid.layers.crop_tensor(input1, shape=[2, 0, dim, 3])
+            out = paddle.crop(input1, shape=[2, 0, dim, 3])

         def attr_offsets_type():
-            out = fluid.layers.crop_tensor(
-                input1, shape=[2, 2, 3, 3], offsets=0)
+            out = paddle.crop(input1, shape=[2, 2, 3, 3], offsets=0)

         def attr_offsets_dtype():
-            out = fluid.layers.crop_tensor(
+            out = paddle.crop(
                 input1, shape=[2, 2, 3, 3], offsets=[0, 1.0, 0, 0])

         def attr_offsets_value():
-            out = fluid.layers.crop_tensor(
+            out = paddle.crop(
                 input1, shape=[2, 2, 3, 3], offsets=[0, -1, offset, 0])

         def input_dtype():
-            out = fluid.layers.crop_tensor(input2, shape=[2, 2, 3, 3])
+            out = paddle.crop(input2, shape=[2, 2, 3, 3])

         self.assertRaises(TypeError, attr_shape_type)
         self.assertRaises(TypeError, attr_shape_dtype)
...
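For context, a minimal eager-mode sketch (not part of this diff) of the migrated API; the tensor, shape, and offsets are illustrative only:

```python
import paddle

x = paddle.arange(24, dtype='float32').reshape([2, 3, 4])
# Crop a [1, 2, 3] window starting at offsets [1, 0, 1] in each axis.
out = paddle.crop(x, shape=[1, 2, 3], offsets=[1, 0, 1])
print(out.shape)  # [1, 2, 3]
```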
@@ -534,13 +534,13 @@ class TestSliceAPI(unittest.TestCase):
         # value_int64 is greater than 2147483647 which is the max of int32
         value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648)

-        out_1 = fluid.layers.slice(
+        out_1 = paddle.slice(
             x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1])
-        out_2 = fluid.layers.slice(
+        out_2 = paddle.slice(
             x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1])
-        out_3 = fluid.layers.slice(
+        out_3 = paddle.slice(
             x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, minus_1])
-        out_4 = fluid.layers.slice(x, axes=[0, 1, 2], starts=starts, ends=ends)
+        out_4 = paddle.slice(x, axes=[0, 1, 2], starts=starts, ends=ends)

         out_5 = x[-3:3, 0:100, 2:-1]
         out_6 = x[minus_3:3, 0:100, :, 2:-1]
...
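As a companion to the hunk above, a small sketch (assuming eager mode) of the normalization and clipping behaviour the test exercises:

```python
import paddle

x = paddle.arange(60, dtype='float32').reshape([3, 4, 5])
# Negative starts/ends count from the end of the axis, and out-of-range
# ends are clipped, so this yields a [3, 4, 2] tensor.
out = paddle.slice(x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[3, 100, -1])
print(out.shape)  # [3, 4, 2]
```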
@@ -534,25 +534,25 @@ class TestStridedSliceAPI(unittest.TestCase):
             shape=[3, 4, 5, 6],
             append_batch_size=False,
             dtype="float64")
-        out_1 = fluid.layers.strided_slice(
+        out_1 = paddle.strided_slice(
             x,
             axes=[0, 1, 2],
             starts=[-3, 0, 2],
             ends=[3, 100, -1],
             strides=[1, 1, 1])
-        out_2 = fluid.layers.strided_slice(
+        out_2 = paddle.strided_slice(
             x,
             axes=[0, 1, 3],
             starts=[minus_3, 0, 2],
             ends=[3, 100, -1],
             strides=[1, 1, 1])
-        out_3 = fluid.layers.strided_slice(
+        out_3 = paddle.strided_slice(
             x,
             axes=[0, 1, 3],
             starts=[minus_3, 0, 2],
             ends=[3, 100, minus_1],
             strides=[1, 1, 1])
-        out_4 = fluid.layers.strided_slice(
+        out_4 = paddle.strided_slice(
             x, axes=[0, 1, 2], starts=starts, ends=ends, strides=strides)

         out_5 = x[-3:3, 0:100:2, -1:2:-1]
...
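A minimal eager-mode sketch (illustrative values) of the same call pattern as `out_1` above; axis 3 is left untouched:

```python
import paddle

x = paddle.arange(360, dtype='float64').reshape([3, 4, 5, 6])
out = paddle.strided_slice(
    x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[3, 100, -1],
    strides=[1, 1, 1])
print(out.shape)  # [3, 4, 2, 6]
```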
@@ -14,37 +14,128 @@
 from __future__ import print_function

-from ..framework import core
-from ..fluid.layer_helper import LayerHelper
+from ..framework import core, _non_static_mode
+from ..framework import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype
+from ..fluid.data_feeder import check_type
+from .creation import assign
+from .creation import _complex_to_real_dtype

 # TODO: define functions to get tensor attributes
-from ..fluid.layers import rank  # noqa: F401
-from ..fluid.layers import shape  # noqa: F401
 import paddle
 from paddle import _C_ops
-from paddle.static import Variable
+from ..static import Variable
 from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+import numpy as np

 __all__ = []
-def _complex_to_real_dtype(dtype):
-    if dtype == core.VarDesc.VarType.COMPLEX64:
-        return core.VarDesc.VarType.FP32
-    elif dtype == core.VarDesc.VarType.COMPLEX128:
-        return core.VarDesc.VarType.FP64
-    else:
-        return dtype
-
-
-def _real_to_complex_dtype(dtype):
-    if dtype == core.VarDesc.VarType.FP32:
-        return core.VarDesc.VarType.COMPLEX64
-    elif dtype == core.VarDesc.VarType.FP64:
-        return core.VarDesc.VarType.COMPLEX128
-    else:
-        return dtype
+def rank(input):
+    """
+    Returns the number of dimensions of a tensor, as a 0-D int32 Tensor.
+
+    Args:
+        input (Tensor): The input N-D tensor with shape of :math:`[N_1, N_2, ..., N_k]`; the data type is arbitrary.
+
+    Returns:
+        Tensor: The 0-D int32 Tensor holding the number of dimensions of the input Tensor.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+
+            input = paddle.rand((3, 100, 100))
+            rank = paddle.rank(input)
+            print(rank)
+            # 3
+    """
+    check_type(input, 'input', (Variable), 'input')
+    ndims = len(input.shape)
+    out = assign(np.array(ndims, 'int32'))
+
+    return out
+
+
+def shape(input):
+    """
+    :alias_main: paddle.shape
+    :alias: paddle.shape, paddle.tensor.shape, paddle.tensor.attribute.shape
+    :old_api: paddle.fluid.layers.shape
+
+    **Shape Layer**
+
+    Get the shape of the input.
+
+    .. code-block:: text
+
+        Case 1:
+            Given N-D Tensor:
+                input = [ [1, 2, 3, 4], [5, 6, 7, 8] ]
+            Then:
+                input.shape = [2, 4]
+
+        Case 2:
+            Given SelectedRows:
+                input.rows = [0, 4, 19]
+                input.height = 20
+                input.value = [ [1, 2], [3, 4], [5, 6] ]  # inner tensor
+            Then:
+                input.shape = [3, 2]
+
+    Args:
+        input (Variable): The input can be an N-D Tensor or SelectedRows with data type bool, float16, float32, float64, int32 or int64.
+            If the input is a SelectedRows, returns the shape of its inner tensor.
+
+    Returns:
+        Variable (Tensor): The shape of the input variable.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle.fluid as fluid
+            import numpy as np
+            import paddle
+            paddle.enable_static()
+
+            inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32")
+            output = fluid.layers.shape(inputs)
+
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(fluid.default_startup_program())
+
+            img = np.ones((3, 100, 100)).astype(np.float32)
+
+            res = exe.run(fluid.default_main_program(), feed={'x': img}, fetch_list=[output])
+            print(res)  # [array([  3, 100, 100], dtype=int32)]
+    """
+    if in_dygraph_mode():
+        out = _C_ops.final_state_shape(input)
+        out.stop_gradient = True
+        return out
+    if _in_legacy_dygraph():
+        out = _C_ops.shape(input)
+        out.stop_gradient = True
+        return out
+
+    check_variable_and_dtype(input, 'input', [
+        'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64',
+        'complex128'
+    ], 'shape')
+    helper = LayerHelper('shape', **locals())
+    out = helper.create_variable_for_type_inference(dtype='int32')
+    helper.append_op(
+        type='shape',
+        inputs={'Input': input},
+        outputs={'Out': out},
+        stop_gradient=True)
+
+    return out
 def is_complex(x):

...
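The two ops moved into this module differ in when the answer is known: `rank` reads the static ndim eagerly and materializes it with `assign`, while `shape` emits a kernel and so also covers shapes only known at run time. A minimal eager-mode sketch:

```python
import paddle

x = paddle.rand((3, 100, 100))
print(paddle.rank(x))   # int32 Tensor holding 3
print(paddle.shape(x))  # int32 Tensor [3, 100, 100]
```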
@@ -14,27 +14,138 @@
 from __future__ import print_function

 import numpy as np
+import math
 from paddle.common_ops_import import fill_constant
 from ..fluid.layers import utils
-from ..fluid.layers import tensor
 from ..static import Variable, device_guard
 from ..framework import _current_expected_place, _get_paddle_place
 from ..framework import dygraph_only
 from ..framework import core
-from ..fluid.layer_helper import LayerHelper
+from ..framework import in_dygraph_mode, _non_static_mode
+from ..framework import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
 from ..framework import convert_np_dtype_to_dtype_, _varbase_creator, OpProtoHolder
-from paddle.tensor.attribute import _complex_to_real_dtype, _real_to_complex_dtype

 # TODO: define functions to get create a tensor
-from ..fluid.layers import linspace  # noqa: F401
 import paddle
 from paddle import _C_ops
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode, _in_eager_without_dygraph_check
+from ..fluid.framework import _in_legacy_dygraph, _in_eager_without_dygraph_check
+import warnings

 __all__ = []
+def _complex_to_real_dtype(dtype):
+    if dtype == core.VarDesc.VarType.COMPLEX64:
+        return core.VarDesc.VarType.FP32
+    elif dtype == core.VarDesc.VarType.COMPLEX128:
+        return core.VarDesc.VarType.FP64
+    else:
+        return dtype
+
+
+def _real_to_complex_dtype(dtype):
+    if dtype == core.VarDesc.VarType.FP32:
+        return core.VarDesc.VarType.COMPLEX64
+    elif dtype == core.VarDesc.VarType.FP64:
+        return core.VarDesc.VarType.COMPLEX128
+    else:
+        return dtype
+def linspace(start, stop, num, dtype=None, name=None):
+    r"""
+    This OP returns a fixed number of evenly spaced values within a given interval.
+
+    Args:
+        start(int|float|Tensor): The input :attr:`start` is the start of the range. It is a scalar, \
+            or a Tensor of shape [1] with data type int32, int64, float32 or float64.
+        stop(int|float|Tensor): The input :attr:`stop` is the end of the range. It is a scalar, \
+            or a Tensor of shape [1] with data type int32, int64, float32 or float64.
+        num(int|Tensor): The input :attr:`num` is the number of values in the sequence. It is an int scalar, \
+            or a Tensor of shape [1] with data type int32.
+        dtype(np.dtype|str, optional): The data type of the output tensor. It can be
+            int32, int64, float32 or float64. Default: if None, the data type is float32.
+        name(str, optional): Normally there is no need for the user to set this property.
+            For more information, please refer to :ref:`api_guide_Name`. Default: None.
+
+    Returns:
+        Tensor: the output data type will be float32 or float64. The 1-D tensor with a fixed number of evenly spaced values, \
+        the data shape of this tensor is :math:`[num]`. If :attr:`num` is set to 1, the output tensor just has \
+        the value of :attr:`start`.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            data = paddle.linspace(0, 10, 5, 'float32')  # [0.0,  2.5,  5.0,  7.5, 10.0]
+            data = paddle.linspace(0, 10, 1, 'float32')  # [0.0]
+
+    """
+    if dtype is None:
+        dtype = 'float32'
+    tensor_num = num
+    tensor_start = start
+    tensor_stop = stop
+    if not isinstance(num, Variable):
+        check_type(num, 'num', (int), 'linspace')
+    if not isinstance(dtype, core.VarDesc.VarType):
+        dtype = convert_np_dtype_to_dtype_(dtype)
+    if not isinstance(start, Variable):
+        with device_guard("cpu"):
+            tensor_start = fill_constant([1], dtype, start)
+    if not isinstance(stop, Variable):
+        with device_guard("cpu"):
+            tensor_stop = fill_constant([1], dtype, stop)
+    if not isinstance(num, Variable):
+        with device_guard("cpu"):
+            tensor_num = fill_constant([1], 'int32', num)
+    if _non_static_mode():
+        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, 'dtype',
+                               dtype)
+
+    helper = LayerHelper("linspace", **locals())
+
+    start_dtype = convert_dtype(tensor_start.dtype)
+    stop_dtype = convert_dtype(tensor_stop.dtype)
+    out_dtype = convert_dtype(dtype)
+    if isinstance(start, Variable):
+        check_dtype(start.dtype, 'start',
+                    ['float32', 'float64', 'int32', 'int64'], 'linspace')
+    else:
+        check_type(start, 'start', (int, float), 'linspace')
+
+    if isinstance(stop, Variable):
+        check_dtype(stop.dtype, 'stop',
+                    ['float32', 'float64', 'int32', 'int64'], 'linspace')
+    else:
+        check_type(stop, 'stop', (int, float), 'linspace')
+    if isinstance(num, Variable):
+        check_dtype(num.dtype, 'num', ['int32'], 'linspace')
+    check_dtype(dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'],
+                'linspace')
+    if ((stop_dtype == "float64" or start_dtype == "float64") and
+            out_dtype in ["float32", "int32"]) or ((stop_dtype == "int64" or
+                                                    start_dtype == "int64") and
+                                                   out_dtype == "int32"):
+        raise ValueError(
+            "The dtype of start/stop is {}/{} but the attr(dtype) of linspace is {}, "
+            "which may cause data type overflows. Please reset attr(dtype) of linspace."
+            .format(start_dtype, stop_dtype, dtype))
+
+    out = helper.create_variable_for_type_inference(dtype=dtype)
+
+    helper.append_op(
+        type='linspace',
+        inputs={'Start': tensor_start,
+                'Stop': tensor_stop,
+                'Num': tensor_num},
+        attrs={'dtype': dtype},
+        outputs={'Out': [out]})
+    if isinstance(num, int):
+        out.desc.set_shape((num, ))
+    return out
 @dygraph_only
 def to_tensor(data, dtype=None, place=None, stop_gradient=True):
     r"""
@@ -60,7 +171,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
         Tensor: A Tensor constructed from ``data``.

     Raises:
-        TypeError: If the data type of ``data`` is not scalar, list, tuple, numpy.ndarray, paddle.Tensor
+        TypeError: If the data type of ``data`` is not scalar, list, tuple, np.ndarray, paddle.Tensor
         ValueError: If ``data`` is tuple|list, it can't contain nested tuple|list with different lengths, such as: [[1, 2], [3, 4, 5]]
         TypeError: If ``dtype`` is not bool, float16, float32, float64, int8, int16, int32, int64, uint8, complex64, complex128
         ValueError: If ``place`` is not paddle.CPUPlace, paddle.CUDAPinnedPlace, paddle.CUDAPlace or specified pattern string.
@@ -152,7 +263,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
         return data
     else:
         raise TypeError(
-            "Can't constructs a 'paddle.Tensor' with data type {}, data type must be scalar|list|tuple|numpy.ndarray|paddle.Tensor".
+            "Can't construct a 'paddle.Tensor' with data type {}, data type must be scalar|list|tuple|np.ndarray|paddle.Tensor".
             format(type(data)))

     if not dtype:
         if data.dtype in [
@@ -439,11 +550,39 @@ def eye(num_rows, num_columns=None, dtype=None, name=None):
         dtype = 'float32'
     if num_columns is None:
         num_columns = num_rows
-    return paddle.fluid.layers.eye(num_rows=num_rows,
-                                   num_columns=num_columns,
-                                   batch_shape=None,
-                                   dtype=dtype,
-                                   name=name)
+
+    if not isinstance(dtype, core.VarDesc.VarType):
+        dtype = convert_np_dtype_to_dtype_(dtype)
+    if num_columns is not None:
+        if not isinstance(num_columns, int) or num_columns < 0:
+            raise TypeError("num_columns should be a non-negative int")
+    else:
+        num_columns = num_rows
+
+    if _non_static_mode():
+        out = _C_ops.eye('dtype', dtype, 'num_rows', num_rows, 'num_columns',
+                         num_columns)
+    else:
+        helper = LayerHelper("eye", **locals())
+        check_dtype(dtype, 'dtype',
+                    ['float16', 'float32', 'float64', 'int32', 'int64'], 'eye')
+        if not isinstance(num_rows, int) or num_rows < 0:
+            raise TypeError("num_rows should be a non-negative int")
+        out = helper.create_variable_for_type_inference(dtype=dtype)
+        helper.append_op(
+            type='eye',
+            inputs={},
+            outputs={'Out': [out]},
+            attrs={
+                'num_rows': num_rows,
+                'num_columns': num_columns,
+                'dtype': dtype
+            },
+            stop_gradient=True)
+
+    out.stop_gradient = True
+    return out
 def full(shape, fill_value, dtype=None, name=None):
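The rewritten `eye` no longer delegates to `paddle.fluid.layers.eye`; a minimal sketch of the non-square case that the `num_columns` check above admits:

```python
import paddle

out = paddle.eye(3, num_columns=4, dtype='float32')
print(out.numpy())
# [[1. 0. 0. 0.]
#  [0. 1. 0. 0.]
#  [0. 0. 1. 0.]]
```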
@@ -564,7 +703,53 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
         end = start
         start = 0

-    return paddle.fluid.layers.range(start, end, step, dtype, name)
+    if not isinstance(dtype, core.VarDesc.VarType):
+        dtype = convert_np_dtype_to_dtype_(dtype)
+
+    if not isinstance(start, Variable):
+        with device_guard("cpu"):
+            start = fill_constant([1], dtype, start, force_cpu=True)
+    elif start.dtype != dtype:
+        start = paddle.cast(start, dtype)
+
+    if not isinstance(end, Variable):
+        with device_guard("cpu"):
+            end = fill_constant([1], dtype, end, force_cpu=True)
+    elif end.dtype != dtype:
+        end = paddle.cast(end, dtype)
+
+    if not isinstance(step, Variable):
+        with device_guard("cpu"):
+            step = fill_constant([1], dtype, step, force_cpu=True)
+    elif step.dtype != dtype:
+        step = paddle.cast(step, dtype)
+
+    if in_dygraph_mode():
+        return _C_ops.final_state_arange(start, end, step, dtype,
+                                         _current_expected_place())
+
+    if _in_legacy_dygraph():
+        out = _C_ops.range(start, end, step)
+        out.stop_gradient = True
+        return out
+
+    out_shape = None
+    if not isinstance(start, Variable) and not isinstance(
+            end, Variable) and not isinstance(step, Variable):
+        out_shape = [int(math.ceil((end - start) / step))]
+
+    check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'],
+                'range/arange')
+    helper = LayerHelper('range', **locals())
+    out = helper.create_variable_for_type_inference(dtype, shape=out_shape)
+    helper.append_op(
+        type='range',
+        inputs={'Start': start,
+                'End': end,
+                'Step': step},
+        outputs={'Out': out})
+    out.stop_gradient = True
+    return out
 def _tril_triu_op(helper):
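A small sketch of the inlined `arange`: when start/end/step are plain Python scalars, the output length follows `ceil((end - start) / step)`, which is the arithmetic the code above uses for the static shape:

```python
import math
import paddle

print(paddle.arange(3, 9, 2).numpy())     # [3 5 7]
print(int(math.ceil((9 - 3) / 2)))        # 3, the same length
print(paddle.arange(0, 1, 0.25).numpy())  # [0.   0.25 0.5  0.75]
```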
@@ -1187,7 +1372,7 @@ def assign(x, output=None):
     The OP copies the :attr:`x` to the :attr:`output`.

     Parameters:
-        x (Tensor|numpy.ndarray|list|tuple|scalar): A tensor, numpy ndarray, tuple/list of scalar,
+        x (Tensor|np.ndarray|list|tuple|scalar): A tensor, numpy ndarray, tuple/list of scalar,
             or scalar. Its data type supports float16, float32, float64, int32, int64, and bool.
             Note: the float64 data will be converted to float32 because of current platform protobuf
             data limitation.
@@ -1211,9 +1396,91 @@ def assign(x, output=None):
             result2 = paddle.assign(data)  # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
             result3 = paddle.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32'))  # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
     """
-    check_type(x, 'x', (Variable, np.ndarray, list, tuple, float, int, bool),
-               'assign')
-    return tensor.assign(x, output)
+    input = x
+    helper = LayerHelper('assign', **locals())
+    check_type(input, 'input', (Variable, np.ndarray, list, tuple, float, int,
+                                bool), 'assign')
+    is_inplace = True if output is not None else False
+
+    if np.isscalar(input) and not isinstance(input, str):
+        input = np.array([input])
+    elif isinstance(input, (list, tuple)):
+        input = np.array(input)
+    # NOTE(Aurelius84): Why do we check core.VarBase?
+    # In case of @to_static, a VarBase can be an input of `assign`,
+    # but _non_static_mode() == False under @to_static, which means
+    # isinstance(VarBase, Variable) == False. This would cause the api
+    # to return None.
+    if isinstance(input, (Variable, core.VarBase)):
+        if _non_static_mode():
+            if output is None:
+                if _in_legacy_dygraph():
+                    output = core.VarBase()
+                else:
+                    output = core.eager.Tensor()
+            _C_ops.assign(input, output)
+        else:
+            check_dtype(input.dtype, 'input', [
+                'float16', 'uint16', 'float32', 'float64', 'int32', 'int64',
+                'uint8', 'bool'
+            ], 'assign', '(When the type of input in assign is Variable.)')
+            if output is None:
+                output = helper.create_variable_for_type_inference(
+                    dtype=input.dtype)
+            helper.append_op(
+                type='assign', inputs={'X': [input]},
+                outputs={'Out': [output]})
+    elif isinstance(input, np.ndarray):
+        # Not support [var, var, ...] currently.
+        if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
+            raise TypeError(
+                "Required type(input) numpy.ndarray, but found `list(Variable)` in input."
+            )
+        dtype = convert_np_dtype_to_dtype_(input.dtype)
+        if dtype == core.VarDesc.VarType.FP64:
+            # Setting FP64 numpy data is not supported in Paddle, so we
+            # use FP32 here
+            warnings.warn(
+                "paddle.assign doesn't support float64 input now due "
+                "to current platform protobuf data limitation, we convert "
+                "it to float32")
+            dtype = core.VarDesc.VarType.FP32
+        if dtype == core.VarDesc.VarType.BOOL:
+            value_name = "bool_values"
+            values = [int(v) for v in input.flat]
+        elif dtype == core.VarDesc.VarType.FP32:
+            value_name = "fp32_values"
+            values = [float(v) for v in input.flat]
+        elif dtype == core.VarDesc.VarType.INT32:
+            value_name = "int32_values"
+            values = [int(v) for v in input.flat]
+        elif dtype == core.VarDesc.VarType.INT64:
+            value_name = "int64_values"
+            values = [int(v) for v in input.flat]
+        else:
+            raise TypeError(
+                "When the type of 'input' in assign is numpy.ndarray, "
+                "the data type of 'input' must be bool, float32, int32 or int64, but "
+                "received %s." % convert_dtype(dtype))
+        if input.size > 1024 * 1024:
+            raise ValueError("The size of input is too big. Please consider "
+                             "saving it to file and 'load_op' to load it")
+        if output is None:
+            output = helper.create_variable_for_type_inference(
+                dtype=input.dtype)
+        helper.append_op(
+            type='assign_value',
+            outputs={'Out': [output]},
+            attrs={
+                'dtype': dtype,
+                'shape': list(input.shape),
+                value_name: values
+            })
+
+    if is_inplace and _non_static_mode():
+        output._bump_inplace_version()
+
+    return output
 def clone(x, name=None):

...
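To illustrate the branches of the inlined `assign` above (Tensor deep copy versus numpy/list materialization through `assign_value`), a minimal eager-mode sketch:

```python
import numpy as np
import paddle

t = paddle.assign(paddle.full([2, 2], 2.5))         # Tensor branch: deep copy
l = paddle.assign([[1, 2], [3, 4]])                 # list -> np.array -> assign_value
a = paddle.assign(np.array([1.0, 2.0], 'float32'))  # ndarray branch
print(t.numpy(), l.numpy(), a.numpy())
```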
@@ -13,14 +13,16 @@
 # limitations under the License.

 import numpy as np
-from ..fluid.layer_helper import LayerHelper
+from ..framework import LayerHelper
 from ..framework import _varbase_creator, _dygraph_tracer, in_dygraph_mode, _non_static_mode
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
 from ..static import Variable
 from ..fluid.framework import _in_legacy_dygraph
 from .manipulation import cast
-from ..fluid import layers
+from .math import multiply, add
+from .logic import logical_not
+from .creation import full
 import paddle
 from paddle.common_ops_import import core
 from paddle.common_ops_import import VarDesc
@@ -2532,11 +2534,11 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None):
             y = paddle.to_tensor(y, dtype=x.dtype)

             condition = s > cutoff
-            cond_int = layers.cast(condition, s.dtype)
-            cond_not_int = layers.cast(layers.logical_not(condition), s.dtype)
-            out1 = layers.elementwise_mul(1 / s, cond_int)
-            out2 = layers.elementwise_mul(1 / y, cond_not_int)
-            singular = layers.elementwise_add(out1, out2)
+            cond_int = cast(condition, s.dtype)
+            cond_not_int = cast(logical_not(condition), s.dtype)
+            out1 = multiply(1 / s, cond_int)
+            out2 = multiply(1 / y, cond_not_int)
+            singular = add(out1, out2)
             st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2])

             dims = list(range(len(vt.shape)))
@@ -2559,11 +2561,11 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None):
             y = paddle.to_tensor(y, dtype=s.dtype)

             condition = s_abs > cutoff
-            cond_int = layers.cast(condition, s.dtype)
-            cond_not_int = layers.cast(layers.logical_not(condition), s.dtype)
-            out1 = layers.elementwise_mul(1 / s, cond_int)
-            out2 = layers.elementwise_mul(1 / y, cond_not_int)
-            singular = layers.elementwise_add(out1, out2)
+            cond_int = cast(condition, s.dtype)
+            cond_not_int = cast(logical_not(condition), s.dtype)
+            out1 = multiply(1 / s, cond_int)
+            out2 = multiply(1 / y, cond_not_int)
+            singular = add(out1, out2)
             st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2])

             out_1 = u * st
@@ -2597,17 +2599,17 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None):
                        'keep_dim': True,
                        'reduce_all': False})

-            rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype)
+            rcond = full(shape=[1], fill_value=rcond, dtype=dtype)
             cutoff = rcond * max_singular_val
             y = float('inf')
-            y = layers.fill_constant(shape=[1], value=y, dtype=dtype)
+            y = full(shape=[1], fill_value=y, dtype=dtype)

             condition = s > cutoff
-            cond_int = layers.cast(condition, dtype)
-            cond_not_int = layers.cast(layers.logical_not(condition), dtype)
-            out1 = layers.elementwise_mul(1 / s, cond_int)
-            out2 = layers.elementwise_mul(1 / y, cond_not_int)
-            singular = layers.elementwise_add(out1, out2)
+            cond_int = cast(condition, dtype)
+            cond_not_int = cast(logical_not(condition), dtype)
+            out1 = multiply(1 / s, cond_int)
+            out2 = multiply(1 / y, cond_not_int)
+            singular = add(out1, out2)

             st = helper.create_variable_for_type_inference(dtype=dtype)
             st_shape = helper.create_variable_for_type_inference(dtype=dtype)
@@ -2682,17 +2684,17 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None):
                        'keep_dim': True,
                        'reduce_all': False})

-            rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type)
+            rcond = full(shape=[1], fill_value=rcond, dtype=s_type)
             cutoff = rcond * max_singular_val
             y = float('inf')
-            y = layers.fill_constant(shape=[1], value=y, dtype=s_type)
+            y = full(shape=[1], fill_value=y, dtype=s_type)

             condition = s_abs > cutoff
-            cond_int = layers.cast(condition, s_type)
-            cond_not_int = layers.cast(layers.logical_not(condition), s_type)
-            out1 = layers.elementwise_mul(1 / s, cond_int)
-            out2 = layers.elementwise_mul(1 / y, cond_not_int)
-            singular = layers.elementwise_add(out1, out2)
+            cond_int = cast(condition, s_type)
+            cond_not_int = cast(logical_not(condition), s_type)
+            out1 = multiply(1 / s, cond_int)
+            out2 = multiply(1 / y, cond_not_int)
+            singular = add(out1, out2)

             st = helper.create_variable_for_type_inference(dtype=s_type)
             st_shape = helper.create_variable_for_type_inference(dtype=s_type)
...
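The `pinv` hunks above swap `fluid.layers` helpers for their tensor-level equivalents without changing the math. A standalone eager-mode sketch (illustrative singular values) of the cutoff masking they implement:

```python
import paddle

s = paddle.to_tensor([3.0, 1.0, 1e-20])  # singular values
cutoff = 1e-15 * paddle.max(s)
condition = s > cutoff
cond_int = paddle.cast(condition, s.dtype)
cond_not_int = paddle.cast(paddle.logical_not(condition), s.dtype)
y = paddle.full([1], float('inf'), s.dtype)
out1 = paddle.multiply(1 / s, cond_int)      # reciprocals kept above cutoff
out2 = paddle.multiply(1 / y, cond_not_int)  # 1/inf == 0 below cutoff
singular = paddle.add(out1, out2)
print(singular.numpy())                      # [0.33333334 1.         0.        ]
```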
@@ -16,7 +16,7 @@
 from ..framework import core
 from ..framework import convert_np_dtype_to_dtype_, dygraph_only
-from ..fluid.layer_helper import LayerHelper
+from ..framework import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, check_shape
 from ..fluid.layers import utils
 import paddle

...
@@ -14,7 +14,7 @@
 from __future__ import print_function
 import numpy as np

 import paddle
-from ..fluid.layer_helper import LayerHelper
+from ..framework import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
 from ..fluid import layers
 from ..framework import core, in_dygraph_mode, _non_static_mode

...
@@ -16,7 +16,7 @@
 import numpy as np

 from ..static import Variable
-from ..fluid.layer_helper import LayerHelper
+from ..framework import LayerHelper
 from ..framework import core
 from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
 from .search import where

...