Unverified commit 05335a28 authored by Leo Chen, committed by GitHub

[release-2.0] Move some APIs from paddle to fluid (#24188)

* update paddle/tensor, test=develop

* update linalg.py, test=develop

* update nn/functional, test=develop

* delete paddle/tensor/*, test=develop

* merge upstream, test=develop

* update __init__, test=develop

* pass ci, test=develop
Parent 9220005c
...
@@ -34,172 +34,11 @@ import paddle.compat
import paddle.distributed
batch = batch.batch
import paddle.sysconfig
import paddle.tensor
import paddle.nn
import paddle.framework
import paddle.imperative
import paddle.complex
# TODO: define alias in tensor and framework directory
# from .tensor.creation import create_tensor #DEFINE_ALIAS
# from .tensor.creation import create_lod_tensor #DEFINE_ALIAS
# from .tensor.creation import create_random_int_lodtensor #DEFINE_ALIAS
# from .tensor.creation import crop_tensor #DEFINE_ALIAS
# from .tensor.creation import diag #DEFINE_ALIAS
from .tensor.creation import eye #DEFINE_ALIAS
from .tensor.creation import fill_constant #DEFINE_ALIAS
# from .tensor.creation import get_tensor_from_selected_rows #DEFINE_ALIAS
from .tensor.creation import linspace #DEFINE_ALIAS
from .tensor.creation import ones #DEFINE_ALIAS
from .tensor.creation import ones_like #DEFINE_ALIAS
# from .tensor.creation import range #DEFINE_ALIAS
from .tensor.creation import zeros #DEFINE_ALIAS
from .tensor.creation import zeros_like #DEFINE_ALIAS
from .tensor.creation import arange #DEFINE_ALIAS
# from .tensor.creation import eye #DEFINE_ALIAS
from .tensor.creation import full #DEFINE_ALIAS
# from .tensor.creation import linspace #DEFINE_ALIAS
# from .tensor.creation import full_like #DEFINE_ALIAS
# from .tensor.creation import triu #DEFINE_ALIAS
# from .tensor.creation import tril #DEFINE_ALIAS
from .tensor.creation import meshgrid #DEFINE_ALIAS
# from .tensor.stat import mean #DEFINE_ALIAS
# from .tensor.stat import reduce_mean #DEFINE_ALIAS
# from .tensor.stat import std #DEFINE_ALIAS
from .tensor.stat import var #DEFINE_ALIAS
from .tensor.logic import equal #DEFINE_ALIAS
# from .tensor.logic import greater_equal #DEFINE_ALIAS
# from .tensor.logic import greater_than #DEFINE_ALIAS
# from .tensor.logic import is_empty #DEFINE_ALIAS
# from .tensor.logic import isfinite #DEFINE_ALIAS
# from .tensor.logic import less_equal #DEFINE_ALIAS
# from .tensor.logic import less_than #DEFINE_ALIAS
# from .tensor.logic import logical_and #DEFINE_ALIAS
# from .tensor.logic import logical_not #DEFINE_ALIAS
# from .tensor.logic import logical_or #DEFINE_ALIAS
# from .tensor.logic import logical_xor #DEFINE_ALIAS
# from .tensor.logic import not_equal #DEFINE_ALIAS
# from .tensor.logic import reduce_all #DEFINE_ALIAS
# from .tensor.logic import reduce_any #DEFINE_ALIAS
from .tensor.logic import allclose #DEFINE_ALIAS
from .tensor.logic import elementwise_equal #DEFINE_ALIAS
# from .tensor.logic import isnan #DEFINE_ALIAS
# from .tensor.tensor import Tensor #DEFINE_ALIAS
# from .tensor.tensor import LoDTensor #DEFINE_ALIAS
# from .tensor.tensor import LoDTensorArray #DEFINE_ALIAS
# from .tensor.random import gaussin #DEFINE_ALIAS
# from .tensor.random import uniform #DEFINE_ALIAS
# from .tensor.random import shuffle #DEFINE_ALIAS
from .tensor.random import randn #DEFINE_ALIAS
from .tensor.random import randperm
# from .tensor.random import rand #DEFINE_ALIAS
from .tensor.random import randint #DEFINE_ALIAS
# from .tensor.math import abs #DEFINE_ALIAS
# from .tensor.math import acos #DEFINE_ALIAS
# from .tensor.math import asin #DEFINE_ALIAS
from .tensor.math import atan #DEFINE_ALIAS
# from .tensor.math import ceil #DEFINE_ALIAS
# from .tensor.math import cos #DEFINE_ALIAS
# from .tensor.math import cumsum #DEFINE_ALIAS
# from .tensor.math import elementwise_add #DEFINE_ALIAS
# from .tensor.math import elementwise_div #DEFINE_ALIAS
# from .tensor.math import elementwise_floordiv #DEFINE_ALIAS
# from .tensor.math import elementwise_max #DEFINE_ALIAS
# from .tensor.math import elementwise_min #DEFINE_ALIAS
# from .tensor.math import elementwise_mod #DEFINE_ALIAS
# from .tensor.math import elementwise_mul #DEFINE_ALIAS
# from .tensor.math import elementwise_pow #DEFINE_ALIAS
# from .tensor.math import elementwise_sub #DEFINE_ALIAS
# from .tensor.math import exp #DEFINE_ALIAS
# from .tensor.math import floor #DEFINE_ALIAS
# from .tensor.math import increment #DEFINE_ALIAS
# from .tensor.math import log #DEFINE_ALIAS
from .tensor.math import mul #DEFINE_ALIAS
# from .tensor.math import multiplex #DEFINE_ALIAS
from .tensor.math import pow #DEFINE_ALIAS
# from .tensor.math import reciprocal #DEFINE_ALIAS
# from .tensor.math import reduce_max #DEFINE_ALIAS
# from .tensor.math import reduce_min #DEFINE_ALIAS
# from .tensor.math import reduce_prod #DEFINE_ALIAS
# from .tensor.math import reduce_sum #DEFINE_ALIAS
# from .tensor.math import round #DEFINE_ALIAS
# from .tensor.math import rsqrt #DEFINE_ALIAS
# from .tensor.math import scale #DEFINE_ALIAS
# from .tensor.math import sign #DEFINE_ALIAS
from .tensor.math import sin #DEFINE_ALIAS
from .tensor.math import sqrt #DEFINE_ALIAS
# from .tensor.math import square #DEFINE_ALIAS
# from .tensor.math import stanh #DEFINE_ALIAS
from .tensor.math import sum #DEFINE_ALIAS
# from .tensor.math import sums #DEFINE_ALIAS
from .tensor.math import tanh #DEFINE_ALIAS
from .tensor.math import elementwise_sum #DEFINE_ALIAS
from .tensor.math import max #DEFINE_ALIAS
from .tensor.math import min #DEFINE_ALIAS
from .tensor.math import mm #DEFINE_ALIAS
from .tensor.math import div #DEFINE_ALIAS
from .tensor.math import add #DEFINE_ALIAS
# from .tensor.math import atan #DEFINE_ALIAS
from .tensor.math import logsumexp #DEFINE_ALIAS
# from .tensor.math import inverse #DEFINE_ALIAS
from .tensor.math import log1p #DEFINE_ALIAS
# from .tensor.math import erf #DEFINE_ALIAS
from .tensor.math import addcmul #DEFINE_ALIAS
from .tensor.math import addmm #DEFINE_ALIAS
from .tensor.math import clamp #DEFINE_ALIAS
# from .tensor.attribute import rank #DEFINE_ALIAS
# from .tensor.attribute import shape #DEFINE_ALIAS
# from .tensor.io import save #DEFINE_ALIAS
# from .tensor.io import load #DEFINE_ALIAS
from .tensor.linalg import matmul #DEFINE_ALIAS
from .tensor.linalg import dot #DEFINE_ALIAS
from .tensor.linalg import bmm #DEFINE_ALIAS
# from .tensor.linalg import einsum #DEFINE_ALIAS
from .tensor.linalg import norm #DEFINE_ALIAS
# from .tensor.linalg import transpose #DEFINE_ALIAS
from .tensor.linalg import dist #DEFINE_ALIAS
from .tensor.linalg import t #DEFINE_ALIAS
from .tensor.linalg import cross #DEFINE_ALIAS
# from .tensor.linalg import cholesky #DEFINE_ALIAS
# from .tensor.linalg import tensordot #DEFINE_ALIAS
# from .tensor.manipulation import cast #DEFINE_ALIAS
# from .tensor.manipulation import concat #DEFINE_ALIAS
# from .tensor.manipulation import expand #DEFINE_ALIAS
# from .tensor.manipulation import expand_as #DEFINE_ALIAS
# from .tensor.manipulation import flatten #DEFINE_ALIAS
from .tensor.manipulation import gather #DEFINE_ALIAS
# from .tensor.manipulation import gather_nd #DEFINE_ALIAS
# from .tensor.manipulation import reshape #DEFINE_ALIAS
# from .tensor.manipulation import reverse #DEFINE_ALIAS
# from .tensor.manipulation import scatter #DEFINE_ALIAS
# from .tensor.manipulation import scatter_nd_add #DEFINE_ALIAS
# from .tensor.manipulation import scatter_nd #DEFINE_ALIAS
# from .tensor.manipulation import shard_index #DEFINE_ALIAS
# from .tensor.manipulation import slice #DEFINE_ALIAS
from .tensor.manipulation import split #DEFINE_ALIAS
from .tensor.manipulation import squeeze #DEFINE_ALIAS
from .tensor.manipulation import stack #DEFINE_ALIAS
# from .tensor.manipulation import strided_slice #DEFINE_ALIAS
# from .tensor.manipulation import transpose #DEFINE_ALIAS
# from .tensor.manipulation import unique #DEFINE_ALIAS
# from .tensor.manipulation import unique_with_counts #DEFINE_ALIAS
from .tensor.manipulation import unsqueeze #DEFINE_ALIAS
# from .tensor.manipulation import unstack #DEFINE_ALIAS
from .tensor.manipulation import flip #DEFINE_ALIAS
# from .tensor.manipulation import unbind #DEFINE_ALIAS
from .tensor.manipulation import roll #DEFINE_ALIAS
from .tensor.search import argmax #DEFINE_ALIAS
# from .tensor.search import argmin #DEFINE_ALIAS
# from .tensor.search import argsort #DEFINE_ALIAS
# from .tensor.search import has_inf #DEFINE_ALIAS
# from .tensor.search import has_nan #DEFINE_ALIAS
# from .tensor.search import masked_select #DEFINE_ALIAS
# from .tensor.search import topk #DEFINE_ALIAS
from .tensor.search import where #DEFINE_ALIAS
from .tensor.search import index_select #DEFINE_ALIAS
from .tensor.search import index_sample #DEFINE_ALIAS
from .tensor.search import nonzero #DEFINE_ALIAS
from .tensor.search import sort #DEFINE_ALIAS
# from .framework.framework import set_default_dtype #DEFINE_ALIAS
# from .framework.framework import get_default_dtype #DEFINE_ALIAS
from .framework.random import manual_seed #DEFINE_ALIAS
...
...
@@ -37,40 +37,181 @@ from functools import reduce
from .. import core
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
import paddle
from .ops import exp
import sys
__all__ = [
'log1p',
'logsumexp',
'clamp',
'addmm',
'addcmul',
'bmm',
'nonzero',
'index_select',
'dist',
'dot',
't',
'cross',
'interpolate',
'diag_embed',
'meshgrid',
'fc',
'embedding',
'linear_chain_crf',
'crf_decoding',
'cos_sim',
'chunk_eval',
'conv2d',
'conv3d',
'softmax',
'pool2d',
'pool3d',
'adaptive_pool2d',
'adaptive_pool3d',
'batch_norm',
'inplace_abn',
'instance_norm',
'data_norm',
'conv2d_transpose',
'conv3d_transpose',
'reduce_sum',
'reduce_mean',
'reduce_max',
'reduce_min',
'reduce_prod',
'reduce_all',
'reduce_any',
'dropout',
'split',
'ctc_greedy_decoder',
'l2_normalize',
'matmul',
'topk',
'transpose',
'im2sequence',
'row_conv',
'multiplex',
'layer_norm',
'group_norm',
'spectral_norm',
'smooth_l1',
'one_hot',
'autoincreased_step_counter',
'reshape',
'squeeze',
'unsqueeze',
'lod_reset',
'lod_append',
'lrn',
'pad',
'pad_constant_like',
'label_smooth',
'roi_pool',
'roi_align',
'dice_loss',
'image_resize',
'image_resize_short',
'resize_bilinear',
'resize_trilinear',
'resize_nearest',
'gather',
'gather_nd',
'scatter',
'scatter_nd_add',
'scatter_nd',
'random_crop',
'mean_iou',
'relu',
'selu',
'log',
'crop',
'crop_tensor',
'elu',
'relu6',
'pow',
'stanh',
'hard_sigmoid',
'swish',
'prelu',
'brelu',
'leaky_relu',
'soft_relu',
'flatten',
'stack',
'pad2d',
'unstack',
'unique',
'unique_with_counts',
'expand',
'expand_as',
'scale',
'elementwise_add',
'elementwise_div',
'elementwise_sub',
'elementwise_mul',
'elementwise_max',
'elementwise_min',
'elementwise_pow',
'elementwise_mod',
'elementwise_floordiv',
'uniform_random_batch_size_like',
'gaussian_random',
'sampling_id',
'gaussian_random_batch_size_like',
'sum',
'slice',
'strided_slice',
'shape',
'rank',
'size',
'logical_and',
'logical_or',
'logical_xor',
'logical_not',
'clip',
'clip_by_norm',
'mean',
'mul',
'bmm',
'maxout',
'space_to_depth',
'affine_grid',
'affine_channel',
'similarity_focus',
'hash',
'grid_sampler',
'log_loss',
'add_position_encoding',
'bilinear_tensor_product',
'merge_selected_rows',
'get_tensor_from_selected_rows',
'shuffle_channel',
'temporal_shift',
'py_func',
'psroi_pool',
'prroi_pool',
'pixel_shuffle',
'fsp_matrix',
'continuous_value_model',
'where',
'sign',
'deformable_conv',
'unfold',
'deformable_roi_pooling',
'filter_by_instag',
'shard_index',
'hard_swish',
'gather_tree',
'uniform_random',
'randint',
'randn',
'randperm',
'allclose',
'elementwise_equal',
'flip',
'roll',
'log_softmax',
]
...
@@ -88,6 +229,1354 @@ def _elementwise_op_in_dygraph(x,
out, act, use_mkldnn=use_mkldnn)
def log1p(x, out=None, name=None):
"""
Calculates the natural log of the given input tensor, element-wise.
.. math::
Out = \\ln(x+1)
Args:
x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of the operation.
If out is None, a new Variable will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The natural log of the input LoDTensor or Tensor computed element-wise.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
# Graph Organizing
x = fluid.data(name="x", shape=[2,1], dtype="float32")
res = fluid.layers.log1p(x)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[0], [1]]).astype(np.float32)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
print(res_val) # [[0.], [0.6931472]]
"""
if in_dygraph_mode():
return core.ops.log1p(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
inputs = {'X': [x]}
helper = LayerHelper('log1p', **locals())
dtype = helper.input_dtype(input_param_name='x')
if out is None:
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
return out
def logsumexp(x, dim=None, keepdim=False, out=None, name=None):
"""
This operator calculates the log of the sum of exponentials of the input Tensor.
.. math::
logsumexp(x) = \log \sum \exp(x)
Parameters:
x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
dim (list|int, optional): The dimensions along which the sum is performed. If :attr:`None`,
sum all elements of :attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
the dimension to reduce is :math:`rank + dim[i]`.
keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor.
The result tensor will have one fewer dimension than :attr:`x` unless :attr:`keepdim`
is true. Default value is False.
out (Variable, optional): Enables the user to explicitly specify an output variable to save the result.
name (str, optional): The default value is None. Normally there is no need for user to
set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The calculated result Tensor/LoDTensor.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
np_x = np.random.uniform(0.1, 1, [10]).astype(np.float32)
x = fluid.dygraph.to_variable(np_x)
print(fluid.layers.logsumexp(x).numpy())
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
x = fluid.dygraph.to_variable(np_x)
print(fluid.layers.logsumexp(x, dim=1).numpy())
print(fluid.layers.logsumexp(x, dim=[0, 2]).numpy())
"""
op_type = 'logsumexp'
assert x is not None, 'x cannot be None in {}'.format(op_type)
# reduce_sum does not support float16
check_variable_and_dtype(x, 'x', ['float32', 'float64'], op_type)
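# Naive composition: compute log(reduce_sum(exp(x))) directly, exactly as
# in the docstring formula. Note there is no max-subtraction for numerical
# stability here, so exp(x) can overflow for large inputs.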
exp_out = exp(x)
sum_out = reduce_sum(exp_out, dim, keepdim)
if out is not None:
check_variable_and_dtype(out, 'out', [x.dtype], op_type)
helper = LayerHelper(op_type, **locals())
helper.append_op(
type="log", inputs={"X": sum_out}, outputs={"Out": out})
return out
return log(sum_out, name)
def clamp(input, min=None, max=None, output=None, name=None):
"""
**clamp layer**
This operator clamps all elements in input into the range [min, max] and returns
a resulting tensor as the following equation:
.. math::
Out = MIN(MAX(x, min), max)
Args:
input (Variable): An input N-D Tensor or LoDTensor
with data type float32, float64.
min (float32|Variable): The lower bound with type ``float32`` or a ``Tensor``
with shape [1] and type ``int32``, ``float32``, ``float64``.
max (float32|Variable): The upper bound with type ``float32`` or a ``Tensor``
with shape [1] and type ``int32``, ``float32``, ``float64``.
output (Variable, optional): A tensor or LoDTensor. If :attr:`output` is None,
a new tensor will be created as :attr:`output`. Default: None.
name (str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Variable: A Tensor or LodTensor with the same data type and data shape as input's.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[1.2,3.5],
[4.5,6.4]]).astype('float32')
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(in1)
out1 = fluid.layers.clamp(x1, min=3.5, max=5.0)
out2 = fluid.layers.clamp(x1, min=2.5)
print(out1.numpy())
# [[3.5, 3.5]
# [4.5, 5.0]]
print(out2.numpy())
# [[2.5, 3.5]
#  [4.5, 6.4]]
"""
assert min is not None or max is not None, "either min or max should be defined."
if min is not None:
check_type(min, 'min', (float, Variable), 'clamp')
if isinstance(min, Variable):
check_dtype(min.dtype, 'min', ['float32', 'float64', 'int32'],
'clamp', '(When the type of min in clamp is Variable.)')
if max is not None:
check_type(max, 'max', (float, Variable), 'clamp')
if isinstance(max, Variable):
check_dtype(max.dtype, 'max', ['float32', 'float64', 'int32'],
'clamp', '(When the type of max in clamp is Variable.)')
inputs = {'X': input}
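# The attrs below are placeholder bounds that are overwritten when min/max
# are given. Note that sys.float_info.min is the smallest positive
# normalized float, not the most negative representable value.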
attrs = {'min': sys.float_info.min, 'max': sys.float_info.max}
if isinstance(min, Variable):
min.stop_gradient = True
inputs['Min'] = min
elif min is not None:
attrs['min'] = min
if isinstance(max, Variable):
max.stop_gradient = True
inputs['Max'] = max
elif max is not None:
attrs['max'] = max
helper = LayerHelper('clamp', **locals())
if output is None:
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
helper.append_op(
type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs)
return output
def addmm(input, x, y, alpha=1.0, beta=1.0, name=None):
"""
**addmm**
This operator is used to perform matrix multiplication for input $x$ and $y$.
$input$ is added to the final result.
The equation is:
.. math::
Out = alpha * x * y + beta * input
$Input$, $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $input$.
Args:
input (Variable): The input Tensor/LoDTensor to be added to the final result.
x (Variable): The first input Tensor/LoDTensor for matrix multiplication.
y (Variable): The second input Tensor/LoDTensor for matrix multiplication.
alpha (float): Coefficient of $x*y$.
beta (float): Coefficient of $input$.
name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.
Returns:
Variable(Tensor/LoDTensor): The output Tensor/LoDTensor of addmm op.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[2, 2], dtype='float32')
x = fluid.data(name='x', shape=[2, 2], dtype='float32')
y = fluid.data(name='y', shape=[2, 2], dtype='float32')
out = fluid.layers.addmm( input=input, x=x, y=y, alpha=5.0, beta=0.5 )
data_x = np.ones((2, 2)).astype(np.float32)
data_y = np.ones((2, 2)).astype(np.float32)
data_input = np.ones((2, 2)).astype(np.float32)
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place)
results = exe.run(fluid.default_main_program(),
fetch_list=[out], feed={"input": data_input, 'x': data_x, "y": data_y})
print( np.array(results[0]) )
# [[10.5 10.5]
# [10.5 10.5]]
"""
if in_dygraph_mode():
out = core.ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
return out
inputs = {'Input': input, "X": x, "Y": y}
attrs = {'Alpha': alpha, 'Beta': beta}
helper = LayerHelper("addmm", **locals())
check_variable_and_dtype(input, 'Input', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out})
return out
def addcmul(input, tensor1, tensor2, value=1.0, out=None, name=None):
"""
Calculate the element-wise multiplication of tensor1 and tensor2,
then multiply the result by value, and add it to input. The shape of input,
tensor1, tensor2 should be broadcastable.
The equation is:
.. math::
out = input + value * tensor1 * tensor2
Args:
input(Variable): The input to be added. A Tensor with type float32, float64, int32, int64.
tensor1(Variable): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
tensor2(Variable): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
value(int|float): The multiplier for tensor1*tensor2. For float32 and float64 type input, value must be a float; for int32 and int64 type input, it must be an int.
out(Variable, Optional): The variable that specifies the output of the
operator, which can be Variable that has been created in the
program. The default value is None, and a new Variable will be
created to save the output. Default: None.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
out(Variable): The output result. A Tensor with the same data type as input's.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
input = fluid.data(name='input', dtype='float32', shape=[3, 4])
tensor1 = fluid.data(name='tensor1', dtype='float32', shape=[1, 4])
tensor2 = fluid.data(name='tensor2', dtype='float32', shape=[3, 4])
data = fluid.layers.addcmul(input, tensor1, tensor2, value=1.0)
"""
check_variable_and_dtype(
input, 'input', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
check_variable_and_dtype(
tensor1, 'tensor1', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
check_variable_and_dtype(
tensor2, 'tensor2', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
if convert_dtype(input.dtype) in ['float32', 'float64']:
check_type(value, 'value', float, 'addcmul')
if convert_dtype(input.dtype) in ['int32', 'int64']:
check_type(value, 'value', int, 'addcmul')
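# addcmul is composed from existing elementwise ops rather than a fused
# kernel; `* value` relies on the Variable scalar-multiplication overload.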
if out is not None:
assign(
elementwise_add(input, elementwise_mul(tensor1, tensor2) * value),
out)
else:
out = elementwise_add(input, elementwise_mul(tensor1, tensor2) * value)
return out
def bmm(x, y, name=None):
"""
Applies batched matrix multiplication to two tensors.
Both of the two input tensors must be three-dimensional and share the same batch size.
If x is a (b, m, k) tensor and y is a (b, k, n) tensor, the output will be a (b, m, n) tensor.
Args:
x (Variable): The input variable which is a Tensor or LoDTensor.
y (Variable): The input variable which is a Tensor or LoDTensor.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Variable: The product Tensor (or LoDTensor) variable.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
x = fluid.layers.data(name='x', shape=[10, 3, 4], dtype='float32')
y = fluid.layers.data(name='y', shape=[10, 4, 5], dtype='float32')
out = fluid.layers.bmm(x, y)
# In dygraph mode:
# size input1: (2, 2, 3) and input2: (2, 3, 2)
input1 = np.array([[[1.0, 1.0, 1.0],[2.0, 2.0, 2.0]],[[3.0, 3.0, 3.0],[4.0, 4.0, 4.0]]])
input2 = np.array([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(input1)
y = fluid.dygraph.to_variable(input2)
out = fluid.layers.bmm(x, y)
#output size: (2, 2, 2)
#output value:
#[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]]
out_np = out.numpy()
"""
helper = LayerHelper('bmm', **locals())
if in_dygraph_mode():
return core.ops.bmm(x, y)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})
return out
def nonzero(input, as_tuple=False):
"""
Return a tensor containing the indices of all non-zero elements of the `input`
tensor. If as_tuple is True, return a tuple of 1-D tensors, one for each dimension
in `input`, each containing the indices (in that dimension) of all non-zero elements
of `input`. Given an n-dimensional `input` tensor with shape [x_1, x_2, ..., x_n], if
as_tuple is False, we get an output tensor with shape [z, n], where `z` is the
number of all non-zero elements in the `input` tensor. If as_tuple is True, we get
a tuple of length `n`, in which each tensor is of shape [z, 1].
Args:
input (Variable): The input tensor variable.
as_tuple (bool): Return type, Tensor or tuple of Tensors.
Returns:
Variable: The output tensor (or tuple of tensors) of indices. The data type is int64.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
data1 = np.array([[1.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 3.0]])
data2 = np.array([0.0, 1.0, 0.0, 3.0])
data3 = np.array([0.0, 0.0, 0.0])
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(data1)
x2 = fluid.dygraph.to_variable(data2)
x3 = fluid.dygraph.to_variable(data3)
out_z1 = fluid.layers.nonzero(x1)
print(out_z1.numpy())
#[[0 0]
# [1 1]
# [2 2]]
out_z1_tuple = fluid.layers.nonzero(x1, as_tuple=True)
for out in out_z1_tuple:
print(out.numpy())
#[[0]
# [1]
# [2]]
#[[0]
# [1]
# [2]]
out_z2 = fluid.layers.nonzero(x2)
print(out_z2.numpy())
#[[1]
# [3]]
out_z2_tuple = fluid.layers.nonzero(x2, as_tuple=True)
for out in out_z2_tuple:
print(out.numpy())
#[[1]
# [3]]
out_z3 = fluid.layers.nonzero(x3)
print(out_z3.numpy())
#[]
out_z3_tuple = fluid.layers.nonzero(x3, as_tuple=True)
for out in out_z3_tuple:
print(out.numpy())
#[]
"""
list_out = []
shape = input.shape
rank = len(shape)
if in_dygraph_mode():
outs = core.ops.where_index(input)
else:
outs = where(input)
if not as_tuple:
return outs
elif rank == 1:
return tuple([outs])
else:
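# `outs` always has shape [z, rank]; slice out column i as a [z, 1]
# tensor, one per input dimension.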
for i in range(rank):
list_out.append(
slice(
outs, axes=[1], starts=[i], ends=[i + 1]))
return tuple(list_out)
def index_select(input, index, dim=0):
"""
Returns a new tensor which indexes the `input` tensor along dimension `dim` using
the entries in `index` which is a Tensor. The returned tensor has the same number
of dimensions as the original `input` tensor. The dim-th dimension has the same
size as the length of `index`; other dimensions have the same size as in the `input` tensor.
Args:
input (Variable): The input tensor variable.
index (Variable): The 1-D tensor containing the indices to index.
dim (int): The dimension in which we index.
Returns:
Variable: A Tensor with same data type as `input`.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
data = np.array([[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0]])
data_index = np.array([0, 1, 1]).astype('int32')
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(data)
index = fluid.dygraph.to_variable(data_index)
out_z1 = fluid.layers.index_select(x, index)
print(out_z1.numpy())
#[[1. 2. 3. 4.]
# [5. 6. 7. 8.]
# [5. 6. 7. 8.]]
out_z2 = fluid.layers.index_select(x, index, dim=1)
print(out_z2.numpy())
#[[ 1. 2. 2.]
# [ 5. 6. 6.]
# [ 9. 10. 10.]]
"""
helper = LayerHelper("index_select", **locals())
if in_dygraph_mode():
return core.ops.index_select(input, index, 'dim', dim)
check_variable_and_dtype(
input, 'x', ['float32', 'float64', 'int32', 'int64'], 'index_select')
check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'index_select')
out = helper.create_variable_for_type_inference(input.dtype)
helper.append_op(
type='index_select',
inputs={'X': input,
'Index': index},
outputs={'Out': out},
attrs={'dim': dim})
return out
def dist(x, y, p=2):
"""
This OP returns the p-norm of (x - y). It is not a norm in the strict sense, only a measure
of distance. The shapes of x and y must be broadcastable.
Where z = x - y,
When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z.
.. math::
||z||_{0} = \lim_{p \rightarrow 0} \sum_{i=1}^{m} |z_i|^{p}
When p = inf, the inf-norm of z is the maximum element of z.
.. math::
||z||_{\infty} = \max_i |z_i|
When p = -inf, the negative-inf-norm of z is the minimum element of z.
.. math::
||z||_{-\infty} = \min_i |z_i|
Otherwise, the p-norm of z follows the formula,
.. math::
||z||_{p} = \left( \sum_{i=1}^{m} |z_i|^{p} \right)^{\frac{1}{p}}
Args:
x (Variable): 1-D to 6-D Tensor, its data type is float32 or float64.
y (Variable): 1-D to 6-D Tensor, its data type is float32 or float64.
p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2.
Returns:
Variable: Tensor that is the p-norm of (x - y).
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.array([[3, 3],[3, 3]]).astype(np.float32))
y = fluid.dygraph.to_variable(np.array([[3, 3],[3, 1]]).astype(np.float32))
out = fluid.layers.dist(x, y, 0)
print(out.numpy()) # out = [1.]
out = fluid.layers.dist(x, y, 2)
print(out.numpy()) # out = [2.]
out = fluid.layers.dist(x, y, float("inf"))
print(out.numpy()) # out = [2.]
out = fluid.layers.dist(x, y, float("-inf"))
print(out.numpy()) # out = [0.]
"""
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist')
check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist')
check_type(p, 'p', (float, int), 'dist')
helper = LayerHelper("dist", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
inputs = {"X": [x], "Y": [y]}
outputs = {'Out': [out]}
attrs = {"p": float(p)}
helper.append_op(
type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
def dot(x, y, name=None):
"""
This operator calculates inner product for vectors.
.. note::
Only support 1-d Tensor(vector).
Parameters:
x(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
y(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`
Returns:
Variable: the calculated result Tensor/LoDTensor.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.random.uniform(0.1, 1, [10]).astype(np.float32))
y = fluid.dygraph.to_variable(np.random.uniform(1, 3, [10]).astype(np.float32))
z = fluid.layers.dot(x, y)
print(z.numpy())
"""
op_type = 'dot'
# skip var type check in dygraph mode to improve efficiency
if in_dygraph_mode():
op = getattr(core.ops, op_type)
return op(x, y)
assert x is not None, 'x cannot be None in {}'.format(op_type)
assert y is not None, 'y cannot be None in {}'.format(op_type)
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
op_type)
check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],
op_type)
helper = LayerHelper(op_type, **locals())
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="dot", inputs={'X': x,
'Y': y}, attrs={}, outputs={"Out": out})
return out
def t(input, name=None):
"""
Transpose <=2-D tensor.
0-D and 1-D tensors are returned as-is; for a 2-D tensor this is equivalent to
the fluid.layers.transpose function with perm set to [1, 0].
Args:
input (Variable): The input Tensor. It is an N-D (N<=2) Tensor of data types float32, float64, int32.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: A transposed n-D Tensor, with data type being float32, float64, int32, int64.
For Example:
.. code-block:: text
# Example 1 (0-D tensor)
x = tensor([0.79])
fluid.layers.t(x) = tensor([0.79])
# Example 2 (1-D tensor)
x = tensor([0.79, 0.84, 0.32])
fluid.layers.t(x) = tensor([0.79, 0.84, 0.32])
# Example 3 (2-D tensor)
x = tensor([0.79, 0.84, 0.32],
[0.64, 0.14, 0.57])
fluid.layers.t(x) = tensor([0.79, 0.64],
[0.84, 0.14],
[0.32, 0.57])
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[2, 3],
dtype='float32')
x_transposed = fluid.layers.t(x)
print(x_transposed.shape)
#(3L, 2L)
"""
if len(input.shape) > 2:
raise ValueError(
"Input(input) only support N-D (N<=2) tensor, but received "
"length of Input(input) is %s. Perhaps you can use paddle."
"tensor.transpose() instead." % len(input.shape))
if in_dygraph_mode():
if len(input.shape) == 1:
return input
# 2-D tensor
perm = [1, 0]
out, _ = core.ops.transpose2(input, 'axis', perm)
return out
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
'transpose')
helper = LayerHelper('t', **locals())
out = helper.create_variable_for_type_inference(input.dtype)
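# transpose2 emits an extra XShape output recording the input shape,
# which is consumed by the backward pass.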
input_shape = helper.create_variable_for_type_inference(input.dtype)
if len(input.shape) == 1:
out = input
else:
helper.append_op(
type='transpose2',
inputs={'X': [input]},
outputs={'Out': [out],
'XShape': [input_shape]},
attrs={'axis': [1, 0]})
return out
def cross(input, other, dim=None):
"""
Returns the cross product of vectors in dimension `dim` of the `input` and `other` tensors.
Inputs must have the same shape, and the size of their dim-th dimension should be equal to 3.
If `dim` is not given, it defaults to the first dimension found with the size 3.
Args:
input (Variable): The first input tensor variable.
other (Variable): The second input tensor variable.
dim (int): The dimension to take the cross-product in.
Returns:
Variable: A Tensor with same data type as `input`.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
data_x = np.array([[1.0, 1.0, 1.0],
[2.0, 2.0, 2.0],
[3.0, 3.0, 3.0]])
data_y = np.array([[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(data_x)
y = fluid.dygraph.to_variable(data_y)
out_z1 = fluid.layers.cross(x, y)
print(out_z1.numpy())
#[[-1. -1. -1.]
# [ 2. 2. 2.]
# [-1. -1. -1.]]
out_z2 = fluid.layers.cross(x, y, dim=1)
print(out_z2.numpy())
#[[0. 0. 0.]
# [0. 0. 0.]
# [0. 0. 0.]]
"""
helper = LayerHelper("cross", **locals())
if in_dygraph_mode():
if dim is not None:
return core.ops.cross(input, other, 'dim', dim)
else:
return core.ops.cross(input, other)
out = helper.create_variable_for_type_inference(input.dtype)
attrs = dict()
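# The 'dim' attr is only set when the caller provides one; otherwise the
# op falls back to the first dimension of size 3 (see docstring).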
if dim is not None:
attrs['dim'] = dim
helper.append_op(
type='cross',
inputs={'X': input,
'Y': other},
outputs={'Out': out},
attrs=attrs)
return out
def interpolate(input,
out_shape=None,
scale=None,
name=None,
resample='BILINEAR',
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCHW'):
"""
This op resizes a batch of images.
The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w)
or (num_batches, in_h, in_w, channels), or a 5-D Tensor of the shape
(num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
and the resizing only applies to the depth, height and width dimensions.
**Warning:** the parameter :attr:`actual_shape` will be deprecated in the
future; use :attr:`out_shape` instead.
Supporting resample methods:
'BILINEAR' : Bilinear interpolation
'TRILINEAR' : Trilinear interpolation
'NEAREST' : Nearest neighbor interpolation
'BICUBIC' : Bicubic interpolation
Nearest neighbor interpolation is to perform nearest neighbor interpolation
in both the 3rd dimension(in height direction) and the 4th dimension(in width
direction) on input tensor.
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this op) on a rectilinear 2D grid. The key idea is
to perform linear interpolation first in one direction, and then
again in the other direction.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
Align_corners and align_mode are optional parameters; the calculation method
of interpolation can be selected by them.
Bicubic interpolation is an extension of cubic interpolation for interpolating
data points on a two-dimensional regular grid. The interpolated surface is
smoother than corresponding surfaces obtained by bilinear interpolation or
nearest-neighbor interpolation.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Nearest neighbor interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = floor (H_{in} * scale_{factor})
W_out = floor (W_{in} * scale_{factor})
else:
align_corners = True
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = round(H_{in} * scale_{factor})
W_out = round(W_{in} * scale_{factor})
Bilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Bicubic interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
For details of bilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation.
For details of trilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Trilinear_interpolation.
For details of bicubic interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bicubic_interpolation
Parameters:
input (Variable): 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): Output shape of the image resize
layer; the shape is (out_h, out_w) when the input is a 4-D Tensor and
(out_d, out_h, out_w) when the input is a 5-D Tensor. Default: None. If
a list, each element can be an integer or a Tensor Variable of shape [1].
If a Tensor Variable, it must be 1-D with a single element.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
resample(str): The resample method. It supports 'BILINEAR', 'TRILINEAR' ,
'BICUBIC' and 'NEAREST' currently. Default: 'BILINEAR'
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors would occur in the graph-constructing stage.
Default: None
align_corners(bool) : An optional bool, If True, the centers of the 4 corner pixels of the
input and output tensors are aligned, preserving the values at the
corner pixels.
Default: True
align_mode(int) : An optional input for bilinear interpolation. It can be 0
for src_idx = scale*(dst_index+0.5)-0.5, or 1 for
src_idx = scale*dst_index.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`, `"NCDHW"`,
`"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
Raises:
TypeError: out_shape should be a list or tuple or Variable.
TypeError: actual_shape should either be Variable or None.
ValueError: The 'resample' of image_resize can only be 'BILINEAR',
'TRILINEAR', 'BICUBIC', or 'NEAREST' currently.
ValueError: 'BILINEAR', 'BICUBIC' and 'NEAREST' only support 4-D tensor.
ValueError: 'TRILINEAR' only support 5-D tensor.
ValueError: One of out_shape and scale must not be None.
ValueError: out_shape length should be 2 for input 4-D tensor.
ValueError: out_shape length should be 3 for input 5-D tensor.
ValueError: scale should be greater than zero.
TypeError: align_corners should be a bool value
ValueError: align_mode can only be '0' or '1'
ValueError: data_format can only be 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.
Examples:
.. code-block:: python
#declarative mode
import paddle
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = fluid.layers.interpolate(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = fluid.layers.interpolate(input=input,out_shape=[12,dim1])
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = fluid.layers.interpolate(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = fluid.layers.interpolate(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
import paddle.fluid as fluid
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = fluid.layers.interpolate(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
"""
resample_methods = {
'BILINEAR': 'bilinear',
'TRILINEAR': 'trilinear',
'NEAREST': 'nearest',
'BICUBIC': 'bicubic',
}
if resample not in resample_methods:
raise ValueError(
"The 'resample' of image_resize can only be 'BILINEAR', 'TRILINEAR', "
" 'BICUBIC' or 'NEAREST' currently.")
resample_type = resample_methods[resample]
if resample in ['BILINEAR', 'NEAREST', 'BICUBIC'] and len(input.shape) != 4:
raise ValueError(
"'BILINEAR', 'BICUBIC' and 'NEAREST' only support 4-D tensor.")
if resample == 'TRILINEAR' and len(input.shape) != 5:
raise ValueError("'TRILINEAR'only support 5-D tensor.")
if not isinstance(align_corners, bool):
raise TypeError("Attr align_corners should be a bool value")
if align_mode != 0 and align_mode != 1:
raise ValueError("align_mode can only be 0 or 1")
if out_shape is None and scale is None:
raise ValueError("One of out_shape and scale must not be None.")
helper = LayerHelper('{}_interp'.format(resample_type), **locals())
dtype = helper.input_dtype()
if len(input.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCHW` or `NHWC` supported for 4-D input.")
elif len(input.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCDHW` or `NDHWC` supported for 5-D input.")
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if data_format == 'NCHW' or data_format == 'NCDHW':
data_layout = 'NCHW'
if data_format == 'NHWC' or data_format == 'NDHWC':
data_layout = 'NHWC'
inputs = {"X": input}
attrs = {
"out_d": -1,
"out_h": -1,
"out_w": -1,
"interp_method": resample_type,
"align_corners": align_corners,
"align_mode": align_mode,
"data_layout": data_layout
}
if out_shape is not None:
if isinstance(out_shape, Variable):
out_shape.stop_gradient = True
inputs['OutSize'] = out_shape
else:
if not (_is_list_or_tuple_(out_shape)):
raise TypeError(
"out_shape should be a list or tuple or Variable.")
# Validate the shape
contain_var = False
for dim_idx, dim_size in enumerate(out_shape):
if isinstance(dim_size, Variable):
contain_var = True
continue
assert dim_size > 0, (
"Each dimension size given in out_shape must be greater than 0."
)
if contain_var:
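# Mixed static/dynamic shape: each integer dim is materialized as a
# 1-element int32 tensor so SizeTensor is a uniform list of tensors,
# while size_list keeps a -1 placeholder for every Variable dim.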
new_size_tensor = []
size_list = []
for dim in out_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_size_tensor.append(dim)
size_list.append(-1)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference(
'int32')
fill_constant(
[1], 'int32', dim, force_cpu=True, out=temp_out)
new_size_tensor.append(temp_out)
size_list.append(dim)
inputs['SizeTensor'] = new_size_tensor
if len(input.shape) == 4:
if len(out_shape) != 2:
raise ValueError("out_shape length should be 2 for "
"input 4-D tensor.")
if contain_var:
attrs['out_h'] = size_list[0]
attrs['out_w'] = size_list[1]
else:
out_shape = list(map(int, out_shape))
attrs['out_h'] = out_shape[0]
attrs['out_w'] = out_shape[1]
if len(input.shape) == 5:
if len(out_shape) != 3:
raise ValueError("out_shape length should be 3 for "
"input 5-D tensor.")
if contain_var:
attrs['out_d'] = size_list[0]
attrs['out_h'] = size_list[1]
attrs['out_w'] = size_list[2]
else:
out_shape = list(map(int, out_shape))
attrs['out_d'] = out_shape[0]
attrs['out_h'] = out_shape[1]
attrs['out_w'] = out_shape[2]
else:
if isinstance(scale, Variable):
scale.stop_gradient = True
inputs["Scale"] = scale
elif isinstance(scale, float) or isinstance(scale, int):
if scale <= 0:
raise ValueError("Attr(scale) should be greater than zero.")
attrs['scale'] = float(scale)
else:
raise TypeError(
"Attr(scale)'s type should be float, int or Variable.")
if isinstance(actual_shape, Variable):
warnings.warn(
"actual_shape will be deprecated, it is recommended to use "
"out_shape instead of actual_shape to specify output shape dynamically."
)
actual_shape.stop_gradient = True
inputs["OutSize"] = actual_shape
elif actual_shape is not None:
raise TypeError("actual_shape should either be Variable or None.")
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='{}_interp'.format(resample_type),
inputs=inputs,
outputs={"Out": out},
attrs=attrs)
return out
def diag_embed(input, offset=0, dim1=-2, dim2=-1):
"""
This OP creates a tensor whose diagonals of certain 2D planes (specified by dim1 and dim2)
are filled by ``input``. By default, a 2D plane formed by the last two dimensions
of the returned tensor will be selected.
The argument ``offset`` determines which diagonal is generated:
- If offset = 0, it is the main diagonal.
- If offset > 0, it is above the main diagonal.
- If offset < 0, it is below the main diagonal.
Args:
input(Variable|numpy.ndarray): The input tensor. Must be at least 1-dimensional. The input data type should be float32, float64, int32, int64.
offset(int, optional): Which diagonal to consider. Default: 0 (main diagonal).
dim1(int, optional): The first dimension with respect to which to take diagonal. Default: -2.
dim2(int, optional): The second dimension with respect to which to take diagonal. Default: -1.
Returns:
Variable, the output data type is the same as input data type.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
import numpy as np
diag_embed = np.random.randn(2, 3).astype('float32')
# [[ 0.7545889 , -0.25074545, 0.5929117 ],
# [-0.6097662 , -0.01753256, 0.619769 ]]
with dg.guard():
data1 = fluid.layers.diag_embed(diag_embed)
data1.numpy()
# [[[ 0.7545889 , 0. , 0. ],
# [ 0. , -0.25074545, 0. ],
# [ 0. , 0. , 0.5929117 ]],
# [[-0.6097662 , 0. , 0. ],
# [ 0. , -0.01753256, 0. ],
# [ 0. , 0. , 0.619769 ]]]
data2 = fluid.layers.diag_embed(diag_embed, offset=-1, dim1=0, dim2=2)
data2.numpy()
# [[[ 0. , 0. , 0. , 0. ],
# [ 0.7545889 , 0. , 0. , 0. ],
# [ 0. , -0.25074545, 0. , 0. ],
# [ 0. , 0. , 0.5929117 , 0. ]],
#
# [[ 0. , 0. , 0. , 0. ],
# [-0.6097662 , 0. , 0. , 0. ],
# [ 0. , -0.01753256, 0. , 0. ],
# [ 0. , 0. , 0.619769 , 0. ]]]
data3 = fluid.layers.diag_embed(diag_embed, offset=1, dim1=0, dim2=2)
data3.numpy()
# [[[ 0. , 0.7545889 , 0. , 0. ],
# [ 0. , -0.6097662 , 0. , 0. ]],
#
# [[ 0. , 0. , -0.25074545, 0. ],
# [ 0. , 0. , -0.01753256, 0. ]],
#
# [[ 0. , 0. , 0. , 0.5929117 ],
# [ 0. , 0. , 0. , 0.619769 ]],
#
# [[ 0. , 0. , 0. , 0. ],
# [ 0. , 0. , 0. , 0. ]]]
"""
inputs = {'Input': [input]}
attrs = {'offset': offset, 'dim1': dim1, 'dim2': dim2}
if not isinstance(input, Variable):
input = assign(input)
def __check_input(input, offset, dim1, dim2):
check_dtype(input.dtype, 'Input',
['int32', 'int64', 'float16', 'float32', 'float64'],
'diag_embed')
input_shape = list(input.shape)
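# The output has one more dimension than the input, so dim1/dim2 may
# refer to any axis in [-(rank + 1), rank]; hence the +1 offsets in the
# range checks and in the normalization below.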
assert len(input_shape) >= 1, \
"Input must be at least 1-dimensional, " \
"But received Input's dimensional: %s.\n" % \
len(input_shape)
assert np.abs(dim1) <= len(input_shape), \
"Dim1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape) + 1), len(input_shape), dim1)
assert np.abs(dim2) <= len(input_shape), \
"Dim2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape) + 1), len(input_shape), dim2)
dim1_ = dim1 if dim1 >= 0 else len(input_shape) + dim1 + 1
dim2_ = dim2 if dim2 >= 0 else len(input_shape) + dim2 + 1
assert dim1_ != dim2_, \
"dim1 and dim2 cannot be the same dimension." \
"But received dim1 = %d, dim2 = %d\n"%(dim1, dim2)
if not in_dygraph_mode():
__check_input(input, offset, dim1, dim2)
helper = LayerHelper("diag_embed", **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='diag_embed',
inputs={'Input': [input]},
attrs={'offset': offset,
'dim1': dim1,
'dim2': dim2},
outputs={'Out': [out]})
out.stop_gradient = True
return out
def meshgrid(input, name=None):
"""
This op takes a list of N tensors as input, each of which is a 1-dimensional
vector, and creates N N-dimensional grids.
Args:
input(list of Variable): A list of k 1-D tensors with shapes (N1,),
(N2,), ..., (Nk,). Supported data types: ``float64``, ``float32``, ``int32``, ``int64``.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
list of Variable: k tensors. The shape of each tensor is (N1, N2, ..., Nk).
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
x = fluid.data(name='x', shape=[100], dtype='int32')
y = fluid.data(name='y', shape=[200], dtype='int32')
input_1 = np.random.randint(0, 100, [100, ]).astype('int32')
input_2 = np.random.randint(0, 100, [200, ]).astype('int32')
exe = fluid.Executor(place=fluid.CPUPlace())
grid_x, grid_y = fluid.layers.meshgrid([x, y])
res_1, res_2 = exe.run(fluid.default_main_program(),
feed={'x': input_1,
'y': input_2},
fetch_list=[grid_x, grid_y])
#the shape of res_1 is (100, 200)
#the shape of res_2 is (100, 200)
.. code-block:: python
#example 2: in dygraph mode
import paddle
import paddle.fluid as fluid
import numpy as np
input_3 = np.random.randint(0, 100, [100, ]).astype('int32')
input_4 = np.random.randint(0, 100, [200, ]).astype('int32')
with fluid.dygraph.guard():
tensor_3 = fluid.dygraph.to_variable(input_3)
tensor_4 = fluid.dygraph.to_variable(input_4)
grid_x, grid_y = fluid.layers.meshgrid([tensor_3, tensor_4])
#the shape of grid_x is (100, 200)
#the shape of grid_y is (100, 200)
"""
if in_dygraph_mode():
num = len(input)
out = core.ops.meshgrid(input, num)
return out
helper = LayerHelper('meshgrid', **locals())
if not isinstance(input, list):
raise TypeError("The type of input in meshgrid should be list.")
for id, input_ in enumerate(input):
check_dtype(input_.dtype, 'create data type',
['float16', 'float32', 'float64', 'int32', 'int64'],
'meshgrid')
num = len(input)
out = [
helper.create_variable_for_type_inference(dtype=input[i].dtype)
for i in range(num)
]
helper.append_op(type='meshgrid', inputs={'X': input}, outputs={'Out': out})
return out
def fc(input,
size,
num_flatten_dims=1,
...
@@ -4857,7 +6346,65 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32')
out = fluid.layers.matmul(x, y, True, True)
"""
return paddle.matmul(x, y, transpose_x, transpose_y, alpha, name) attrs = {
'transpose_X': transpose_x,
'transpose_Y': transpose_y,
'alpha': float(alpha),
}
if in_dygraph_mode():
out = _varbase_creator(dtype=x.dtype)
core.ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
transpose_y, 'alpha', float(alpha))
return out
def __check_input(x, y):
var_names = {'x': x, 'y': y}
for name, val in var_names.items():
check_variable_and_dtype(
val, name, ['float16', 'float32', 'float64'], 'matmul')
x_shape = list(x.shape)
y_shape = list(y.shape)
if len(x_shape) == 1:
x_shape = [1] + x_shape
if len(y_shape) == 1:
y_shape = y_shape + [1]
# check the inner 2 dimensions
if transpose_x:
x_shape[-2], x_shape[-1] = x_shape[-1], x_shape[-2]
if transpose_y:
y_shape[-2], y_shape[-1] = y_shape[-1], y_shape[-2]
if x_shape[-1] != y_shape[-2]:
assert (x_shape[-1] == -1) or (y_shape[-2] == -1), \
"After performing an optional transpose, Input X's width should be " \
"equal to Y's width for multiplication " \
"prerequisites. But received X's shape: %s, Y's shape: %s\n" % \
(x_shape, y_shape)
if len(y_shape) > 2 and len(x_shape) > 2:
for i, dim_x in enumerate(x_shape[:-2]):
# don't check neg shape
if dim_x < 0 or y_shape[i] < 0:
continue
if dim_x != y_shape[i]:
raise ValueError(
"When the matrix is larger than 2 dimensions, the higher "
"dimensional values of the two matrices need to be equal. "
"But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
"Y's shape: %s.\n" % (i, i, x_shape, y_shape))
__check_input(x, y)
helper = LayerHelper('matmul', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='matmul',
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs=attrs)
return out
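
# A small NumPy sketch (editor's illustration, not part of this commit) of
# the rules enforced by __check_input above: 1-D operands are promoted to
# matrices, the inner dimensions must match after the optional transposes,
# and any leading batch dimensions must agree.
#
#     import numpy as np
#     x = np.random.rand(5, 2, 3)
#     y = np.random.rand(5, 4, 3)
#     out = np.matmul(x, y.transpose(0, 2, 1))  # transpose_y=True: 3 == 3
#     assert out.shape == (5, 2, 4)
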
def topk(input, k, name=None):

@@ -10085,7 +11632,26 @@ def sum(x):
        # and '__int64' on Windows. They both represent 64-bit integer variables.
    """
-    return paddle.elementwise_sum(x)
+    helper = LayerHelper('sum', **locals())
    check_type(x, 'x', (Variable, tuple, list), 'sum')
    if isinstance(x, (list, tuple)):
        if len(x) > 0:
            for each_x in x:
                check_variable_and_dtype(
                    each_x, "x", ['float32', 'float64', 'int32', 'int64'], 'sum')
    else:
        check_variable_and_dtype(
            x, "x", ['float32', 'float64', 'int32', 'int64'], 'sum')
    out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('x'))
    helper.append_op(
        type='sum',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'use_mkldnn': False})
    return out
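
# Editor's note (not part of this commit): this `sum` adds a list of
# same-shaped tensors element-wise, so a NumPy reference looks like:
#
#     import numpy as np
#     xs = [np.ones((2, 3)), 2 * np.ones((2, 3))]
#     assert (np.add.reduce(xs) == 3).all()
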
@templatedoc()
......
@@ -73,7 +73,7 @@ class TestParameter(object):
    def test_out(self):
        with fluid.program_guard(fluid.Program()):
            data = fluid.layers.data(name="X", shape=[1])
-            out = eval("paddle.%s(data, out=data)" % self.op_type)
+            out = eval("fluid.layers.%s(data, out=data)" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result = exe.run(feed={"X": np.array([0.1])},
@@ -83,7 +83,8 @@ class TestParameter(object):
    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            data = fluid.layers.data(name="X", shape=[1])
-            out = eval("paddle.%s(data, name='Y', out=data)" % self.op_type)
+            out = eval("fluid.layers.%s(data, name='Y', out=data)" %
+                       self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result = exe.run(feed={"X": np.array([0.1])},
@@ -94,7 +95,7 @@ class TestParameter(object):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
-            z = eval("paddle.%s(x).numpy()" % self.op_type)
+            z = eval("fluid.layers.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            self.assertEqual(z, z_expected)
@@ -136,7 +137,7 @@ class TestLogSigmoid(TestActivation):
        self.check_grad(['X'], 'Out', max_relative_error=0.008)
-class TestTanh(TestActivation, TestParameter):
+class TestTanh(TestActivation):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
@@ -152,37 +153,12 @@ class TestTanh(TestActivation):
        self.check_grad(['X'], 'Out')
    def init_dtype(self):
        #TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32
class TestAtan(TestActivation, TestParameter):
def setUp(self):
self.op_type = "atan"
self.init_dtype()
x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
out = np.arctan(x)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out')
def test_dygraph(self):
with fluid.dygraph.guard():
np_x = np.array([0.1])
x = fluid.dygraph.to_variable(np_x)
z = paddle.atan(x).numpy()
z_expected = np.arctan(np_x)
self.assertEqual(z, z_expected)
class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
@@ -267,7 +243,7 @@ class TestSoftShrinkOpError(unittest.TestCase):
            fluid.layers.softshrink(x_fp16)
-class TestSqrt(TestActivation, TestParameter):
+class TestSqrt(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()
@@ -391,7 +367,7 @@ class TestAcos(TestActivation):
        self.check_grad(['X'], 'Out')
-class TestSin(TestActivation, TestParameter):
+class TestSin(TestActivation):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()
@@ -805,8 +781,8 @@ class TestLog1p(TestActivation):
                append_batch_size=False,
                dtype="float64")
-            out1 = paddle.log1p(data_x)
-            out2 = paddle.log1p(data_x, out=res_log1p)
+            out1 = fluid.layers.log1p(data_x)
+            out2 = fluid.layers.log1p(data_x, out=res_log1p)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            res1, res_in = exe.run(fluid.default_main_program(),
@@ -820,7 +796,7 @@ class TestLog1p(TestActivation):
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
-            z = paddle.log1p(data_x)
+            z = fluid.layers.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
            np.testing.assert_allclose(np_z, z_expected)
@@ -899,22 +875,14 @@ class TestPow_factor_tensor(TestActivation):
            factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
            out_1 = fluid.layers.pow(x, factor=factor_1)
            out_2 = fluid.layers.pow(x, factor=factor_2)
-            out_3 = paddle.pow(x, factor_1, out=res)
-            out_4 = paddle.pow(x, factor_1, name='pow_res')
-            out_5 = paddle.pow(x, factor_1, out=res, name='pow_res')
-            out_6 = paddle.pow(x, factor_2)
-            self.assertEqual(('pow_res' in out_4.name), True)
            exe = fluid.Executor(place=fluid.CPUPlace())
-            res_1, res_2, res_3, res, res_6 = exe.run(
-                fluid.default_main_program(),
-                feed={"x": input},
-                fetch_list=[out_1, out_2, out_3, res, out_6])
+            res_1, res_2 = exe.run(fluid.default_main_program(),
+                                   feed={"x": input},
+                                   fetch_list=[out_1, out_2])
            assert np.array_equal(res_1, np.power(input, 2))
            assert np.array_equal(res_2, np.power(input, 3))
-            assert np.array_equal(res_3, res)
-            assert np.array_equal(res_6, np.power(input, 3))
    def test_error(self):
        in1 = fluid.layers.data(
@@ -1214,7 +1182,6 @@ create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestAsin)
-create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
......
@@ -37,7 +37,7 @@ class TestAddcmulLayer(unittest.TestCase):
            tensor1 = fluid.data(name="tensor1", dtype=self._dtype, shape=[100])
            tensor2 = fluid.data(
                name="tensor2", dtype=self._dtype, shape=[3, 100])
-            out = paddle.addcmul(input, tensor1, tensor2, value)
+            out = fluid.layers.addcmul(input, tensor1, tensor2, value)
            exe = fluid.Executor(self._place)
            return exe.run(feed={
@@ -53,7 +53,7 @@ class TestAddcmulLayer(unittest.TestCase):
            input = fluid.dygraph.to_variable(self.input)
            tensor1 = fluid.dygraph.to_variable(self.tensor1)
            tensor2 = fluid.dygraph.to_variable(self.tensor2)
-            out = paddle.addcmul(input, tensor1, tensor2, value)
+            out = fluid.layers.addcmul(input, tensor1, tensor2, value)
            return out.numpy()
    def numpy(self, value=1.0):
@@ -85,7 +85,7 @@ class TestAddcmul(unittest.TestCase):
            tensor1 = fluid.data(name='t1', shape=data_shape, dtype='float32')
            tensor2 = fluid.data(name='t2', shape=data_shape, dtype='float32')
-            out = paddle.addcmul(input, tensor1, tensor2)
+            out = fluid.layers.addcmul(input, tensor1, tensor2)
            self.assertEqual(out.shape, input.shape)
    def test_addcmul_with_broadcast0(self):
@@ -95,7 +95,7 @@ class TestAddcmul(unittest.TestCase):
            tensor1 = fluid.data(name='t1', shape=[3, 100], dtype='float32')
            tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
-            out = paddle.addcmul(input, tensor1, tensor2)
+            out = fluid.layers.addcmul(input, tensor1, tensor2)
            self.assertEqual(out.shape, input.shape)
    def test_addcmul_with_broadcast1(self):
@@ -105,7 +105,7 @@ class TestAddcmul(unittest.TestCase):
            tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
            tensor2 = fluid.data(name='t2', shape=[4, 100], dtype='float32')
-            out = paddle.addcmul(input, tensor1, tensor2)
+            out = fluid.layers.addcmul(input, tensor1, tensor2)
            self.assertEqual(out.shape, input.shape)
    def test_addcmul_with_broadcast2(self):
@@ -115,7 +115,7 @@ class TestAddcmul(unittest.TestCase):
            tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
            tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
-            out = paddle.addcmul(input, tensor1, tensor2)
+            out = fluid.layers.addcmul(input, tensor1, tensor2)
            self.assertEqual(out.shape, input.shape)
    def test_addcmul_has_out(self):
@@ -126,7 +126,7 @@ class TestAddcmul(unittest.TestCase):
            tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
            out = fluid.data(name='out', shape=[4, 100], dtype='float32')
-            out = paddle.addcmul(input, tensor1, tensor2, out=out)
+            out = fluid.layers.addcmul(input, tensor1, tensor2, out=out)
            self.assertEqual(out.shape, input.shape)
@@ -140,7 +140,7 @@ class InvalidInputTest(unittest.TestCase):
                name='tensor1', shape=[20, 20], dtype='float32')
            tensor2 = fluid.data(
                name='tensor2', shape=[20, 20], dtype='float32')
-            out = paddle.addcmul(input, tensor1, tensor2)
+            out = fluid.layers.addcmul(input, tensor1, tensor2)
        self.assertRaises(TypeError, test_invalid_input)
@@ -152,7 +152,7 @@ class InvalidInputTest(unittest.TestCase):
            tensor1 = [20, 20]
            tensor2 = fluid.data(
                name='tensor2', shape=[20, 20], dtype='float32')
-            out = paddle.addcmul(input, tensor1, tensor2)
+            out = fluid.layers.addcmul(input, tensor1, tensor2)
        self.assertRaises(TypeError, test_invalid_tensor1)
@@ -164,7 +164,7 @@ class InvalidInputTest(unittest.TestCase):
            tensor1 = fluid.data(
                name='tensor1', shape=[20, 20], dtype='float32')
            tensor2 = [20, 20]
-            out = paddle.addcmul(input, tensor1, tensor2)
+            out = fluid.layers.addcmul(input, tensor1, tensor2)
        self.assertRaises(TypeError, test_invalid_tensor2)
@@ -177,7 +177,7 @@ class InvalidInputTest(unittest.TestCase):
                name='tensor1', shape=[20, 20], dtype='float32')
            tensor2 = fluid.data(
                name='tensor2', shape=[20, 20], dtype='float32')
-            out = paddle.addcmul(input, tensor1, tensor2, value=1)
+            out = fluid.layers.addcmul(input, tensor1, tensor2, value=1)
        self.assertRaises(TypeError, test_invalid_value_int)
@@ -189,7 +189,7 @@ class InvalidInputTest(unittest.TestCase):
            tensor1 = fluid.data(
                name='tensor1', shape=[20, 20], dtype='int32')
            tensor2 = fluid.data(
                name='tensor2', shape=[20, 20], dtype='int32')
-            out = paddle.addcmul(input, tensor1, tensor2, value=1.0)
+            out = fluid.layers.addcmul(input, tensor1, tensor2, value=1.0)
        self.assertRaises(TypeError, test_invalid_value_float)
......
@@ -69,12 +69,12 @@ class TestAddMMOpError(unittest.TestCase):
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            x2 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
-            self.assertRaises(TypeError, paddle.addmm, input, x1, x2)
+            self.assertRaises(TypeError, fluid.layers.addmm, input, x1, x2)
            # The input dtype of mul_op must be float32 or float64.
            input = fluid.layers.data(name='input', shape=[4], dtype="int32")
            x3 = fluid.layers.data(name='x3', shape=[4], dtype="int32")
            x4 = fluid.layers.data(name='x4', shape=[4], dtype="int32")
-            self.assertRaises(TypeError, paddle.addmm, input, x3, x4)
+            self.assertRaises(TypeError, fluid.layers.addmm, input, x3, x4)
class TestAddMMOp2(TestAddMMOp):
@@ -143,7 +143,7 @@ class TestAddMMOp4(unittest.TestCase):
            input = fluid.dygraph.to_variable(np_input)
            x = fluid.dygraph.to_variable(np_x)
            y = fluid.dygraph.to_variable(np_y)
-            out = paddle.tensor.addmm(input, x, y)
+            out = fluid.layers.addmm(input, x, y)
            assert np.allclose(np_input + np.dot(np_x, np_y), out.numpy())
......
@@ -201,107 +201,5 @@ class BaseTestComplex2_2(OpTest):
        }
class APT_ArgMaxTest(unittest.TestCase):
def test_output_result(self):
with fluid.program_guard(fluid.Program()):
data1 = fluid.data(name="X", shape=[3, 4], dtype="float32")
data2 = fluid.data(name="Y", shape=[3], dtype="int64")
out = paddle.argmax(input=data1, out=data2)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result = exe.run(
feed={"X": np.random.rand(3, 4).astype("float32")},
fetch_list=[data2, out])
self.assertEqual((result[0] == result[1]).all(), True)
def test_basic(self):
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=[3, 4], dtype="float32")
out = paddle.argmax(input=data)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
np_input = np.random.rand(3, 4).astype("float32")
expected_result = np.argmax(np_input, axis=1)
result, = exe.run(feed={"X": np_input}, fetch_list=[out])
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=[3, 4], dtype="float32")
out = paddle.argmax(input=data, axis=0)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
np_input = np.random.rand(3, 4).astype("float32")
expected_result = np.argmax(np_input, axis=0)
result = exe.run(feed={"X": np_input}, fetch_list=[out])
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=[3, 4], dtype="float32")
out = paddle.argmax(input=data, dtype="int32")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
np_input = np.random.rand(3, 4).astype("float32")
expected_result = np.argmax(np_input, axis=1).astype(np.int32)
result = exe.run(feed={"X": np_input}, fetch_list=[out])
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data1 = fluid.data(name="X", shape=[3, 4], dtype="float32")
data2 = fluid.data(name="Y", shape=[3], dtype="int64")
out = paddle.argmax(input=data, out=data2)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result = exe.run(
feed={"X": np.random.rand(3, 4).astype("float32")},
fetch_list=[data2, out])
self.assertEqual((result[0] == result[1]).all(), True)
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[100], dtype="float32")
y_1 = paddle.argmax(x, name='arg_max_res')
self.assertEqual(('arg_max_res' in y_1.name), True)
def test_errors(self):
def test_dtype1():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
paddle.argmax(data, dtype="float32")
self.assertRaises(TypeError, test_dtype1)
def test_dtype2():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float64")
paddle.argmax(data, dtype="float32")
self.assertRaises(TypeError, test_dtype2)
class TestArgMinMaxOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
def test_argmax_x_type():
x1 = [1, 2, 3]
output = fluid.layers.argmax(x=x1)
self.assertRaises(TypeError, test_argmax_x_type)
def test_argmin_x_type():
x2 = [1, 2, 3]
output = fluid.layers.argmin(x=x2)
self.assertRaises(TypeError, test_argmin_x_type)
if __name__ == '__main__':
    unittest.main()
@@ -321,36 +321,6 @@ class TestArgsortOpDescendingAxisNeg2GPU(TestArgsortOpAxisNeg2GPU):
        self.descending = True
class TestSortOnCPU(TestArgsortOpCPU):
def init_place(self):
self.place = core.CPUPlace()
def test_out(self):
self.init_place()
with fluid.program_guard(fluid.Program()):
input = fluid.data(name="input", shape=[2, 3, 4], dtype="float32")
res = fluid.data(name="output", shape=[2, 3, 4], dtype="float32")
output = paddle.tensor.sort(input=input, out=res)
exe = fluid.Executor(self.place)
data = np.array(
[[[5, 8, 9, 5], [0, 0, 1, 7], [6, 9, 2, 4]],
[[5, 2, 4, 2], [4, 7, 7, 9], [1, 7, 0, 6]]],
dtype='float32')
result = exe.run(feed={'input': data}, fetch_list=[res, output[0]])
self.assertEqual((result[0] == result[1]).all(), True)
class TestSortOnGPU(TestSortOnCPU):
def init_place(self):
if core.is_compiled_with_cuda():
self.place = core.CUDAPlace(0)
else:
self.place = core.CPUPlace()
class TestArgsortErrorOnCPU(unittest.TestCase):
    def init_place(self):
        self.place = core.CPUPlace()
......
@@ -21,7 +21,7 @@ import paddle.fluid.core as core
import paddle.fluid as fluid
import paddle
from paddle.fluid import Program, program_guard
-from paddle.nn.functional import interpolate
+from paddle.fluid.layers import interpolate
def cubic_1(x, a):
......
@@ -19,7 +19,6 @@ import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
-import paddle.tensor as tensor
from paddle.fluid import Program, program_guard
@@ -46,7 +45,7 @@ class API_TestBmm(unittest.TestCase):
                'data1', shape=[-1, 3, 4], dtype='float64')
            data2 = fluid.layers.data(
                'data2', shape=[-1, 4, 5], dtype='float64')
-            result_bmm = paddle.bmm(data1, data2)
+            result_bmm = fluid.layers.bmm(data1, data2)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            input1 = np.random.random([10, 3, 4]).astype('float64')
@@ -67,7 +66,7 @@ class API_TestDygraphBmm(unittest.TestCase):
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(input1)
            y = fluid.dygraph.to_variable(input2)
-            out = paddle.bmm(x, y)
+            out = fluid.layers.bmm(x, y)
            out_np = out.numpy()
            expected_result = np.matmul(input1, input2)
            self.assertTrue(np.allclose(expected_result, out_np))
......
@@ -13,7 +13,6 @@
# limitations under the License.
from __future__ import print_function
-import paddle.tensor as tensor
import paddle.fluid as fluid
import numpy as np
import unittest
@@ -31,12 +30,12 @@ class TestClampAPI(unittest.TestCase):
        ) else fluid.CPUPlace()
        exe = fluid.Executor(place)
-        out_1 = tensor.clamp(images, min=min, max=max)
-        out_2 = tensor.clamp(images, min=0.2, max=0.9)
-        out_3 = tensor.clamp(images, min=0.3)
-        out_4 = tensor.clamp(images, max=0.7)
-        out_5 = tensor.clamp(images, min=min)
-        out_6 = tensor.clamp(images, max=max)
+        out_1 = fluid.layers.clamp(images, min=min, max=max)
+        out_2 = fluid.layers.clamp(images, min=0.2, max=0.9)
+        out_3 = fluid.layers.clamp(images, min=0.3)
+        out_4 = fluid.layers.clamp(images, max=0.7)
+        out_5 = fluid.layers.clamp(images, min=min)
+        out_6 = fluid.layers.clamp(images, max=max)
        res1, res2, res3, res4, res5, res6 = exe.run(
            fluid.default_main_program(),
@@ -59,8 +58,8 @@ class TestClampError(unittest.TestCase):
    def test_errors(self):
        x1 = fluid.layers.data(name='x1', shape=[1], dtype="int16")
        x2 = fluid.layers.data(name='x2', shape=[1], dtype="int8")
-        self.assertRaises(TypeError, tensor.clamp, x=x1, min=0.2, max=0.8)
-        self.assertRaises(TypeError, tensor.clamp, x=x2, min=0.2, max=0.8)
+        self.assertRaises(TypeError, fluid.layers.clamp, x=x1, min=0.2, max=0.8)
+        self.assertRaises(TypeError, fluid.layers.clamp, x=x2, min=0.2, max=0.8)
if __name__ == '__main__':
......
@@ -143,14 +143,5 @@ def np_broadcast_equal(_x, _y):
for args in broadcast_args:
    create_test_broadcast_class('equal_reduce', args, np_broadcast_equal)
class TestEqualReduceAPI(unittest.TestCase):
def test_name(self):
x = fluid.layers.assign(np.array([3, 4], dtype="int32"))
y = fluid.layers.assign(np.array([3, 4], dtype="int32"))
out = paddle.equal(x, y, name='equal_res')
assert 'equal_res' in out.name
if __name__ == '__main__':
    unittest.main()
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from paddle import fluid, tensor
+from paddle import fluid
import paddle.complex as cpx
import paddle.fluid.dygraph as dg
import numpy as np
......
@@ -79,7 +79,7 @@ class TestCrossAPI(unittest.TestCase):
        with program_guard(Program(), Program()):
            x = fluid.layers.data(name='x', shape=[-1, 3])
            y = fluid.layers.data(name='y', shape=[-1, 3])
-            z = paddle.cross(x, y, dim=1)
+            z = fluid.layers.cross(x, y, dim=1)
            exe = fluid.Executor(fluid.CPUPlace())
            res, = exe.run(feed={'x': self.data_x,
                                 'y': self.data_y},
@@ -93,7 +93,7 @@ class TestCrossAPI(unittest.TestCase):
        with program_guard(Program(), Program()):
            x = fluid.layers.data(name='x', shape=[-1, 3])
            y = fluid.layers.data(name='y', shape=[-1, 3])
-            z = paddle.cross(x, y)
+            z = fluid.layers.cross(x, y)
            exe = fluid.Executor(fluid.CPUPlace())
            res, = exe.run(feed={'x': self.data_x,
                                 'y': self.data_y},
@@ -109,7 +109,7 @@ class TestCrossAPI(unittest.TestCase):
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(self.data_x)
            y = fluid.dygraph.to_variable(self.data_y)
-            z = paddle.cross(x, y)
+            z = fluid.layers.cross(x, y)
            np_z = z.numpy()
            expect_out = np.array([[-1.0, -1.0, -1.0], [2.0, 2.0, 2.0],
                                   [-1.0, -1.0, -1.0]])
@@ -119,7 +119,7 @@ class TestCrossAPI(unittest.TestCase):
        with fluid.dygraph.guard():
            x = fluid.dygraph.to_variable(self.data_x)
            y = fluid.dygraph.to_variable(self.data_y)
-            z = paddle.cross(x, y, dim=1)
+            z = fluid.layers.cross(x, y, dim=1)
            np_z = z.numpy()
            expect_out = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0],
                                   [0.0, 0.0, 0.0]])
......
@@ -17,7 +17,6 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
-import paddle.nn.functional as F
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
import paddle.fluid.core as core
@@ -52,8 +51,8 @@ class TestDiagEmbedAPICase(unittest.TestCase):
    def test_case1(self):
        diag_embed = np.random.randn(2, 3, 4).astype('float32')
        data1 = fluid.data(name='data1', shape=[2, 3, 4], dtype='float32')
-        out1 = F.diag_embed(data1)
-        out2 = F.diag_embed(data1, offset=1, dim1=-2, dim2=3)
+        out1 = fluid.layers.diag_embed(data1)
+        out2 = fluid.layers.diag_embed(data1, offset=1, dim1=-2, dim2=3)
        place = core.CPUPlace()
        exe = fluid.Executor(place)
......
@@ -150,7 +150,7 @@ class TestDistAPI(unittest.TestCase):
        p = 2
        x_i = np.random.random((2, 3, 4, 5)).astype("float64")
        y_i = np.random.random((3, 1, 5)).astype("float64")
-        result = paddle.dist(x, y, p)
+        result = fluid.layers.dist(x, y, p)
        place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
        ) else fluid.CPUPlace()
        exe = fluid.Executor(place)
......
@@ -73,15 +73,15 @@ class TestDotOpError(unittest.TestCase):
            # float16 only can be set on GPU place
            x1 = fluid.layers.data(name='x1', shape=[120], dtype="uint8")
            y1 = fluid.layers.data(name='y1', shape=[120], dtype="uint8")
-            self.assertRaises(Exception, paddle.dot, x1, y1)
+            self.assertRaises(Exception, fluid.layers.dot, x1, y1)
            x2 = fluid.layers.data(name='x2', shape=[2, 3], dtype="float32")
            y2 = fluid.layers.data(name='y2', shape=[2, 3], dtype="float32")
-            self.assertRaises(Exception, paddle.dot, x2, y2)
+            self.assertRaises(Exception, fluid.layers.dot, x2, y2)
            x3 = fluid.layers.data(name='x3', shape=[3], dtype="float32")
            y3 = fluid.layers.data(name='y3', shape=[2, 3], dtype="float32")
-            self.assertRaises(Exception, paddle.dot, x2, y3)
+            self.assertRaises(Exception, fluid.layers.dot, x2, y3)
class TestDygraph(unittest.TestCase):
@@ -90,7 +90,7 @@ class TestDygraph(unittest.TestCase):
            x1 = fluid.dygraph.to_variable(np.array([1, 3]).astype(np.float32))
            y1 = fluid.dygraph.to_variable(np.array([2, 5]).astype(np.float32))
            self.assertTrue(
-                np.allclose(paddle.dot(x1, y1).numpy(), np.array([17])))
+                np.allclose(fluid.layers.dot(x1, y1).numpy(), np.array([17])))
            x1 = fluid.dygraph.to_variable(
                np.array([[1, 3], [3, 5]]).astype(np.float32))
@@ -98,7 +98,7 @@ class TestDygraph(unittest.TestCase):
                np.array([[2, 5], [6, 8]]).astype(np.float32))
            self.assertTrue(
                np.array_equal(
-                    paddle.dot(x1, y1).numpy(), np.array([[17], [58]])))
+                    fluid.layers.dot(x1, y1).numpy(), np.array([[17], [58]])))
if __name__ == '__main__':
......
@@ -381,104 +381,5 @@ class TestElementwiseAddOpError(unittest.TestCase):
        self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, y2)
class TestAddOp(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float32")
y = fluid.data(name='y', shape=[3], dtype='float32')
res = fluid.data(name="output", shape=[3], dtype="float32")
y_1 = paddle.add(x, y, out=res)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
data1 = np.array([2, 3, 4], dtype='float32')
data2 = np.array([1, 5, 2], dtype='float32')
np_res, np_y_1 = exe.run(feed={'x': data1,
'y': data2},
fetch_list=[res, y_1])
self.assertEqual((np_res == np_y_1).all(), True)
def test_out_gpu(self):
if not fluid.core.is_compiled_with_cuda():
return
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float32")
y = fluid.data(name='y', shape=[3], dtype='float32')
res = fluid.data(name="output", shape=[3], dtype="float32")
y_1 = paddle.add(x, y, out=res)
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
data1 = np.array([2, 3, 4], dtype='float32')
data2 = np.array([1, 5, 2], dtype='float32')
np_res, np_y_1 = exe.run(feed={'x': data1,
'y': data2},
fetch_list=[res, y_1])
self.assertEqual((np_res == np_y_1).all(), True)
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2, 3], dtype="float32")
y = fluid.data(name='y', shape=[2, 3], dtype='float32')
y_1 = paddle.add(x, y, name='add_res')
self.assertEqual(('add_res' in y_1.name), True)
def test_alpha(self):
with fluid.program_guard(fluid.Program()):
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = paddle.add(x, y, alpha=10)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
z_expected = np.array([12., 53., 24.])
self.assertEqual((z_value == z_expected).all(), True)
def test_alpha_gpu(self):
if not fluid.core.is_compiled_with_cuda():
return
with fluid.program_guard(fluid.Program()):
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = paddle.add(x, y, alpha=-0.5)
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
z_expected = np.array([1.5, 0.5, 3.])
self.assertEqual((z_value == z_expected).all(), True)
def test_dygraph(self):
with fluid.dygraph.guard():
np_x = np.array([2, 3, 4]).astype('float64')
np_y = np.array([1, 5, 2]).astype('float64')
x = fluid.dygraph.to_variable(np_x)
y = fluid.dygraph.to_variable(np_y)
z = paddle.add(x, y, alpha=-0.5)
np_z = z.numpy()
z_expected = np.array([1.5, 0.5, 3.])
self.assertEqual((np_z == z_expected).all(), True)
if __name__ == '__main__':
    unittest.main()
@@ -227,64 +227,5 @@ class TestElementwiseDivOpFp16(ElementwiseDivOp):
            ['X'], 'Out', max_relative_error=1, no_grad_set=set('Y'))
class TestDivOp(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float32")
y = fluid.data(name='y', shape=[3], dtype='float32')
res = fluid.data(name="output", shape=[3], dtype="float32")
y_1 = paddle.div(x, y, out=res)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
data1 = np.array([2, 3, 4], dtype='float32')
data2 = np.array([1, 5, 2], dtype='float32')
np_res, np_y_1 = exe.run(feed={'x': data1,
'y': data2},
fetch_list=[res, y_1])
self.assertEqual((np_res == np_y_1).all(), True)
def test_out_gpu(self):
if not fluid.core.is_compiled_with_cuda():
return
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[3], dtype="float32")
y = fluid.data(name='y', shape=[3], dtype='float32')
res = fluid.data(name="output", shape=[3], dtype="float32")
y_1 = paddle.div(x, y, out=res)
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
data1 = np.array([2, 3, 4], dtype='float32')
data2 = np.array([1, 5, 2], dtype='float32')
np_res, np_y_1 = exe.run(feed={'x': data1,
'y': data2},
fetch_list=[res, y_1])
self.assertEqual((np_res == np_y_1).all(), True)
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2, 3], dtype="float32")
y = fluid.data(name='y', shape=[2, 3], dtype='float32')
y_1 = paddle.div(x, y, name='div_res')
self.assertEqual(('div_res' in y_1.name), True)
def test_dygraph(self):
with fluid.dygraph.guard():
np_x = np.array([2, 3, 4]).astype('float64')
np_y = np.array([1, 5, 2]).astype('float64')
x = fluid.dygraph.to_variable(np_x)
y = fluid.dygraph.to_variable(np_y)
z = paddle.div(x, y)
np_z = z.numpy()
z_expected = np.array([2., 0.6, 2.])
self.assertEqual((np_z == z_expected).all(), True)
if __name__ == '__main__':
    unittest.main()
@@ -75,7 +75,7 @@ class TestEyeOp2(OpTest):
class API_TestTensorEye(unittest.TestCase):
    def test_out(self):
        with fluid.program_guard(fluid.Program()):
-            data = paddle.eye(10)
+            data = fluid.layers.eye(10)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result, = exe.run(fetch_list=[data])
@@ -83,7 +83,7 @@ class API_TestTensorEye(unittest.TestCase):
            self.assertEqual((result == expected_result).all(), True)
        with fluid.program_guard(fluid.Program()):
-            data = paddle.eye(10, num_columns=7, dtype="float64")
+            data = fluid.layers.eye(10, num_columns=7, dtype="float64")
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result, = exe.run(fetch_list=[data])
@@ -91,7 +91,7 @@ class API_TestTensorEye(unittest.TestCase):
            self.assertEqual((result == expected_result).all(), True)
        with fluid.program_guard(fluid.Program()):
-            data = paddle.eye(10, dtype="int64")
+            data = fluid.layers.eye(10, dtype="int64")
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result, = exe.run(fetch_list=[data])
@@ -102,12 +102,12 @@ class API_TestTensorEye(unittest.TestCase):
        with fluid.program_guard(fluid.Program()):
            def test_num_rows_type_check():
-                paddle.eye(-1, dtype="int64")
+                fluid.layers.eye(-1, dtype="int64")
            self.assertRaises(TypeError, test_num_rows_type_check)
            def test_num_columns_type_check():
-                paddle.eye(10, num_columns=5.2, dtype="int64")
+                fluid.layers.eye(10, num_columns=5.2, dtype="int64")
            self.assertRaises(TypeError, test_num_columns_type_check)
......
@@ -153,130 +153,5 @@ class TestFillAnyLikeOpError(unittest.TestCase):
            dtype='int16')
class ApiOnesLikeTest(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data")
ones = paddle.ones_like(data, device="cpu")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(feed={"data": np.random.rand(10)},
fetch_list=[ones])
expected_result = np.ones(10, dtype="float64")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data")
ones = paddle.ones_like(data, device="cpu", dtype="float32")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(feed={"data": np.random.rand(10)},
fetch_list=[ones])
expected_result = np.ones(10, dtype="float32")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data")
ones = paddle.ones_like(data)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(feed={"data": np.random.rand(10)},
fetch_list=[ones])
expected_result = np.ones(10, dtype="float32")
self.assertEqual((result == expected_result).all(), True)
class ApiZerosLikeTest(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data")
zeros = paddle.zeros_like(data, device="cpu")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(feed={"data": np.random.rand(10)},
fetch_list=[zeros])
expected_result = np.zeros(10, dtype="float64")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data")
zeros = paddle.zeros_like(data, device="cpu", dtype="float32")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(feed={"data": np.random.rand(10)},
fetch_list=[zeros])
expected_result = np.zeros(10, dtype="float32")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data")
zeros = paddle.zeros_like(data)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(feed={"data": np.random.rand(10)},
fetch_list=[zeros])
expected_result = np.zeros(10, dtype="float32")
self.assertEqual((result == expected_result).all(), True)
class TestOnesZerosError(unittest.TestCase):
def test_errors(self):
def test_device_error1():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
paddle.ones_like(data, device="opu")
self.assertRaises(ValueError, test_device_error1)
def test_device_error2():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
paddle.ones_like(data, dtype="float")
self.assertRaises(ValueError, test_device_error2)
def test_device_error3():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
paddle.zeros_like(data, device="opu")
self.assertRaises(ValueError, test_device_error3)
def test_device_error4():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
paddle.zeros_like(data, dtype="float")
self.assertRaises(ValueError, test_device_error4)
def test_ones_like_type_error():
with fluid.program_guard(fluid.Program(), fluid.Program()):
fluid.layers.ones_like([10], dtype="float")
self.assertRaises(TypeError, test_ones_like_type_error)
def test_ones_like_dtype_error():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float16")
fluid.layers.ones_like(data, dtype="float32")
self.assertRaises(TypeError, test_ones_like_dtype_error)
def test_ones_like_out_type_error():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
fluid.layers.ones_like(data, dtype="float32", out=[10])
self.assertRaises(TypeError, test_ones_like_out_type_error)
def test_ones_like_out_dtype_error():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
out = fluid.data(name="out", shape=[10], dtype="float16")
fluid.layers.ones_like(data, dtype="float32", out=out)
self.assertRaises(TypeError, test_ones_like_out_dtype_error)
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
...@@ -83,28 +83,6 @@ class TestFillConstantOp4(OpTest): ...@@ -83,28 +83,6 @@ class TestFillConstantOp4(OpTest):
self.check_output() self.check_output()
class TestFillConstantOp5(unittest.TestCase):
def test_errors(self):
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=[1], dtype="float32")
out = paddle.zeros(shape=[1], out=data, dtype="float32")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result = exe.run(feed={"X": np.array(
[0.1], dtype="float32")},
fetch_list=[data, out])
self.assertEqual(result[0], result[1])
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=[1], dtype="float32")
out = paddle.ones(shape=[1], out=data, dtype="float32")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result = exe.run(feed={"X": np.array(
[0.1], dtype="float32")},
fetch_list=[data, out])
self.assertEqual(result[0], result[1])
class TestFillConstantOpWithSelectedRows(unittest.TestCase):
    def check_with_place(self, place):
        scope = core.Scope()
@@ -389,98 +367,5 @@ class TestFillConstantOpError(unittest.TestCase):
        self.assertRaises(TypeError, test_shape_tensor_list_dtype)
class ApiZerosTest(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
zeros = paddle.zeros(shape=[10], dtype="float64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[zeros])
expected_result = np.zeros(10, dtype="float64")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
zeros = paddle.zeros(shape=[10], dtype="int64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[zeros])
expected_result = np.zeros(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
zeros = paddle.zeros(shape=[10], dtype="int64", device="cpu")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[zeros])
expected_result = np.zeros(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True)
class ApiOnesTest(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
ones = paddle.ones(shape=[10], dtype="float64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[ones])
expected_result = np.ones(10, dtype="float64")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
ones = paddle.ones(shape=[10], dtype="int64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[ones])
expected_result = np.ones(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
ones = paddle.ones(shape=[10], dtype="int64", device="cpu")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result, = exe.run(fetch_list=[ones])
expected_result = np.ones(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True)
class ApiOnesZerosError(unittest.TestCase):
def test_errors(self):
def test_error1():
with fluid.program_guard(fluid.Program()):
ones = paddle.ones(shape=10, dtype="int64", device="opu")
self.assertRaises(ValueError, test_error1)
def test_error2():
with fluid.program_guard(fluid.Program()):
ones = paddle.ones(shape=10, dtype="int64", device="opu")
self.assertRaises(ValueError, test_error2)
def test_error3():
with fluid.program_guard(fluid.Program()):
ones = fluid.layers.ones(shape=10, dtype="int64")
self.assertRaises(TypeError, test_error3)
def test_error4():
with fluid.program_guard(fluid.Program()):
ones = fluid.layers.ones(shape=[10], dtype="int8")
self.assertRaises(TypeError, test_error4)
def test_error5():
with fluid.program_guard(fluid.Program()):
ones = fluid.layers.zeros(shape=10, dtype="int64")
self.assertRaises(TypeError, test_error5)
def test_error6():
with fluid.program_guard(fluid.Program()):
ones = fluid.layers.zeros(shape=[10], dtype="int8")
self.assertRaises(TypeError, test_error6)
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
...@@ -113,7 +113,7 @@ class API_TestGather(unittest.TestCase): ...@@ -113,7 +113,7 @@ class API_TestGather(unittest.TestCase):
with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.layers.data('data1', shape=[-1, 2], dtype='float64') data1 = fluid.layers.data('data1', shape=[-1, 2], dtype='float64')
index = fluid.layers.data('index', shape=[-1, 1], dtype='float64') index = fluid.layers.data('index', shape=[-1, 1], dtype='float64')
out = paddle.gather(data1, index) out = fluid.layers.gather(data1, index)
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
input = np.array([[1, 2], [3, 4], [5, 6]]) input = np.array([[1, 2], [3, 4], [5, 6]])
......
@@ -96,33 +96,5 @@ class TestCase4(TestIndexSampleOp):
        self.index_type = "int64"
class TestIndexSampleShape(unittest.TestCase):
def test_shape(self):
import paddle.fluid as fluid
import paddle
# create x value
x_shape = (2, 5)
x_type = "float64"
x_np = np.random.random(x_shape).astype(x_type)
# create index value
index_shape = (2, 3)
index_type = "int32"
index_np = np.random.randint(
low=0, high=x_shape[1], size=index_shape).astype(index_type)
x = fluid.data(name='x', shape=[-1, 5], dtype='float64')
index = fluid.data(name='index', shape=[-1, 3], dtype='int32')
output = paddle.index_sample(x=x, index=index)
place = fluid.CPUPlace()
exe = fluid.Executor(place=place)
exe.run(fluid.default_startup_program())
feed = {'x': x_np, 'index': index_np}
res = exe.run(feed=feed, fetch_list=[output])
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
...@@ -83,7 +83,7 @@ class TestIndexSelectAPI(unittest.TestCase): ...@@ -83,7 +83,7 @@ class TestIndexSelectAPI(unittest.TestCase):
x = fluid.layers.data(name='x', shape=[-1, 4]) x = fluid.layers.data(name='x', shape=[-1, 4])
index = fluid.layers.data( index = fluid.layers.data(
name='index', shape=[3], dtype='int32', append_batch_size=False) name='index', shape=[3], dtype='int32', append_batch_size=False)
z = paddle.index_select(x, index, dim=1) z = fluid.layers.index_select(x, index, dim=1)
exe = fluid.Executor(fluid.CPUPlace()) exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(feed={'x': self.data_x, res, = exe.run(feed={'x': self.data_x,
'index': self.data_index}, 'index': self.data_index},
...@@ -98,7 +98,7 @@ class TestIndexSelectAPI(unittest.TestCase): ...@@ -98,7 +98,7 @@ class TestIndexSelectAPI(unittest.TestCase):
x = fluid.layers.data(name='x', shape=[-1, 4]) x = fluid.layers.data(name='x', shape=[-1, 4])
index = fluid.layers.data( index = fluid.layers.data(
name='index', shape=[3], dtype='int32', append_batch_size=False) name='index', shape=[3], dtype='int32', append_batch_size=False)
z = paddle.index_select(x, index) z = fluid.layers.index_select(x, index)
exe = fluid.Executor(fluid.CPUPlace()) exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(feed={'x': self.data_x, res, = exe.run(feed={'x': self.data_x,
'index': self.data_index}, 'index': self.data_index},
...@@ -114,7 +114,7 @@ class TestIndexSelectAPI(unittest.TestCase): ...@@ -114,7 +114,7 @@ class TestIndexSelectAPI(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(self.data_x) x = fluid.dygraph.to_variable(self.data_x)
index = fluid.dygraph.to_variable(self.data_index) index = fluid.dygraph.to_variable(self.data_index)
z = paddle.index_select(x, index) z = fluid.layers.index_select(x, index)
np_z = z.numpy() np_z = z.numpy()
expect_out = np.array( expect_out = np.array(
[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [5.0, 6.0, 7.0, 8.0]]) [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [5.0, 6.0, 7.0, 8.0]])
...@@ -124,7 +124,7 @@ class TestIndexSelectAPI(unittest.TestCase): ...@@ -124,7 +124,7 @@ class TestIndexSelectAPI(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(self.data_x) x = fluid.dygraph.to_variable(self.data_x)
index = fluid.dygraph.to_variable(self.data_index) index = fluid.dygraph.to_variable(self.data_index)
z = paddle.index_select(x, index, dim=1) z = fluid.layers.index_select(x, index, dim=1)
np_z = z.numpy() np_z = z.numpy()
expect_out = np.array([[1.0, 2.0, 2.0], [5.0, 6.0, 6.0], expect_out = np.array([[1.0, 2.0, 2.0], [5.0, 6.0, 6.0],
[9.0, 10.0, 10.0]]) [9.0, 10.0, 10.0]])
......
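The expected outputs asserted in the dygraph cases pin down index_select's rule; a NumPy check with the data those assertions imply ([[1..4], [5..8], [9..12]] and index [0, 1, 1]):

import numpy as np

x = np.array([[1.0, 2.0, 3.0, 4.0],
              [5.0, 6.0, 7.0, 8.0],
              [9.0, 10.0, 11.0, 12.0]])
index = np.array([0, 1, 1])
print(np.take(x, index, axis=0))  # rows 0, 1, 1 -- the dim=0 expect_out
print(np.take(x, index, axis=1))  # cols 0, 1, 1 -- the dim=1 expect_out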
...@@ -3488,7 +3488,7 @@ class TestBook(LayerTest): ...@@ -3488,7 +3488,7 @@ class TestBook(LayerTest):
append_batch_size=False, append_batch_size=False,
dtype='float32') dtype='float32')
out = paddle.addmm(input=input, x=x, y=y) out = fluid.layers.addmm(input=input, x=x, y=y)
return (out) return (out)
def test_retinanet_detection_output(self): def test_retinanet_detection_output(self):
......
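addmm fuses a bias add with a matrix product; a minimal sketch of the assumed semantics, with the scaling coefficients alpha and beta defaulting to 1:

import numpy as np

def addmm_ref(input, x, y, alpha=1.0, beta=1.0):
    # Assumed semantics: beta * input + alpha * (x @ y).
    return beta * input + alpha * (x @ y)

inp = np.ones((3, 3), dtype="float32")
a = np.random.rand(3, 2).astype("float32")
b = np.random.rand(2, 3).astype("float32")
print(addmm_ref(inp, a, b).shape)  # (3, 3)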
...@@ -70,68 +70,5 @@ class TestLinspaceOpNumOneCase(OpTest): ...@@ -70,68 +70,5 @@ class TestLinspaceOpNumOneCase(OpTest):
self.check_output() self.check_output()
class TestLinspaceAPI(unittest.TestCase):
def test_out(self):
with program_guard(fluid.Program()):
out_1 = fluid.data(name="out_1", shape=[5], dtype="float32")
out_2 = paddle.tensor.linspace(0, 10, 5, dtype='float32', out=out_1)
exe = fluid.Executor(place=fluid.CPUPlace())
ipt = {'out_1': np.random.random([5]).astype('float32')}
res_1, res_2 = exe.run(fluid.default_main_program(),
feed=ipt,
fetch_list=[out_1, out_2])
assert np.array_equal(res_1, res_2)
def test_name(self):
with fluid.program_guard(fluid.Program()):
out = paddle.linspace(
0, 10, 5, dtype='float32', name='linspace_res')
assert 'linspace_res' in out.name
class TestLinspaceOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# for ci coverage
# The device of fill_constant must be in 'cpu', 'gpu' or None
def test_device_value():
paddle.linspace(0, 10, 1, dtype="float32", device='xxxpu')
self.assertRaises(ValueError, test_device_value)
def test_start_type():
fluid.layers.linspace([0], 10, 1, dtype="float32")
self.assertRaises(TypeError, test_start_type)
def test_end_type():
fluid.layers.linspace(0, [10], 1, dtype="float32")
self.assertRaises(TypeError, test_end_type)
def test_step_type():
fluid.layers.linspace(0, 10, [0], dtype="float32")
self.assertRaises(TypeError, test_step_type)
def test_start_dtype():
start = fluid.data(shape=[1], dtype="int32", name="start")
fluid.layers.linspace(start, 10, 1, dtype="float32")
self.assertRaises(TypeError, test_start_dtype)
def test_end_dtype():
end = fluid.data(shape=[1], dtype="int32", name="end")
fluid.layers.linspace(0, end, 1, dtype="float32")
self.assertRaises(TypeError, test_end_dtype)
def test_step_dtype():
step = fluid.data(shape=[1], dtype="int32", name="step")
fluid.layers.linspace(0, 10, step, dtype="float32")
self.assertRaises(TypeError, test_step_dtype)
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
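Numerically the removed TestLinspaceAPI relies on the same behaviour as NumPy's linspace; the deleted error cases only exercise the stricter input typing (scalar or 1-element tensor of a float dtype for start/stop, integer num):

import numpy as np

print(np.linspace(0, 10, 5, dtype="float32"))  # [ 0.   2.5  5.   7.5 10. ]
print(np.linspace(0, 10, 1, dtype="float32"))  # [0.] -- the num=1 edge case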
...@@ -27,15 +27,15 @@ class TestLogSumOpError(unittest.TestCase): ...@@ -27,15 +27,15 @@ class TestLogSumOpError(unittest.TestCase):
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
x1 = fluid.layers.data(name='x1', shape=[120], dtype="uint8") x1 = fluid.layers.data(name='x1', shape=[120], dtype="uint8")
self.assertRaises(Exception, paddle.logsumexp, x1) self.assertRaises(Exception, fluid.layers.logsumexp, x1)
x2 = fluid.layers.data(name='x2', shape=[2, 3], dtype="int") x2 = fluid.layers.data(name='x2', shape=[2, 3], dtype="int")
self.assertRaises(Exception, paddle.logsumexp, x2) self.assertRaises(Exception, fluid.layers.logsumexp, x2)
x3 = fluid.layers.data(name='x3', shape=[3], dtype="float16") x3 = fluid.layers.data(name='x3', shape=[3], dtype="float16")
self.assertRaises(Exception, paddle.logsumexp, x3) self.assertRaises(Exception, fluid.layers.logsumexp, x3)
self.assertRaises(AssertionError, paddle.logsumexp, None) self.assertRaises(AssertionError, fluid.layers.logsumexp, None)
class TestLogSumExpOp(unittest.TestCase): class TestLogSumExpOp(unittest.TestCase):
...@@ -45,13 +45,14 @@ class TestLogSumExpOp(unittest.TestCase): ...@@ -45,13 +45,14 @@ class TestLogSumExpOp(unittest.TestCase):
x = fluid.dygraph.to_variable(np_x) x = fluid.dygraph.to_variable(np_x)
self.assertTrue( self.assertTrue(
np.allclose( np.allclose(
paddle.logsumexp(x).numpy(), np.log(np.sum(np.exp(np_x))))) fluid.layers.logsumexp(x).numpy(),
np.log(np.sum(np.exp(np_x)))))
np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32) np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
x = fluid.dygraph.to_variable(np_x) x = fluid.dygraph.to_variable(np_x)
self.assertTrue( self.assertTrue(
np.allclose( np.allclose(
paddle.logsumexp( fluid.layers.logsumexp(
x, dim=[1, 2]).numpy(), x, dim=[1, 2]).numpy(),
np.log(np.sum(np.exp(np_x), axis=(1, 2))))) np.log(np.sum(np.exp(np_x), axis=(1, 2)))))
...@@ -59,7 +60,7 @@ class TestLogSumExpOp(unittest.TestCase): ...@@ -59,7 +60,7 @@ class TestLogSumExpOp(unittest.TestCase):
x = fluid.dygraph.to_variable(np_x) x = fluid.dygraph.to_variable(np_x)
self.assertTrue( self.assertTrue(
np.allclose( np.allclose(
paddle.logsumexp( fluid.layers.logsumexp(
x, dim=[2]).numpy(), x, dim=[2]).numpy(),
np.log(np.sum(np.exp(np_x), axis=(2))))) np.log(np.sum(np.exp(np_x), axis=(2)))))
...@@ -67,7 +68,7 @@ class TestLogSumExpOp(unittest.TestCase): ...@@ -67,7 +68,7 @@ class TestLogSumExpOp(unittest.TestCase):
x = fluid.dygraph.to_variable(np_x) x = fluid.dygraph.to_variable(np_x)
self.assertTrue( self.assertTrue(
np.allclose( np.allclose(
paddle.logsumexp( fluid.layers.logsumexp(
x, keepdim=True).numpy(), x, keepdim=True).numpy(),
np.log(np.sum(np.exp(np_x), keepdims=True)))) np.log(np.sum(np.exp(np_x), keepdims=True))))
...@@ -76,7 +77,7 @@ class TestLogSumExpOp(unittest.TestCase): ...@@ -76,7 +77,7 @@ class TestLogSumExpOp(unittest.TestCase):
helper = LayerHelper("test_logsumexp") helper = LayerHelper("test_logsumexp")
out = helper.create_variable( out = helper.create_variable(
type=x.type, name='out', dtype=x.dtype, persistable=False) type=x.type, name='out', dtype=x.dtype, persistable=False)
paddle.logsumexp(x, out=out) fluid.layers.logsumexp(x, out=out)
self.assertTrue( self.assertTrue(
np.allclose(out.numpy(), np.log(np.sum(np.exp(np_x))))) np.allclose(out.numpy(), np.log(np.sum(np.exp(np_x)))))
......
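Every assertion above compares against the naive identity np.log(np.sum(np.exp(x))), which overflows for large inputs; a max-shifted reference is algebraically identical and stable (a sketch, not necessarily how the op is implemented):

import numpy as np

def logsumexp_ref(x, axis=None, keepdim=False):
    # Shift by the max so exp() cannot overflow, then undo the shift.
    axis = tuple(axis) if isinstance(axis, list) else axis
    m = np.max(x, axis=axis, keepdims=True)
    out = m + np.log(np.sum(np.exp(x - m), axis=axis, keepdims=True))
    return out if keepdim else np.squeeze(out, axis=axis)

np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
assert np.allclose(logsumexp_ref(np_x, axis=(1, 2)),
                   np.log(np.sum(np.exp(np_x), axis=(1, 2))))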
...@@ -243,67 +243,6 @@ for dim in [4]: ...@@ -243,67 +243,6 @@ for dim in [4]:
}) })
class API_TestMm(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[3, 2], dtype="float64")
y = fluid.data(name='y', shape=[2, 3], dtype='float64')
res = fluid.data(name="output", shape=[3, 3], dtype="float64")
y_1 = paddle.mm(x, y, out=res)
exe = fluid.Executor(fluid.CPUPlace())
data1 = np.random.rand(3, 2)
data2 = np.random.rand(2, 3)
np_res, expected_result = exe.run(feed={'x': data1,
'y': data2},
fetch_list=[res, y_1])
self.assertTrue(
np.allclose(
np.array(np_res), np.array(expected_result), atol=1e-5),
"two value is\
{}\n{}, check diff!".format(np_res, expected_result))
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2], dtype="float64")
y = fluid.data(name='y', shape=[2], dtype='float64')
res = fluid.data(name="output", shape=[1], dtype="float64")
result = paddle.mm(x, y)
exe = fluid.Executor(fluid.CPUPlace())
data1 = np.random.rand(2)
data2 = np.random.rand(2)
np_res = exe.run(feed={'x': data1, 'y': data2}, fetch_list=[result])
expected_result = np.matmul(
data1.reshape(1, 2), data2.reshape(2, 1))
self.assertTrue(
np.allclose(
np_res, expected_result, atol=1e-5),
"two value is\
{}\n{}, check diff!".format(np_res, expected_result))
def test_dygraph_with_out(self):
device = fluid.CPUPlace()
with fluid.dygraph.guard(device):
input_array1 = np.random.rand(3, 4).astype("float64")
input_array2 = np.random.rand(4, 3).astype("float64")
out_array = np.random.rand(3, 3).astype("float64")
data1 = fluid.dygraph.to_variable(input_array1)
data2 = fluid.dygraph.to_variable(input_array2)
paddle_out_holder = fluid.dygraph.to_variable(out_array)
out = paddle.mm(data1, data2, out=paddle_out_holder)
self.assertTrue(np.allclose(paddle_out_holder.numpy(), out.numpy()))
def test_dygraph_without_out(self):
device = fluid.CPUPlace()
with fluid.dygraph.guard(device):
input_array1 = np.random.rand(3, 4).astype("float64")
input_array2 = np.random.rand(4, 3).astype("float64")
data1 = fluid.dygraph.to_variable(input_array1)
data2 = fluid.dygraph.to_variable(input_array2)
out = paddle.mm(data1, data2)
expected_result = np.matmul(input_array1, input_array2)
self.assertTrue(np.allclose(expected_result, out.numpy()))
class Test_API_Matmul(unittest.TestCase): class Test_API_Matmul(unittest.TestCase):
def test_dygraph_without_out(self): def test_dygraph_without_out(self):
device = fluid.CPUPlace() device = fluid.CPUPlace()
...@@ -312,41 +251,10 @@ class Test_API_Matmul(unittest.TestCase): ...@@ -312,41 +251,10 @@ class Test_API_Matmul(unittest.TestCase):
input_array2 = np.random.rand(4, 3).astype("float64") input_array2 = np.random.rand(4, 3).astype("float64")
data1 = fluid.dygraph.to_variable(input_array1) data1 = fluid.dygraph.to_variable(input_array1)
data2 = fluid.dygraph.to_variable(input_array2) data2 = fluid.dygraph.to_variable(input_array2)
out = paddle.matmul(data1, data2) out = fluid.layers.matmul(data1, data2)
expected_result = np.matmul(input_array1, input_array2) expected_result = np.matmul(input_array1, input_array2)
self.assertTrue(np.allclose(expected_result, out.numpy())) self.assertTrue(np.allclose(expected_result, out.numpy()))
class API_TestMmError(unittest.TestCase):
def test_errors(self):
def test_error1():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.data(name="data1", shape=[10, 2], dtype="float32")
data2 = fluid.data(name="data2", shape=[3, 10], dtype="float32")
paddle.mm(data1, data2)
self.assertRaises(ValueError, test_error1)
def test_error2():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.data(
name="data1", shape=[-1, 10, 2], dtype="float32")
data2 = fluid.data(
name="data2", shape=[-1, 2, 10], dtype="float32")
paddle.mm(data1, data2)
test_error2()
def test_error3():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.data(
name="data1", shape=[10, 10, 2], dtype="float32")
data2 = fluid.data(
name="data2", shape=[3, 2, 10], dtype="float32")
paddle.mm(data1, data2)
self.assertRaises(ValueError, test_error3)
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
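The three deleted error cases pin down mm's static shape checking for 2-D and batched inputs; a sketch of those rules, treating -1 as an unknown dimension the way fluid.data does:

def mm_shape_check(x_shape, y_shape):
    # Inner dimensions must agree: x[..., m, k] @ y[..., k, n].
    if x_shape[-1] != y_shape[-2]:
        raise ValueError("inner dims mismatch: %d vs %d"
                         % (x_shape[-1], y_shape[-2]))
    # Leading batch dimensions must agree unless one side is unknown (-1).
    for a, b in zip(x_shape[:-2], y_shape[:-2]):
        if a != b and -1 not in (a, b):
            raise ValueError("batch dims mismatch: %d vs %d" % (a, b))

mm_shape_check([-1, 10, 2], [-1, 2, 10])   # passes, like test_error2
# mm_shape_check([10, 2], [3, 10])         # raises, like test_error1
# mm_shape_check([10, 10, 2], [3, 2, 10])  # raises, like test_error3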
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0 # http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, # distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import print_function from __future__ import print_function
...@@ -79,7 +79,7 @@ class TestMeshgridOp3(unittest.TestCase): ...@@ -79,7 +79,7 @@ class TestMeshgridOp3(unittest.TestCase):
out_2 = np.broadcast_to(out_2, [100, 200]) out_2 = np.broadcast_to(out_2, [100, 200])
exe = fluid.Executor(place=fluid.CPUPlace()) exe = fluid.Executor(place=fluid.CPUPlace())
grid_x, grid_y = paddle.meshgrid([x, y]) grid_x, grid_y = fluid.layers.meshgrid([x, y])
res_1, res_2 = exe.run(fluid.default_main_program(), res_1, res_2 = exe.run(fluid.default_main_program(),
feed={'x': input_1, feed={'x': input_1,
'y': input_2}, 'y': input_2},
...@@ -95,7 +95,7 @@ class TestMeshgridOp4(unittest.TestCase): ...@@ -95,7 +95,7 @@ class TestMeshgridOp4(unittest.TestCase):
def test_input_type(): def test_input_type():
x = fluid.data(shape=[200], dtype='float32', name='x2') x = fluid.data(shape=[200], dtype='float32', name='x2')
paddle.meshgrid(x) fluid.layers.meshgrid(x)
self.assertRaises(TypeError, test_input_type) self.assertRaises(TypeError, test_input_type)
...@@ -108,7 +108,7 @@ class TestMeshgridOp5(unittest.TestCase): ...@@ -108,7 +108,7 @@ class TestMeshgridOp5(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
tensor_3 = fluid.dygraph.to_variable(input_3) tensor_3 = fluid.dygraph.to_variable(input_3)
tensor_4 = fluid.dygraph.to_variable(input_4) tensor_4 = fluid.dygraph.to_variable(input_4)
res_3, res_4 = paddle.meshgrid([tensor_3, tensor_4]) res_3, res_4 = fluid.layers.meshgrid([tensor_3, tensor_4])
assert np.array_equal(res_3.shape, [100, 200]) assert np.array_equal(res_3.shape, [100, 200])
assert np.array_equal(res_4.shape, [100, 200]) assert np.array_equal(res_4.shape, [100, 200])
......
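The [100, 200] grid shapes asserted above match 'ij'-indexed meshgrids; a NumPy check built with the same broadcast_to references as TestMeshgridOp3:

import numpy as np

x = np.random.rand(100).astype("float32")
y = np.random.rand(200).astype("float32")
gx, gy = np.meshgrid(x, y, indexing="ij")
assert gx.shape == gy.shape == (100, 200)
assert np.array_equal(gx, np.broadcast_to(x.reshape(100, 1), [100, 200]))
assert np.array_equal(gy, np.broadcast_to(y.reshape(1, 200), [100, 200]))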
...@@ -175,35 +175,5 @@ class TestFP16MulOp2(TestMulOp2): ...@@ -175,35 +175,5 @@ class TestFP16MulOp2(TestMulOp2):
no_grad_set=set('Y')) no_grad_set=set('Y'))
class TestMulOpAttr(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2, 3], dtype="float32")
y = fluid.data(name='y', shape=[3, 2], dtype='float32')
res = fluid.data(name="output", shape=[2, 2], dtype="float32")
y_1 = paddle.mul(x, y, out=res)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
data1 = np.array([[1, 2, 3], [4, 5, 6]], dtype='float32')
data2 = np.array([[1, 2], [1, 2], [1, 2]], dtype='float32')
np_res, np_y_1 = exe.run(feed={'x': data1,
'y': data2},
fetch_list=[res, y_1])
self.assertEqual((np_res == np_y_1).all(), True)
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2, 3], dtype="float32")
y = fluid.data(name='y', shape=[3, 2], dtype='float32')
res = fluid.data(name="output", shape=[2, 2], dtype="float32")
y_1 = paddle.mul(x, y, name='mul_res')
y_2 = paddle.mul(x, y, out=res, name='mul_res')
self.assertEqual(('mul_res' in y_1.name), True)
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
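For the 2-D inputs in the removed TestMulOpAttr, mul is assumed to reduce to a plain matrix product (higher ranks go through num_flatten_dims first); the value both fetches compared is then:

import numpy as np

data1 = np.array([[1, 2, 3], [4, 5, 6]], dtype="float32")
data2 = np.array([[1, 2], [1, 2], [1, 2]], dtype="float32")
print(data1 @ data2)  # [[ 6. 12.] [15. 30.]]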
...@@ -27,7 +27,7 @@ class TestNonZeroAPI(unittest.TestCase): ...@@ -27,7 +27,7 @@ class TestNonZeroAPI(unittest.TestCase):
data = np.array([[True, False], [False, True]]) data = np.array([[True, False], [False, True]])
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[-1, 2]) x = fluid.layers.data(name='x', shape=[-1, 2])
y = paddle.nonzero(x, as_tuple=True) y = fluid.layers.nonzero(x, as_tuple=True)
self.assertEqual(type(y), tuple) self.assertEqual(type(y), tuple)
self.assertEqual(len(y), 2) self.assertEqual(len(y), 2)
z = fluid.layers.concat(list(y), axis=1) z = fluid.layers.concat(list(y), axis=1)
...@@ -42,7 +42,7 @@ class TestNonZeroAPI(unittest.TestCase): ...@@ -42,7 +42,7 @@ class TestNonZeroAPI(unittest.TestCase):
data = np.array([True, True, False]) data = np.array([True, True, False])
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[-1]) x = fluid.layers.data(name='x', shape=[-1])
y = paddle.nonzero(x, as_tuple=True) y = fluid.layers.nonzero(x, as_tuple=True)
self.assertEqual(type(y), tuple) self.assertEqual(type(y), tuple)
self.assertEqual(len(y), 1) self.assertEqual(len(y), 1)
z = fluid.layers.concat(list(y), axis=1) z = fluid.layers.concat(list(y), axis=1)
...@@ -57,7 +57,7 @@ class TestNonZeroAPI(unittest.TestCase): ...@@ -57,7 +57,7 @@ class TestNonZeroAPI(unittest.TestCase):
data = np.array([[True, False], [False, True]]) data = np.array([[True, False], [False, True]])
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[-1, 2]) x = fluid.layers.data(name='x', shape=[-1, 2])
y = paddle.nonzero(x) y = fluid.layers.nonzero(x)
exe = fluid.Executor(fluid.CPUPlace()) exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(feed={'x': data}, res, = exe.run(feed={'x': data},
fetch_list=[y.name], fetch_list=[y.name],
...@@ -68,7 +68,7 @@ class TestNonZeroAPI(unittest.TestCase): ...@@ -68,7 +68,7 @@ class TestNonZeroAPI(unittest.TestCase):
data = np.array([True, True, False]) data = np.array([True, True, False])
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[-1]) x = fluid.layers.data(name='x', shape=[-1])
y = paddle.nonzero(x) y = fluid.layers.nonzero(x)
exe = fluid.Executor(fluid.CPUPlace()) exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(feed={'x': data}, res, = exe.run(feed={'x': data},
fetch_list=[y.name], fetch_list=[y.name],
......
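The tests above exercise both layouts of nonzero: a single [n, ndim] coordinate matrix, and (with as_tuple=True) one index tensor per dimension, each assumed to be an [n, 1] column so the concat along axis=1 works; in NumPy terms:

import numpy as np

data = np.array([[True, False], [False, True]])
print(np.argwhere(data))       # [[0 0] [1 1]] -- the as_tuple=False layout
rows, cols = np.nonzero(data)  # one 1-D index vector per dimension
# Reshape to [n, 1] columns to mimic the tuple elements the tests concat.
print(np.concatenate([rows[:, None], cols[:, None]], axis=1))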
...@@ -118,93 +118,5 @@ class TestPnormOp2(TestPnormOp): ...@@ -118,93 +118,5 @@ class TestPnormOp2(TestPnormOp):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out')
def run_out(self, p, axis, shape_x, shape_y, dtype):
with fluid.program_guard(fluid.Program()):
data1 = fluid.data(name="X", shape=shape_x, dtype=dtype)
data2 = fluid.data(name="Y", shape=shape_y, dtype=dtype)
out = paddle.norm(input=data1, p=p, axis=axis, out=data2)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
result = exe.run(feed={"X": np.random.rand(*shape_x).astype(dtype)},
fetch_list=[data2, out])
self.assertEqual((result[0] == result[1]).all(), True)
def run_fro(self, p, axis, shape_x, dtype):
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=shape_x, dtype=dtype)
out = paddle.norm(input=data, p=p, axis=axis)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
np_input = (np.random.rand(*shape_x) + 1.0).astype(dtype)
expected_result = frobenius_norm(np_input, axis=axis)
result, = exe.run(feed={"X": np_input}, fetch_list=[out])
self.assertEqual((np.abs(result - expected_result) < 1e-6).all(), True)
def run_pnorm(self, p, axis, shape_x, dtype):
with fluid.program_guard(fluid.Program()):
data = fluid.data(name="X", shape=shape_x, dtype=dtype)
out = paddle.norm(input=data, p=p, axis=axis)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
np_input = (np.random.rand(*shape_x) + 1.0).astype(dtype)
expected_result = p_norm(np_input, porder=p, axis=axis).astype(dtype)
result, = exe.run(feed={"X": np_input}, fetch_list=[out])
self.assertEqual((np.abs(result - expected_result) < 1e-6).all(), True)
class API_NormTest(unittest.TestCase):
def test_output_result(self):
run_out(self, p=2, axis=1, shape_x=[3, 4], shape_y=[3], dtype="float32")
run_out(
self,
p='fro',
axis=None,
shape_x=[3, 4],
shape_y=[1],
dtype="float32")
def test_basic(self):
run_fro(self, p='fro', axis=None, shape_x=[3, 3, 4], dtype="float32")
run_fro(self, p='fro', axis=[0, 1], shape_x=[3, 3, 4], dtype="float64")
run_pnorm(self, p=2, axis=None, shape_x=[3, 4], dtype="float32")
run_pnorm(self, p=2, axis=1, shape_x=[3, 4], dtype="float64")
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[10, 10], dtype="float32")
y_1 = paddle.norm(x, p='fro', name='frobenius_name')
y_2 = paddle.norm(x, p=2, name='pnorm_name')
self.assertEqual(('frobenius_name' in y_1.name), True)
self.assertEqual(('pnorm_name' in y_2.name), True)
def test_errors(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
def err_dtype(p, shape_x, xdtype, out=None):
data = fluid.data(shape=shape_x, dtype=xdtype)
paddle.norm(data, p=p, out=out)
self.assertRaises(TypeError, err_dtype, "fro", [2, 2], "int64")
out = fluid.data(name="out", shape=[1], dtype="int64")
self.assertRaises(TypeError, err_dtype, "fro", [2, 2], "float64",
out)
self.assertRaises(TypeError, err_dtype, 2, [10], "int64")
self.assertRaises(TypeError, err_dtype, 2, [10], "float64", out)
data = fluid.data(name="data_2d", shape=[2, 2], dtype="float64")
self.assertRaises(ValueError, paddle.norm, data, p="unsupport norm")
self.assertRaises(ValueError, paddle.norm, data, p=[1])
self.assertRaises(ValueError, paddle.norm, data, p=[1], axis=-1)
self.assertRaises(
ValueError, paddle.norm, data, p='unsupport', axis=[-2, -1])
data = fluid.data(name="data_3d", shape=[2, 2, 2], dtype="float64")
self.assertRaises(
ValueError, paddle.norm, data, p='unsupport', axis=[-2, -1])
self.assertRaises(
ValueError, paddle.norm, data, p='unsupport', axis=[-3, -2, -1])
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
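run_fro and run_pnorm above call the module-level helpers frobenius_norm and p_norm, which are defined earlier in the file and outside this excerpt; hedged reference versions consistent with how they are used here:

import numpy as np

def frobenius_norm(x, axis=None):
    # Assumed helper behaviour: sqrt of the sum of squares over `axis`.
    axis = tuple(axis) if isinstance(axis, list) else axis
    return np.sqrt(np.sum(x * x, axis=axis))

def p_norm(x, porder=2, axis=None):
    # Assumed helper behaviour: (sum |x|^p)^(1/p) over `axis`.
    return np.sum(np.abs(x) ** porder, axis=axis) ** (1.0 / porder)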
...@@ -518,149 +518,5 @@ class TestReduceMeanOpError(unittest.TestCase): ...@@ -518,149 +518,5 @@ class TestReduceMeanOpError(unittest.TestCase):
self.assertRaises(TypeError, fluid.layers.reduce_mean, x2) self.assertRaises(TypeError, fluid.layers.reduce_mean, x2)
class API_TestSumOpError(unittest.TestCase):
def test_errors(self):
def test_dtype1():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
paddle.sum(data, dtype="int32")
self.assertRaises(ValueError, test_dtype1)
def test_dtype2():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="float32")
paddle.sum(data, dtype="float32")
self.assertRaises(ValueError, test_dtype2)
def test_dtype3():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="int32")
paddle.sum(data, dtype="bool")
self.assertRaises(ValueError, test_dtype3)
def test_dtype4():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data(name="data", shape=[10], dtype="int32")
paddle.sum(data, dtype="int32")
self.assertRaises(ValueError, test_dtype4)
class API_TestSumOp(unittest.TestCase):
def test_1(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="float32")
result_sum = paddle.sum(input=data, dim=1, dtype="float64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.rand(10, 10).astype(np.float32)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_sum])
self.assertEqual(
(res == np.sum(input_data.astype(np.float64), axis=1)).all(), True)
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="int32")
result_sum = paddle.sum(input=data, dim=1, dtype="int64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.randint(10, size=(10, 10)).astype(np.int32)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_sum])
self.assertEqual(
(res == np.sum(input_data.astype(np.int64), axis=1)).all(), True)
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="int32")
result_sum = paddle.sum(input=data, dim=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.randint(10, size=(10, 10)).astype(np.int32)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_sum])
self.assertEqual((res == np.sum(input_data, axis=1)).all(), True)
with fluid.dygraph.guard():
np_x = np.array([10, 10]).astype('float64')
x = fluid.dygraph.to_variable(np_x)
z = paddle.sum(x, dim=0)
np_z = z.numpy()
z_expected = np.array(np.sum(np_x, axis=0))
self.assertEqual((np_z == z_expected).all(), True)
class API_TestMaxOp(unittest.TestCase):
def test_1(self):
# type: float
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="float32")
result_max = paddle.max(input=data, dim=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.rand(10, 10).astype(np.float32)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_max])
self.assertEqual((res == np.max(input_data, axis=1)).all(), True)
# type: int
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="int64")
result_max = paddle.max(input=data, dim=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_max])
self.assertEqual((res == np.max(input_data, axis=1)).all(), True)
# dygraph
with fluid.dygraph.guard():
np_x = np.array([10, 10]).astype('float64')
x = fluid.dygraph.to_variable(np_x)
z = paddle.max(x, dim=0)
np_z = z.numpy()
z_expected = np.array(np.max(np_x, axis=0))
self.assertEqual((np_z == z_expected).all(), True)
class API_TestMinOp(unittest.TestCase):
def test_1(self):
# type: float
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="float32")
result_min = paddle.min(input=data, dim=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.rand(10, 10).astype(np.float32)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
self.assertEqual((res == np.min(input_data, axis=1)).all(), True)
# type: int
with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.data("data", shape=[10, 10], dtype="int64")
result_min = paddle.min(input=data, dim=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
self.assertEqual((res == np.min(input_data, axis=1)).all(), True)
# dygraph
with fluid.dygraph.guard():
np_x = np.array([10, 10]).astype('float64')
x = fluid.dygraph.to_variable(np_x)
z = paddle.min(x, dim=0)
np_z = z.numpy()
z_expected = np.array(np.min(np_x, axis=0))
self.assertEqual((np_z == z_expected).all(), True)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
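The dtype arguments in API_TestSumOp promote the input before reducing, which is also how the reference values are built; the same pattern in plain NumPy:

import numpy as np

x = np.random.randint(10, size=(10, 10)).astype(np.int32)
ref = np.sum(x.astype(np.int64), axis=1)       # promote first, then reduce
assert np.array_equal(ref, np.sum(x, axis=1))  # same values, wider dtype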
...@@ -59,7 +59,7 @@ class TestRollAPI(unittest.TestCase): ...@@ -59,7 +59,7 @@ class TestRollAPI(unittest.TestCase):
self.data_x = np.array( self.data_x = np.array(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]) [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
def test_index_select_api(self): def test_roll_api(self):
self.input_data() self.input_data()
# case 1: # case 1:
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle import fluid, nn
import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import paddle.nn.functional as F
import unittest
class RowConvTestCase(unittest.TestCase):
def __init__(self,
methodName='runTest',
batch_size=4,
num_channels=8,
time_steps=12,
context_size=3,
act=None,
dtype="float32"):
super(RowConvTestCase, self).__init__(methodName=methodName)
self.batch_size = batch_size
self.num_channels = num_channels
self.time_steps = time_steps
self.context_size = context_size
self.act = act
self.dtype = dtype
def setUp(self):
input_shape = (self.batch_size, self.time_steps, self.num_channels)
self.input = np.random.uniform(size=input_shape).astype(self.dtype)
self.weight_shape = weight_shape = (self.context_size + 1,
self.num_channels)
self.weight = np.random.uniform(size=weight_shape).astype(self.dtype)
def fluid_layer(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
x = fluid.data(
"input", [-1, -1, self.num_channels], dtype=self.dtype)
y = fluid.layers.row_conv(
x,
self.context_size,
param_attr=I.NumpyArrayInitializer(self.weight),
act=self.act)
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
return y_np
def functional_declarative(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
x = fluid.data(
"input", [-1, -1, self.num_channels], dtype=self.dtype)
w = fluid.data("weight", self.weight_shape, dtype=self.dtype)
y = F.row_conv(x, w, act=self.act)
exe = fluid.Executor(place)
exe.run(start)
y_np, = exe.run(main,
feed={"input": self.input,
"weight": self.weight},
fetch_list=[y])
return y_np
def functional_imperative(self, place):
with dg.guard(place):
x_var = dg.to_variable(self.input)
w_var = dg.to_variable(self.weight)
y_var = F.row_conv(x_var, w_var, act=self.act)
y_np = y_var.numpy()
return y_np
def nn_layer(self, place):
with dg.guard(place):
x_var = dg.to_variable(self.input)
conv = nn.RowConv(
self.num_channels,
self.context_size,
param_attr=I.NumpyArrayInitializer(self.weight),
act=self.act,
dtype=self.dtype)
y_var = conv(x_var)
y_np = y_var.numpy()
return y_np
def _test_equivalence(self, place):
result1 = self.fluid_layer(place)
result2 = self.functional_declarative(place)
result3 = self.functional_imperative(place)
result4 = self.nn_layer(place)
np.testing.assert_array_almost_equal(result1, result2)
np.testing.assert_array_almost_equal(result2, result3)
np.testing.assert_array_almost_equal(result3, result4)
def runTest(self):
place = fluid.CPUPlace()
self._test_equivalence(place)
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
self._test_equivalence(place)
def load_tests(loader, standard_tests, pattern):
suite = unittest.TestSuite()
suite.addTest(RowConvTestCase(methodName="runTest"))
suite.addTest(RowConvTestCase(methodName="runTest", act="sigmoid"))
suite.addTest(
RowConvTestCase(
methodName="runTest", context_size=5, act="sigmoid"))
return suite
if __name__ == "__main__":
unittest.main()
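All four implementations compared in _test_equivalence compute a lookahead (row) convolution; a minimal NumPy sketch of the assumed semantics, with the (context_size + 1, num_channels) weight layout from setUp and zero padding past the last time step:

import numpy as np

def row_conv_ref(x, weight):
    # out[b, t, c] = sum_k x[b, t + k, c] * weight[k, c]
    batch, time_steps, channels = x.shape
    taps = weight.shape[0]  # context_size + 1 filter taps
    out = np.zeros_like(x)
    for k in range(taps):
        out[:, :time_steps - k, :] += x[:, k:, :] * weight[k]
    return out

x = np.random.uniform(size=(4, 12, 8)).astype("float32")
w = np.random.uniform(size=(4, 8)).astype("float32")  # context_size = 3
print(row_conv_ref(x, w).shape)  # (4, 12, 8)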
...@@ -280,13 +280,13 @@ class TestSplitOpError(unittest.TestCase): ...@@ -280,13 +280,13 @@ class TestSplitOpError(unittest.TestCase):
def test_num_or_sections_type_tensor(): def test_num_or_sections_type_tensor():
x7 = fluid.layers.data(shape=[4], dtype='float16', name='x5') x7 = fluid.layers.data(shape=[4], dtype='float16', name='x5')
paddle.split(input=x7, num_or_sections=2.1, dim=3) fluid.layers.split(input=x7, num_or_sections=2.1, dim=3)
self.assertRaises(TypeError, test_num_or_sections_type_tensor) self.assertRaises(TypeError, test_num_or_sections_type_tensor)
def test_axis_type_tensor(): def test_axis_type_tensor():
x8 = fluid.layers.data(shape=[4], dtype='float16', name='x6') x8 = fluid.layers.data(shape=[4], dtype='float16', name='x6')
paddle.split(input=x8, num_or_sections=2, dim=3.2) fluid.layers.split(input=x8, num_or_sections=2, dim=3.2)
self.assertRaises(TypeError, test_axis_type_tensor) self.assertRaises(TypeError, test_axis_type_tensor)
...@@ -296,7 +296,7 @@ class API_TestSplit(unittest.TestCase): ...@@ -296,7 +296,7 @@ class API_TestSplit(unittest.TestCase):
with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.layers.data('data1', shape=[4, 6, 6], dtype='float64') data1 = fluid.layers.data('data1', shape=[4, 6, 6], dtype='float64')
data2 = fluid.layers.data('data2', shape=[1], dtype='int32') data2 = fluid.layers.data('data2', shape=[1], dtype='int32')
x0, x1, x2 = paddle.split(data1, num_or_sections=3, dim=data2) x0, x1, x2 = fluid.layers.split(data1, num_or_sections=3, dim=data2)
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
input1 = np.random.random([4, 6, 6]).astype('float64') input1 = np.random.random([4, 6, 6]).astype('float64')
...@@ -314,7 +314,7 @@ class API_TestSplit2(unittest.TestCase): ...@@ -314,7 +314,7 @@ class API_TestSplit2(unittest.TestCase):
def test_out(self): def test_out(self):
with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.layers.data('data1', shape=[4, 6, 6], dtype='float64') data1 = fluid.layers.data('data1', shape=[4, 6, 6], dtype='float64')
x0, x1, x2 = paddle.split(data1, num_or_sections=3, dim=2) x0, x1, x2 = fluid.layers.split(data1, num_or_sections=3, dim=2)
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
input1 = np.random.random([4, 6, 6]).astype('float64') input1 = np.random.random([4, 6, 6]).astype('float64')
...@@ -330,7 +330,7 @@ class API_TestSplit3(unittest.TestCase): ...@@ -330,7 +330,7 @@ class API_TestSplit3(unittest.TestCase):
def test_out(self): def test_out(self):
with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.layers.data('data', shape=[-1, 10], dtype='float64') data = fluid.layers.data('data', shape=[-1, 10], dtype='float64')
x0, x1 = paddle.split(data, num_or_sections=(3, 7), dim=1) x0, x1 = fluid.layers.split(data, num_or_sections=(3, 7), dim=1)
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
input1 = np.random.random([1, 10]).astype('float64') input1 = np.random.random([1, 10]).astype('float64')
...@@ -345,7 +345,7 @@ class API_TestSplit4(unittest.TestCase): ...@@ -345,7 +345,7 @@ class API_TestSplit4(unittest.TestCase):
with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.program_guard(fluid.Program(), fluid.Program()):
data = fluid.layers.data('data', shape=[-1, 10], dtype='float64') data = fluid.layers.data('data', shape=[-1, 10], dtype='float64')
index = fluid.layers.data('index', shape=[1], dtype='int32') index = fluid.layers.data('index', shape=[1], dtype='int32')
x0, x1 = paddle.split(data, num_or_sections=(3, index), dim=1) x0, x1 = fluid.layers.split(data, num_or_sections=(3, index), dim=1)
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
input1 = np.random.random([1, 10]).astype('float64') input1 = np.random.random([1, 10]).astype('float64')
...@@ -364,7 +364,7 @@ class API_TestDygraphSplit(unittest.TestCase): ...@@ -364,7 +364,7 @@ class API_TestDygraphSplit(unittest.TestCase):
input_1 = np.random.random([4, 6, 6]).astype("int32") input_1 = np.random.random([4, 6, 6]).astype("int32")
# input is a variable which shape is [4, 6, 6] # input is a variable which shape is [4, 6, 6]
input = fluid.dygraph.to_variable(input_1) input = fluid.dygraph.to_variable(input_1)
x0, x1, x2 = paddle.split(input, num_or_sections=3, dim=1) x0, x1, x2 = fluid.layers.split(input, num_or_sections=3, dim=1)
x0_out = x0.numpy() x0_out = x0.numpy()
x1_out = x1.numpy() x1_out = x1.numpy()
x2_out = x2.numpy() x2_out = x2.numpy()
......
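The shapes these split tests expect, reproduced in NumPy (which takes cut points where the fluid API takes section widths):

import numpy as np

data = np.random.random([4, 6, 6])
x0, x1, x2 = np.split(data, 3, axis=2)   # num_or_sections=3 along dim=2
print(x0.shape)                          # (4, 6, 2)

d2 = np.random.random([1, 10])
y0, y1 = np.split(d2, [3], axis=1)       # widths (3, 7) -> cut point [3]
print(y0.shape, y1.shape)                # (1, 3) (1, 7)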
...@@ -90,7 +90,7 @@ class API_TestSqueeze(unittest.TestCase): ...@@ -90,7 +90,7 @@ class API_TestSqueeze(unittest.TestCase):
with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.layers.data( data1 = fluid.layers.data(
'data1', shape=[-1, 1, 10], dtype='float64') 'data1', shape=[-1, 1, 10], dtype='float64')
result_squeeze = paddle.squeeze(data1, axes=[1]) result_squeeze = fluid.layers.squeeze(data1, axes=[1])
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
input1 = np.random.random([5, 1, 10]).astype('float64') input1 = np.random.random([5, 1, 10]).astype('float64')
...@@ -105,7 +105,7 @@ class API_TestDygraphSqueeze(unittest.TestCase): ...@@ -105,7 +105,7 @@ class API_TestDygraphSqueeze(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
input_1 = np.random.random([5, 1, 10]).astype("int32") input_1 = np.random.random([5, 1, 10]).astype("int32")
input = fluid.dygraph.to_variable(input_1) input = fluid.dygraph.to_variable(input_1)
output = paddle.squeeze(input, axes=[1]) output = fluid.layers.squeeze(input, axes=[1])
out_np = output.numpy() out_np = output.numpy()
expected_out = np.squeeze(input_1, axis=1) expected_out = np.squeeze(input_1, axis=1)
self.assertTrue(np.allclose(expected_out, out_np)) self.assertTrue(np.allclose(expected_out, out_np))
......
...@@ -150,7 +150,7 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase): ...@@ -150,7 +150,7 @@ class TestTensorStackAPIWithLoDTensorArray(unittest.TestCase):
for i in range(self.iter_num): for i in range(self.iter_num):
fluid.layers.array_write(input, zero + i, tensor_array) fluid.layers.array_write(input, zero + i, tensor_array)
self.out_var = paddle.stack(tensor_array, axis=self.axis) self.out_var = fluid.layers.stack(tensor_array, axis=self.axis)
def test_case(self): def test_case(self):
self.assertTrue(self.out_var.shape[self.axis] == -1) self.assertTrue(self.out_var.shape[self.axis] == -1)
...@@ -168,7 +168,7 @@ class API_test(unittest.TestCase): ...@@ -168,7 +168,7 @@ class API_test(unittest.TestCase):
data1 = fluid.layers.data('data1', shape=[1, 2], dtype='float64') data1 = fluid.layers.data('data1', shape=[1, 2], dtype='float64')
data2 = fluid.layers.data('data2', shape=[1, 2], dtype='float64') data2 = fluid.layers.data('data2', shape=[1, 2], dtype='float64')
data3 = fluid.layers.data('data3', shape=[1, 2], dtype='float64') data3 = fluid.layers.data('data3', shape=[1, 2], dtype='float64')
result_stack = paddle.stack([data1, data2, data3], axis=0) result_stack = fluid.layers.stack([data1, data2, data3], axis=0)
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
input1 = np.random.random([1, 2]).astype('float64') input1 = np.random.random([1, 2]).astype('float64')
...@@ -192,14 +192,14 @@ class API_DygraphTest(unittest.TestCase): ...@@ -192,14 +192,14 @@ class API_DygraphTest(unittest.TestCase):
x1 = fluid.dygraph.to_variable(data1) x1 = fluid.dygraph.to_variable(data1)
x2 = fluid.dygraph.to_variable(data2) x2 = fluid.dygraph.to_variable(data2)
x3 = fluid.dygraph.to_variable(data3) x3 = fluid.dygraph.to_variable(data3)
result = paddle.stack([x1, x2, x3], axis=0) result = fluid.layers.stack([x1, x2, x3], axis=0)
result_np = result.numpy() result_np = result.numpy()
expected_result = np.stack([data1, data2, data3], axis=0) expected_result = np.stack([data1, data2, data3], axis=0)
self.assertTrue(np.allclose(expected_result, result_np)) self.assertTrue(np.allclose(expected_result, result_np))
with fluid.dygraph.guard(): with fluid.dygraph.guard():
y1 = fluid.dygraph.to_variable(data1) y1 = fluid.dygraph.to_variable(data1)
result = paddle.stack(y1, axis=0) result = fluid.layers.stack(y1, axis=0)
result_np_2 = result.numpy() result_np_2 = result.numpy()
expected_result_2 = np.stack(data1, axis=0) expected_result_2 = np.stack(data1, axis=0)
self.assertTrue(np.allclose(expected_result_2, result_np_2)) self.assertTrue(np.allclose(expected_result_2, result_np_2))
......
...@@ -225,22 +225,6 @@ def create_test_sum_fp16_class(parent): ...@@ -225,22 +225,6 @@ def create_test_sum_fp16_class(parent):
globals()[cls_name] = TestSumFp16Case globals()[cls_name] = TestSumFp16Case
class API_Test_Elementwise_Sum(unittest.TestCase):
def test_api(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input0 = fluid.layers.fill_constant(
shape=[2, 3], dtype='int64', value=5)
input1 = fluid.layers.fill_constant(
shape=[2, 3], dtype='int64', value=3)
expected_result = np.empty((2, 3))
expected_result.fill(8)
sum_value = paddle.elementwise_sum([input0, input1])
exe = fluid.Executor(fluid.CPUPlace())
result = exe.run(fetch_list=[sum_value])
self.assertEqual((result == expected_result).all(), True)
class TestRaiseSumError(unittest.TestCase): class TestRaiseSumError(unittest.TestCase):
def test_errors(self): def test_errors(self):
def test_type(): def test_type():
......
...@@ -142,7 +142,7 @@ class TestTAPI(unittest.TestCase): ...@@ -142,7 +142,7 @@ class TestTAPI(unittest.TestCase):
def test_out(self): def test_out(self):
with fluid.program_guard(fluid.Program()): with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float64", name="data") data = fluid.data(shape=[10], dtype="float64", name="data")
data_t = paddle.t(data) data_t = fluid.layers.t(data)
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
data_np = np.random.random([10]).astype("float64") data_np = np.random.random([10]).astype("float64")
...@@ -152,7 +152,7 @@ class TestTAPI(unittest.TestCase): ...@@ -152,7 +152,7 @@ class TestTAPI(unittest.TestCase):
with fluid.program_guard(fluid.Program()): with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10, 5], dtype="float64", name="data") data = fluid.data(shape=[10, 5], dtype="float64", name="data")
data_t = paddle.t(data) data_t = fluid.layers.t(data)
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
data_np = np.random.random([10, 5]).astype("float64") data_np = np.random.random([10, 5]).astype("float64")
...@@ -162,7 +162,7 @@ class TestTAPI(unittest.TestCase): ...@@ -162,7 +162,7 @@ class TestTAPI(unittest.TestCase):
with fluid.program_guard(fluid.Program()): with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[1, 5], dtype="float64", name="data") data = fluid.data(shape=[1, 5], dtype="float64", name="data")
data_t = paddle.t(data) data_t = fluid.layers.t(data)
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
data_np = np.random.random([1, 5]).astype("float64") data_np = np.random.random([1, 5]).astype("float64")
...@@ -173,7 +173,7 @@ class TestTAPI(unittest.TestCase): ...@@ -173,7 +173,7 @@ class TestTAPI(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
np_x = np.random.random([10]).astype("float64") np_x = np.random.random([10]).astype("float64")
data = fluid.dygraph.to_variable(np_x) data = fluid.dygraph.to_variable(np_x)
z = paddle.t(data) z = fluid.layers.t(data)
np_z = z.numpy() np_z = z.numpy()
z_expected = np.array(np.transpose(np_x)) z_expected = np.array(np.transpose(np_x))
self.assertEqual((np_z == z_expected).all(), True) self.assertEqual((np_z == z_expected).all(), True)
...@@ -181,7 +181,7 @@ class TestTAPI(unittest.TestCase): ...@@ -181,7 +181,7 @@ class TestTAPI(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
np_x = np.random.random([10, 5]).astype("float64") np_x = np.random.random([10, 5]).astype("float64")
data = fluid.dygraph.to_variable(np_x) data = fluid.dygraph.to_variable(np_x)
z = paddle.t(data) z = fluid.layers.t(data)
np_z = z.numpy() np_z = z.numpy()
z_expected = np.array(np.transpose(np_x)) z_expected = np.array(np.transpose(np_x))
self.assertEqual((np_z == z_expected).all(), True) self.assertEqual((np_z == z_expected).all(), True)
...@@ -189,7 +189,7 @@ class TestTAPI(unittest.TestCase): ...@@ -189,7 +189,7 @@ class TestTAPI(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
np_x = np.random.random([1, 5]).astype("float64") np_x = np.random.random([1, 5]).astype("float64")
data = fluid.dygraph.to_variable(np_x) data = fluid.dygraph.to_variable(np_x)
z = paddle.t(data) z = fluid.layers.t(data)
np_z = z.numpy() np_z = z.numpy()
z_expected = np.array(np.transpose(np_x)) z_expected = np.array(np.transpose(np_x))
self.assertEqual((np_z == z_expected).all(), True) self.assertEqual((np_z == z_expected).all(), True)
...@@ -199,7 +199,7 @@ class TestTAPI(unittest.TestCase): ...@@ -199,7 +199,7 @@ class TestTAPI(unittest.TestCase):
x = fluid.data(name='x', shape=[10, 5, 3], dtype='float64') x = fluid.data(name='x', shape=[10, 5, 3], dtype='float64')
def test_x_dimension_check(): def test_x_dimension_check():
paddle.t(x) fluid.layers.t(x)
self.assertRaises(ValueError, test_x_dimension_check) self.assertRaises(ValueError, test_x_dimension_check)
......
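t() is plain transposition restricted to at most two dimensions, which is why the 3-D case must raise; the 0-D and 1-D cases are identities, matching the np.transpose references used throughout TestTAPI:

import numpy as np

for shape in ([10], [10, 5], [1, 5]):
    x = np.random.random(shape)
    # 1-D transpose is the identity; 2-D swaps the axes.
    assert np.transpose(x).shape == tuple(reversed(shape))
# A [10, 5, 3] input is rejected, as test_x_dimension_check asserts.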
...@@ -19,7 +19,7 @@ import numpy as np ...@@ -19,7 +19,7 @@ import numpy as np
from op_test import OpTest from op_test import OpTest
import paddle.fluid.core as core import paddle.fluid.core as core
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.nn.functional import interpolate from paddle.fluid.layers import interpolate
def trilinear_interp_np(input, def trilinear_interp_np(input,
......
...@@ -81,7 +81,7 @@ class API_TestUnsqueeze(unittest.TestCase): ...@@ -81,7 +81,7 @@ class API_TestUnsqueeze(unittest.TestCase):
def test_out(self): def test_out(self):
with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.layers.data('data1', shape=[-1, 10], dtype='float64') data1 = fluid.layers.data('data1', shape=[-1, 10], dtype='float64')
result_squeeze = paddle.unsqueeze(data1, axes=[1]) result_squeeze = fluid.layers.unsqueeze(data1, axes=[1])
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
input1 = np.random.random([5, 1, 10]).astype('float64') input1 = np.random.random([5, 1, 10]).astype('float64')
...@@ -98,7 +98,7 @@ class TestUnsqueezeOpError(unittest.TestCase): ...@@ -98,7 +98,7 @@ class TestUnsqueezeOpError(unittest.TestCase):
def test_axes_type(): def test_axes_type():
x6 = fluid.layers.data( x6 = fluid.layers.data(
shape=[-1, 10], dtype='float16', name='x3') shape=[-1, 10], dtype='float16', name='x3')
paddle.unsqueeze(x6, axes=3.2) fluid.layers.unsqueeze(x6, axes=3.2)
self.assertRaises(TypeError, test_axes_type) self.assertRaises(TypeError, test_axes_type)
...@@ -108,7 +108,7 @@ class API_TestUnsqueeze2(unittest.TestCase): ...@@ -108,7 +108,7 @@ class API_TestUnsqueeze2(unittest.TestCase):
with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.data('data1', shape=[-1, 10], dtype='float64') data1 = fluid.data('data1', shape=[-1, 10], dtype='float64')
data2 = fluid.data('data2', shape=[1], dtype='int32') data2 = fluid.data('data2', shape=[1], dtype='int32')
result_squeeze = paddle.unsqueeze(data1, axes=data2) result_squeeze = fluid.layers.unsqueeze(data1, axes=data2)
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
input1 = np.random.random([5, 1, 10]).astype('float64') input1 = np.random.random([5, 1, 10]).astype('float64')
...@@ -125,7 +125,7 @@ class API_TestUnsqueeze3(unittest.TestCase): ...@@ -125,7 +125,7 @@ class API_TestUnsqueeze3(unittest.TestCase):
with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = fluid.data('data1', shape=[-1, 10], dtype='float64') data1 = fluid.data('data1', shape=[-1, 10], dtype='float64')
data2 = fluid.data('data2', shape=[1], dtype='int32') data2 = fluid.data('data2', shape=[1], dtype='int32')
result_squeeze = paddle.unsqueeze(data1, axes=[data2, 3]) result_squeeze = fluid.layers.unsqueeze(data1, axes=[data2, 3])
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
input1 = np.random.random([5, 1, 10, 1]).astype('float64') input1 = np.random.random([5, 1, 10, 1]).astype('float64')
...@@ -143,7 +143,7 @@ class API_TestDyUnsqueeze(unittest.TestCase): ...@@ -143,7 +143,7 @@ class API_TestDyUnsqueeze(unittest.TestCase):
input_1 = np.random.random([5, 1, 10]).astype("int32") input_1 = np.random.random([5, 1, 10]).astype("int32")
input1 = np.squeeze(input_1, axis=1) input1 = np.squeeze(input_1, axis=1)
input = fluid.dygraph.to_variable(input_1) input = fluid.dygraph.to_variable(input_1)
output = paddle.unsqueeze(input, axes=[1]) output = fluid.layers.unsqueeze(input, axes=[1])
out_np = output.numpy() out_np = output.numpy()
self.assertTrue(np.allclose(input1, out_np)) self.assertTrue(np.allclose(input1, out_np))
...@@ -154,7 +154,7 @@ class API_TestDyUnsqueeze2(unittest.TestCase): ...@@ -154,7 +154,7 @@ class API_TestDyUnsqueeze2(unittest.TestCase):
input_1 = np.random.random([5, 1, 10]).astype("int32") input_1 = np.random.random([5, 1, 10]).astype("int32")
input1 = np.squeeze(input_1, axis=1) input1 = np.squeeze(input_1, axis=1)
input = fluid.dygraph.to_variable(input_1) input = fluid.dygraph.to_variable(input_1)
output = paddle.unsqueeze(input, axes=1) output = fluid.layers.unsqueeze(input, axes=1)
out_np = output.numpy() out_np = output.numpy()
self.assertTrue(np.allclose(input1, out_np)) self.assertTrue(np.allclose(input1, out_np))
......
...@@ -23,58 +23,6 @@ class TestVarianceLayer(unittest.TestCase): ...@@ -23,58 +23,6 @@ class TestVarianceLayer(unittest.TestCase):
self._dtype = "float64" self._dtype = "float64"
self._input = np.random.random([2, 3, 4, 5]).astype(self._dtype) self._input = np.random.random([2, 3, 4, 5]).astype(self._dtype)
def static(self, axis=None, keepdim=False, unbiased=True):
prog = fluid.Program()
with fluid.program_guard(prog):
data = fluid.data(
name="data", dtype=self._dtype, shape=[None, 3, 4, 5])
out = prog.current_block().create_var(
dtype=self._dtype, shape=[2, 3, 4, 5])
paddle.var(input=data,
axis=axis,
keepdim=keepdim,
unbiased=unbiased,
out=out)
exe = fluid.Executor(self._place)
return exe.run(feed={"data": self._input},
program=prog,
fetch_list=[out])[0]
def dynamic(self, axis=None, keepdim=False, unbiased=True):
with fluid.dygraph.guard(self._place):
data = fluid.dygraph.to_variable(self._input)
out = paddle.var(input=data,
axis=axis,
keepdim=keepdim,
unbiased=unbiased)
return out.numpy()
def numpy(self, axis=None, keepdim=False, unbiased=True):
ddof = 1 if unbiased else 0
axis = tuple(axis) if isinstance(axis, list) else axis
return np.var(self._input, axis=axis, keepdims=keepdim, ddof=ddof)
def test_equal(self):
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for place in places:
self._place = place
self.assertTrue(np.allclose(self.numpy(), self.static()))
self.assertTrue(
np.allclose(
self.numpy(axis=[0, 2]), self.dynamic(axis=[0, 2])))
self.assertTrue(
np.allclose(
self.numpy(
axis=[1, 3], keepdim=True),
self.dynamic(
axis=[1, 3], keepdim=True)))
self.assertTrue(
np.allclose(
self.numpy(unbiased=False), self.dynamic(unbiased=False)))
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
...@@ -59,121 +59,5 @@ class TestWhereOp3(TestWhereOp): ...@@ -59,121 +59,5 @@ class TestWhereOp3(TestWhereOp):
self.cond = np.array(np.random.randint(2, size=(20, 2, 4)), dtype=bool) self.cond = np.array(np.random.randint(2, size=(20, 2, 4)), dtype=bool)
class TestWhereAPI(unittest.TestCase):
def setUp(self):
self.init_data()
def init_data(self):
self.shape = [10, 15]
self.cond = np.array(np.random.randint(2, size=self.shape), dtype=bool)
self.x = np.random.uniform(-2, 3, self.shape).astype(np.float32)
self.y = np.random.uniform(-2, 3, self.shape).astype(np.float32)
self.out = np.where(self.cond, self.x, self.y)
def ref_x_backward(self, dout):
return np.where(self.cond == True, dout, 0)
def ref_y_backward(self, dout):
return np.where(self.cond == False, dout, 0)
def test_api(self, use_cuda=False):
for x_stop_gradient in [False, True]:
for y_stop_gradient in [False, True]:
with fluid.program_guard(Program(), Program()):
cond = fluid.layers.data(
name='cond', shape=self.shape, dtype='bool')
x = fluid.layers.data(
name='x', shape=self.shape, dtype='float32')
y = fluid.layers.data(
name='y', shape=self.shape, dtype='float32')
x.stop_gradient = x_stop_gradient
y.stop_gradient = y_stop_gradient
result = paddle.where(cond, x, y)
append_backward(layers.mean(result))
for use_cuda in [False, True]:
if use_cuda and not fluid.core.is_compiled_with_cuda():
break
place = fluid.CUDAPlace(
0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
fetch_list = [result, result.grad_name]
if x_stop_gradient is False:
fetch_list.append(x.grad_name)
if y_stop_gradient is False:
fetch_list.append(y.grad_name)
out = exe.run(
fluid.default_main_program(),
feed={'cond': self.cond,
'x': self.x,
'y': self.y},
fetch_list=fetch_list)
assert np.array_equal(out[0], self.out)
if x_stop_gradient is False:
assert np.array_equal(out[2],
self.ref_x_backward(out[1]))
if y.stop_gradient is False:
assert np.array_equal(
out[3], self.ref_y_backward(out[1]))
elif y.stop_gradient is False:
assert np.array_equal(out[2],
self.ref_y_backward(out[1]))
def test_api_broadcast(self, use_cuda=False):
main_program = Program()
with fluid.program_guard(main_program):
x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32')
y = fluid.layers.data(name='y', shape=[4, 2], dtype='float32')
x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype("float32")
y_i = np.array([[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0]]).astype("float32")
result = paddle.where(x > 1, x=x, y=y)
for use_cuda in [False, True]:
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
out = exe.run(fluid.default_main_program(),
feed={'x': x_i,
'y': y_i},
fetch_list=[result])
assert np.array_equal(out[0], np.where(x_i > 1, x_i, y_i))
class TestWhereDygraphAPI(unittest.TestCase):
def test_api(self):
with fluid.dygraph.guard():
x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64")
y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float64")
cond_i = np.array([False, False, True, True]).astype("bool")
x = fluid.dygraph.to_variable(x_i)
y = fluid.dygraph.to_variable(y_i)
cond = fluid.dygraph.to_variable(cond_i)
out = paddle.where(cond, x, y)
assert np.array_equal(out.numpy(), np.where(cond_i, x_i, y_i))
class TestWhereOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64")
y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float64")
cond_i = np.array([False, False, True, True]).astype("bool")
def test_Variable():
paddle.where(cond_i, x_i, y_i)
self.assertRaises(TypeError, test_Variable)
def test_type():
x = fluid.layers.data(name='x', shape=[4], dtype='bool')
y = fluid.layers.data(name='y', shape=[4], dtype='float16')
cond = fluid.layers.data(name='cond', shape=[4], dtype='int32')
paddle.where(cond, x, y)
self.assertRaises(TypeError, test_type)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
...@@ -16,11 +16,9 @@ ...@@ -16,11 +16,9 @@
# including layers, linear, conv, rnn etc. # including layers, linear, conv, rnn etc.
from .layer import norm from .layer import norm
from .functional import extension
__all__ = [] __all__ = []
__all__ += norm.__all__ __all__ += norm.__all__
__all__ += extension.__all__
# TODO: define alias in nn directory # TODO: define alias in nn directory
# from .clip import ErrorClipByValue #DEFINE_ALIAS # from .clip import ErrorClipByValue #DEFINE_ALIAS
...@@ -206,22 +204,6 @@ from .functional.activation import sigmoid #DEFINE_ALIAS ...@@ -206,22 +204,6 @@ from .functional.activation import sigmoid #DEFINE_ALIAS
# from .functional.activation import tanh_shrink #DEFINE_ALIAS # from .functional.activation import tanh_shrink #DEFINE_ALIAS
# from .functional.activation import thresholded_relu #DEFINE_ALIAS # from .functional.activation import thresholded_relu #DEFINE_ALIAS
from .functional.activation import log_softmax #DEFINE_ALIAS from .functional.activation import log_softmax #DEFINE_ALIAS
# from .functional.extension import add_position_encoding #DEFINE_ALIAS
# from .functional.extension import autoincreased_step_counter #DEFINE_ALIAS
# from .functional.extension import continuous_value_model #DEFINE_ALIAS
# from .functional.extension import filter_by_instag #DEFINE_ALIAS
# from .functional.extension import linear_chain_crf #DEFINE_ALIAS
# from .functional.extension import merge_selected_rows #DEFINE_ALIAS
# from .functional.extension import multiclass_nms #DEFINE_ALIAS
# from .functional.extension import polygon_box_transform #DEFINE_ALIAS
# from .functional.extension import random_crop #DEFINE_ALIAS
from .functional.extension import row_conv #DEFINE_ALIAS
# from .functional.extension import rpn_target_assign #DEFINE_ALIAS
# from .functional.extension import similarity_focus #DEFINE_ALIAS
# from .functional.extension import target_assign #DEFINE_ALIAS
# from .functional.extension import temporal_shift #DEFINE_ALIAS
# from .functional.extension import warpctc #DEFINE_ALIAS
from .functional.extension import diag_embed #DEFINE_ALIAS
# from .functional.rnn import gru_unit #DEFINE_ALIAS
# from .functional.rnn import lstm #DEFINE_ALIAS
# from .functional.rnn import lstm_unit #DEFINE_ALIAS
@@ -130,24 +130,6 @@ from .activation import sigmoid #DEFINE_ALIAS
# from .activation import tanh_shrink #DEFINE_ALIAS
# from .activation import thresholded_relu #DEFINE_ALIAS
from .activation import log_softmax #DEFINE_ALIAS
from . import extension
__all__ += extension.__all__
# from .extension import add_position_encoding #DEFINE_ALIAS
# from .extension import autoincreased_step_counter #DEFINE_ALIAS
# from .extension import continuous_value_model #DEFINE_ALIAS
# from .extension import filter_by_instag #DEFINE_ALIAS
# from .extension import linear_chain_crf #DEFINE_ALIAS
# from .extension import merge_selected_rows #DEFINE_ALIAS
# from .extension import multiclass_nms #DEFINE_ALIAS
# from .extension import polygon_box_transform #DEFINE_ALIAS
# from .extension import random_crop #DEFINE_ALIAS
from .extension import row_conv #DEFINE_ALIAS
# from .extension import rpn_target_assign #DEFINE_ALIAS
# from .extension import similarity_focus #DEFINE_ALIAS
# from .extension import target_assign #DEFINE_ALIAS
# from .extension import temporal_shift #DEFINE_ALIAS
# from .extension import warpctc #DEFINE_ALIAS
from .extension import diag_embed #DEFINE_ALIAS
# from .rnn import gru_unit #DEFINE_ALIAS
# from .rnn import lstm #DEFINE_ALIAS
# from .rnn import lstm_unit #DEFINE_ALIAS
@@ -180,17 +162,3 @@ from .extension import diag_embed #DEFINE_ALIAS
# from .lod import dynamic_gru #DEFINE_ALIAS
# from .lod import dynamic_lstm #DEFINE_ALIAS
# from .lod import dynamic_lstmp #DEFINE_ALIAS
from . import common
#__all__ += common.__all__
# from .common import dropout #DEFINE_ALIAS
# from .common import embedding #DEFINE_ALIAS
# from .common import fc #DEFINE_ALIAS
# from .common import label_smooth #DEFINE_ALIAS
# from .common import one_hot #DEFINE_ALIAS
# from .common import pad #DEFINE_ALIAS
# from .common import pad_constant_like #DEFINE_ALIAS
# from .common import pad2d #DEFINE_ALIAS
# from .common import unfold #DEFINE_ALIAS
# from .common import bilinear_tensor_product #DEFINE_ALIAS
# from .common import assign #DEFINE_ALIAS
from .common import interpolate #DEFINE_ALIAS
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.tensor import Variable, fill_constant
# TODO: define the common functions to build a neural network
# __all__ = ['dropout',
# 'embedding',
# 'fc',
# 'label_smooth',
# 'one_hot',
# 'pad',
# 'pad_constant_like',
# 'pad2d',
# 'unfold',
# 'bilinear_tensor_product',
# 'assign',
# 'interpolate']
__all__ = ['interpolate']
def interpolate(input,
out_shape=None,
scale=None,
name=None,
resample='BILINEAR',
actual_shape=None,
align_corners=True,
align_mode=1,
data_format='NCHW'):
"""
This op resizes a batch of images.
The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w)
or (num_batches, in_h, in_w, channels), or a 5-D Tensor of the shape
(num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
and the resizing only applies to the spatial dimensions (depth, height and width).
**Warning:** the parameter :attr:`actual_shape` will be deprecated in the
future; use :attr:`out_shape` instead.
Supporting resample methods:
'BILINEAR' : Bilinear interpolation
'TRILINEAR' : Trilinear interpolation
'NEAREST' : Nearest neighbor interpolation
'BICUBIC' : Bicubic interpolation
Nearest neighbor interpolation performs nearest neighbor interpolation
in both the 3rd dimension (the height direction) and the 4th dimension (the width
direction) of the input tensor.
Bilinear interpolation is an extension of linear interpolation for
interpolating functions of two variables (e.g. H-direction and
W-direction in this op) on a rectilinear 2D grid. The key idea is
to perform linear interpolation first in one direction, and then
again in the other direction.
Trilinear interpolation is an extension of linear interpolation for
interpolating functions of three variables (e.g. D-direction,
H-direction and W-direction in this op) on a rectilinear 3D grid.
The linear interpolation is performed on three directions.
align_corners and align_mode are optional parameters; together they select
the coordinate mapping used by the interpolation.
Bicubic interpolation is an extension of cubic interpolation for interpolating
data points on a two-dimensional regular grid. The interpolated surface is
smoother than corresponding surfaces obtained by bilinear interpolation or
nearest-neighbor interpolation.
Example:
.. code-block:: text
For scale:
if align_corners = True && out_size > 1 :
scale_factor = (in_size-1.0)/(out_size-1.0)
else:
scale_factor = float(in_size/out_size)
Nearest neighbor interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = floor (H_{in} * scale_{factor})
W_out = floor (W_{in} * scale_{factor})
else:
align_corners = True
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = round(H_{in} * scale_{factor})
W_out = round(W_{in} * scale_{factor})
Bilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Bicubic interpolation:
if:
align_corners = False
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,H_in,W_in)
output: (N,C,H_out,W_out) where:
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
Trilinear interpolation:
if:
align_corners = False , align_mode = 0
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = (D_{in}+0.5) * scale_{factor} - 0.5
H_out = (H_{in}+0.5) * scale_{factor} - 0.5
W_out = (W_{in}+0.5) * scale_{factor} - 0.5
else:
input : (N,C,D_in,H_in,W_in)
output: (N,C,D_out,H_out,W_out) where:
D_out = D_{in} * scale_{factor}
H_out = H_{in} * scale_{factor}
W_out = W_{in} * scale_{factor}
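For example (illustrative values only): a 4-D input with
H_in = W_in = 6, scale = 2 and align_corners = False gives, for
nearest neighbor interpolation,
H_out = floor(6 * 2) = 12 and W_out = floor(6 * 2) = 12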
For details of nearest neighbor interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
For details of bilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bilinear_interpolation.
For details of trilinear interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Trilinear_interpolation.
For details of bicubic interpolation, please refer to Wikipedia:
https://en.wikipedia.org/wiki/Bicubic_interpolation
Parameters:
input (Variable): 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
its data format is specified by :attr:`data_format`.
out_shape(list|tuple|Variable|None): Output shape of image resize
layer, the shape is (out_h, out_w) when input is a 4-D Tensor and is
(out_d, out_h, out_w) when input is a 5-D Tensor. Default: None. If
a list, each element can be an integer or a Tensor Variable of shape [1].
If a Tensor Variable, its dimension size should be 1.
scale(float|Variable|None): The multiplier for the input height or width. At
least one of :attr:`out_shape` or :attr:`scale` must be set.
And :attr:`out_shape` has a higher priority than :attr:`scale`.
Default: None.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
resample(str): The resample method. It supports 'BILINEAR', 'TRILINEAR' ,
'BICUBIC' and 'NEAREST' currently. Default: 'BILINEAR'
actual_shape(Variable): An optional input to specify output shape
dynamically. If provided, image resize
according to this given shape rather than
:attr:`out_shape` and :attr:`scale` specifying
shape. That is to say actual_shape has the
highest priority. It is recommended to use
:attr:`out_shape` if you want to specify output
shape dynamically, because :attr:`actual_shape`
will be deprecated. When using actual_shape to
specify output shape, one of :attr:`out_shape`
and :attr:`scale` should also be set, otherwise
errors will occur during graph construction.
Default: None
align_corners(bool) : An optional bool, If True, the centers of the 4 corner pixels of the
input and output tensors are aligned, preserving the values at the
corner pixels.
Default: True
align_mode(int) : An optional flag for bilinear interpolation. It can be '0'
for src_idx = scale*(dst_idx+0.5)-0.5 , or '1' for
src_idx = scale*dst_idx.
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`, `"NCDHW"`,
`"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
Returns:
A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
Raises:
TypeError: out_shape should be a list or tuple or Variable.
TypeError: actual_shape should either be Variable or None.
ValueError: The 'resample' of image_resize can only be 'BILINEAR',
'TRILINEAR', 'BICUBIC', or 'NEAREST' currently.
ValueError: 'BILINEAR', 'BICUBIC' and 'NEAREST' only support 4-D tensor.
ValueError: 'TRILINEAR' only support 5-D tensor.
ValueError: One of out_shape and scale must not be None.
ValueError: out_shape length should be 2 for input 4-D tensor.
ValueError: out_shape length should be 3 for input 5-D tensor.
ValueError: scale should be greater than zero.
TypeError: align_corners should be a bool value
ValueError: align_mode can only be '0' or '1'
ValueError: data_format can only be 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.
Examples:
.. code-block:: python
#declarative mode
import paddle
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,10])
#1
output = paddle.nn.functional.interpolate(input=input,out_shape=[12,12])
#2
#x = np.array([2]).astype("int32")
#dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
#fluid.layers.assign(input=x, output=dim1)
#output = paddle.nn.functional.interpolate(input=input,out_shape=[12,dim1])
#3
#x = np.array([3,12]).astype("int32")
#shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
#fluid.layers.assign(input=x, output=shape_tensor)
#output = paddle.nn.functional.interpolate(input=input,out_shape=shape_tensor)
#4
#x = np.array([0.5]).astype("float32")
#scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
#fluid.layers.assign(x,scale_tensor)
#output = paddle.nn.functional.interpolate(input=input,scale=scale_tensor)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
input_data = np.random.rand(2,3,6,10).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data},
fetch_list=[output],
return_numpy=True)
print(output_data[0].shape)
#1
# (2, 3, 12, 12)
#2
# (2, 3, 12, 2)
#3
# (2, 3, 3, 12)
#4
# (2, 3, 3, 5)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
output = paddle.nn.functional.interpolate(input=input, out_shape=[12,12])
print(output.shape)
# [2L, 3L, 12L, 12L]
"""
resample_methods = {
'BILINEAR': 'bilinear',
'TRILINEAR': 'trilinear',
'NEAREST': 'nearest',
'BICUBIC': 'bicubic',
}
if resample not in resample_methods:
raise ValueError(
"The 'resample' of image_resize can only be 'BILINEAR', 'TRILINEAR', "
" 'BICUBIC' or 'NEAREST' currently.")
resample_type = resample_methods[resample]
if resample in ['BILINEAR', 'NEAREST', 'BICUBIC'] and len(input.shape) != 4:
raise ValueError(
"'BILINEAR', 'BICUBIC' and 'NEAREST' only support 4-D tensor.")
if resample == 'TRILINEAR' and len(input.shape) != 5:
raise ValueError("'TRILINEAR'only support 5-D tensor.")
if not isinstance(align_corners, bool):
raise TypeError("Attr align_corners should be a bool value")
if align_mode != 0 and align_mode != 1:
raise ValueError("align_mode can only be 0 or 1")
if out_shape is None and scale is None:
raise ValueError("One of out_shape and scale must not be None.")
helper = LayerHelper('{}_interp'.format(resample_type), **locals())
dtype = helper.input_dtype()
if len(input.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCHW` or `NHWC` supported for 4-D input.")
elif len(input.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Got wrong value for param `data_format`: " + data_format +
" received but only `NCDHW` or `NDHWC` supported for 5-D input.")
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if data_format == 'NCHW' or data_format == 'NCDHW':
data_layout = 'NCHW'
if data_format == 'NHWC' or data_format == 'NDHWC':
data_layout = 'NHWC'
inputs = {"X": input}
attrs = {
"out_d": -1,
"out_h": -1,
"out_w": -1,
"interp_method": resample_type,
"align_corners": align_corners,
"align_mode": align_mode,
"data_layout": data_layout
}
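# out_d/out_h/out_w start at -1, meaning "not set"; they are filled in below
# from out_shape, or left for the Scale input/attr to determine at runtime.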
if out_shape is not None:
if isinstance(out_shape, Variable):
out_shape.stop_gradient = True
inputs['OutSize'] = out_shape
else:
if not _is_list_or_tuple_(out_shape):
raise TypeError(
"out_shape should be a list or tuple or Variable.")
# Validate the shape
contain_var = False
for dim_idx, dim_size in enumerate(out_shape):
if isinstance(dim_size, Variable):
contain_var = True
continue
assert dim_size > 0, (
"Each dimension size given in out_shape must be greater than 0."
)
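# When out_shape mixes Python ints and Variables, every entry is converted
# to a 1-D int32 tensor and passed through SizeTensor; entries of -1 in
# size_list mark dimensions whose size is only known at runtime.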
if contain_var:
new_size_tensor = []
size_list = []
for dim in out_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_size_tensor.append(dim)
size_list.append(-1)
else:
assert (isinstance(dim, int))
temp_out = helper.create_variable_for_type_inference(
'int32')
fill_constant(
[1], 'int32', dim, force_cpu=True, out=temp_out)
new_size_tensor.append(temp_out)
size_list.append(dim)
inputs['SizeTensor'] = new_size_tensor
if len(input.shape) == 4:
if len(out_shape) != 2:
raise ValueError("out_shape length should be 2 for "
"input 4-D tensor.")
if contain_var:
attrs['out_h'] = size_list[0]
attrs['out_w'] = size_list[1]
else:
out_shape = list(map(int, out_shape))
attrs['out_h'] = out_shape[0]
attrs['out_w'] = out_shape[1]
if len(input.shape) == 5:
if len(out_shape) != 3:
raise ValueError("out_shape length should be 3 for "
"input 5-D tensor.")
if contain_var:
attrs['out_d'] = size_list[0]
attrs['out_h'] = size_list[1]
attrs['out_w'] = size_list[2]
else:
out_shape = list(map(int, out_shape))
attrs['out_d'] = out_shape[0]
attrs['out_h'] = out_shape[1]
attrs['out_w'] = out_shape[2]
else:
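# No out_shape was given, so resize by scale: a Variable scale is wired in
# as the Scale input, while a Python float/int becomes the 'scale' attribute.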
if isinstance(scale, Variable):
scale.stop_gradient = True
inputs["Scale"] = scale
elif isinstance(scale, float) or isinstance(scale, int):
if scale <= 0:
raise ValueError("Attr(scale) should be greater than zero.")
attrs['scale'] = float(scale)
else:
raise TypeError(
"Attr(scale)'s type should be float, int or Variable.")
if isinstance(actual_shape, Variable):
warnings.warn(
"actual_shape will be deprecated, it is recommended to use "
"out_shape instead of actual_shape to specify output shape dynamically."
)
actual_shape.stop_gradient = True
inputs["OutSize"] = actual_shape
elif actual_shape is not None:
raise TypeError("actual_shape should either be Variable or None.")
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='{}_interp'.format(resample_type),
inputs=inputs,
outputs={"Out": out},
attrs=attrs)
return out
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define the extension functions
__all__ = [
# 'add_position_encoding',
# 'autoincreased_step_counter',
# 'continuous_value_model',
# 'filter_by_instag',
# 'linear_chain_crf',
# 'merge_selected_rows',
# 'multiclass_nms',
# 'polygon_box_transform',
# 'random_crop',
'row_conv',
# 'rpn_target_assign',
# 'similarity_focus',
# 'target_assign',
# 'temporal_shift',
# 'warpctc',
'diag_embed'
]
import numpy as np
from ...fluid.data_feeder import check_dtype
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import Variable, in_dygraph_mode
from ...fluid.layers.tensor import assign
from ...fluid import core, dygraph_utils
from ...fluid.layers.layer_function_generator import templatedoc
def diag_embed(input, offset=0, dim1=-2, dim2=-1):
"""
This OP creates a tensor whose diagonals of certain 2D planes (specified by dim1 and dim2)
are filled by ``input``. By default, a 2D plane formed by the last two dimensions
of the returned tensor will be selected.
The argument ``offset`` determines which diagonal is generated:
- If offset = 0, it is the main diagonal.
- If offset > 0, it is above the main diagonal.
- If offset < 0, it is below the main diagonal.
Args:
input(Variable|numpy.ndarray): The input tensor. Must be at least 1-dimensional. The input data type should be float32, float64, int32, int64.
offset(int, optional): Which diagonal to consider. Default: 0 (main diagonal).
dim1(int, optional): The first dimension with respect to which to take diagonal. Default: -2.
dim2(int, optional): The second dimension with respect to which to take diagonal. Default: -1.
Returns:
Variable, the output data type is the same as input data type.
Examples:
.. code-block:: python
import paddle.nn.functional as F
import paddle.fluid.dygraph as dg
import numpy as np
diag_embed = np.random.randn(2, 3).astype('float32')
# [[ 0.7545889 , -0.25074545, 0.5929117 ],
# [-0.6097662 , -0.01753256, 0.619769 ]]
with dg.guard():
data1 = F.diag_embed(diag_embed)
data1.numpy()
# [[[ 0.7545889 , 0. , 0. ],
# [ 0. , -0.25074545, 0. ],
# [ 0. , 0. , 0.5929117 ]],
# [[-0.6097662 , 0. , 0. ],
# [ 0. , -0.01753256, 0. ],
# [ 0. , 0. , 0.619769 ]]]
data2 = F.diag_embed(diag_embed, offset=-1, dim1=0, dim2=2)
data2.numpy()
# [[[ 0. , 0. , 0. , 0. ],
# [ 0.7545889 , 0. , 0. , 0. ],
# [ 0. , -0.25074545, 0. , 0. ],
# [ 0. , 0. , 0.5929117 , 0. ]],
#
# [[ 0. , 0. , 0. , 0. ],
# [-0.6097662 , 0. , 0. , 0. ],
# [ 0. , -0.01753256, 0. , 0. ],
# [ 0. , 0. , 0.619769 , 0. ]]]
data3 = F.diag_embed(diag_embed, offset=1, dim1=0, dim2=2)
data3.numpy()
# [[[ 0. , 0.7545889 , 0. , 0. ],
# [ 0. , -0.6097662 , 0. , 0. ]],
#
# [[ 0. , 0. , -0.25074545, 0. ],
# [ 0. , 0. , -0.01753256, 0. ]],
#
# [[ 0. , 0. , 0. , 0.5929117 ],
# [ 0. , 0. , 0. , 0.619769 ]],
#
# [[ 0. , 0. , 0. , 0. ],
# [ 0. , 0. , 0. , 0. ]]]
"""
if not isinstance(input, Variable):
input = assign(input)
def __check_input(input, offset, dim1, dim2):
check_dtype(input.dtype, 'Input',
['int32', 'int64', 'float16', 'float32', 'float64'],
'diag_embed')
input_shape = list(input.shape)
assert len(input_shape) >= 1, \
"Input must be at least 1-dimensional, " \
"But received Input's dimensional: %s.\n" % \
len(input_shape)
assert np.abs(dim1) <= len(input_shape), \
"Dim1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape) + 1), len(input_shape), dim1)
assert np.abs(dim2) <= len(input_shape), \
"Dim2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape) + 1), len(input_shape), dim2)
dim1_ = dim1 if dim1 >= 0 else len(input_shape) + dim1 + 1
dim2_ = dim2 if dim2 >= 0 else len(input_shape) + dim2 + 1
assert dim1_ != dim2_, \
"dim1 and dim2 cannot be the same dimension." \
"But received dim1 = %d, dim2 = %d\n"%(dim1, dim2)
if not in_dygraph_mode():
__check_input(input, offset, dim1, dim2)
helper = LayerHelper("diag_embed", **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='diag_embed',
inputs={'Input': [input]},
attrs={'offset': offset,
'dim1': dim1,
'dim2': dim2},
outputs={'Out': [out]})
out.stop_gradient = True
return out
@templatedoc()
def row_conv(input, weight, act=None):
"""
${comment}
Args:
input (Variable): the input(X) is a LoDTensor or Tensor. A LoDTensor(X)
supports variable time-length input sequences. The underlying
tensor in this LoDTensor is a matrix with shape (T, D), where
T is the total time steps in this mini-batch and D is the input
data dimension.
If the input is a padded minibatch, the shape of the input is
(N, T, D), N is batch size, T is the max time steps in the batch,
D is the input data dimension.
weight (Variable): The weight. A Tensor with shape
(future_context_size + 1, D), where future_context_size is the
context size of the RowConv operator.
act (str): Non-linear activation to be applied to output variable.
Returns:
${out_comment}.
Examples:
.. code-block:: python
from paddle import fluid, nn
import paddle.fluid.dygraph as dg
import paddle.nn.functional as F
import numpy as np
batch_size = 4
time_steps = 8
feature_size = 6
context_size = 4
x = np.random.randn(batch_size, time_steps, feature_size).astype(np.float32)
weight = np.random.randn(context_size + 1, feature_size).astype(np.float32)
place = fluid.CPUPlace()
with dg.guard(place):
x_var = dg.to_variable(x)
w_var = dg.to_variable(weight)
y_var = F.row_conv(x_var, w_var)
y_np = y_var.numpy()
print(y_np.shape)
# (4, 8, 6)
"""
if in_dygraph_mode():
pre_act = core.ops.row_conv(input, weight)
out = dygraph_utils._append_activation_in_dygraph(pre_act, act)
return out
else:
helper = LayerHelper('row_conv', **locals())
dtype = helper.input_dtype()
inputs = {'X': [input], 'Filter': [weight]}
pre_act = helper.create_variable_for_type_inference(dtype)
outputs = {'Out': [pre_act]}
helper.append_op(type='row_conv', inputs=inputs, outputs=outputs)
out = helper.append_activation(pre_act)
return out
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
#from .math import *
#from .creation import *
#from .linalg import *
# TODO: define alias in tensor and framework directory
# from .creation import create_tensor #DEFINE_ALIAS
# from .creation import create_lod_tensor #DEFINE_ALIAS
# from .creation import create_random_int_lod #DEFINE_ALIAS
# from .creation import crop_tensor #DEFINE_ALIAS
# from .creation import diag #DEFINE_ALIAS
from .creation import eye #DEFINE_ALIAS
# from .creation import fill_constant #DEFINE_ALIAS
# from .creation import get__from_selected_rows #DEFINE_ALIAS
from .creation import linspace #DEFINE_ALIAS
# from .creation import ones #DEFINE_ALIAS
# from .creation import ones_like #DEFINE_ALIAS
# from .creation import range #DEFINE_ALIAS
# from .creation import zeros #DEFINE_ALIAS
# from .creation import zeros_like #DEFINE_ALIAS
from .creation import arange #DEFINE_ALIAS
# from .creation import eye #DEFINE_ALIAS
from .creation import full #DEFINE_ALIAS
# from .creation import linspace #DEFINE_ALIAS
# from .creation import full_like #DEFINE_ALIAS
from .creation import triu #DEFINE_ALIAS
from .creation import tril #DEFINE_ALIAS
from .creation import meshgrid #DEFINE_ALIAS
# from .stat import mean #DEFINE_ALIAS
# from .stat import reduce_mean #DEFINE_ALIAS
# from .stat import std #DEFINE_ALIAS
# from .stat import var #DEFINE_ALIAS
from .logic import equal #DEFINE_ALIAS
# from .logic import greater_equal #DEFINE_ALIAS
# from .logic import greater_than #DEFINE_ALIAS
# from .logic import is_empty #DEFINE_ALIAS
# from .logic import isfinite #DEFINE_ALIAS
# from .logic import less_equal #DEFINE_ALIAS
# from .logic import less_than #DEFINE_ALIAS
# from .logic import logical_and #DEFINE_ALIAS
# from .logic import logical_not #DEFINE_ALIAS
# from .logic import logical_or #DEFINE_ALIAS
# from .logic import logical_xor #DEFINE_ALIAS
# from .logic import not_equal #DEFINE_ALIAS
# from .logic import reduce_all #DEFINE_ALIAS
# from .logic import reduce_any #DEFINE_ALIAS
from .logic import allclose #DEFINE_ALIAS
from .logic import elementwise_equal #DEFINE_ALIAS
# from .logic import isnan #DEFINE_ALIAS
# from . import Tensor #DEFINE_ALIAS
# from . import LoDTensor #DEFINE_ALIAS
# from . import LoDTensorArray #DEFINE_ALIAS
# from .random import gaussin #DEFINE_ALIAS
# from .random import uniform #DEFINE_ALIAS
# from .random import shuffle #DEFINE_ALIAS
from .random import randn #DEFINE_ALIAS
# from .random import rand #DEFINE_ALIAS
from .random import randint #DEFINE_ALIAS
from .random import randperm
# from .math import abs #DEFINE_ALIAS
# from .math import acos #DEFINE_ALIAS
# from .math import asin #DEFINE_ALIAS
from .math import atan #DEFINE_ALIAS
# from .math import ceil #DEFINE_ALIAS
# from .math import cos #DEFINE_ALIAS
# from .math import cumsum #DEFINE_ALIAS
# from .math import elementwise_add #DEFINE_ALIAS
# from .math import elementwise_div #DEFINE_ALIAS
# from .math import elementwise_floordiv #DEFINE_ALIAS
# from .math import elementwise_max #DEFINE_ALIAS
# from .math import elementwise_min #DEFINE_ALIAS
# from .math import elementwise_mod #DEFINE_ALIAS
# from .math import elementwise_mul #DEFINE_ALIAS
# from .math import elementwise_pow #DEFINE_ALIAS
# from .math import elementwise_sub #DEFINE_ALIAS
# from .math import exp #DEFINE_ALIAS
# from .math import floor #DEFINE_ALIAS
# from .math import increment #DEFINE_ALIAS
# from .math import log #DEFINE_ALIAS
from .math import mul #DEFINE_ALIAS
# from .math import multiplex #DEFINE_ALIAS
from .math import pow #DEFINE_ALIAS
# from .math import reciprocal #DEFINE_ALIAS
# from .math import reduce_max #DEFINE_ALIAS
# from .math import reduce_min #DEFINE_ALIAS
# from .math import reduce_prod #DEFINE_ALIAS
# from .math import reduce_sum #DEFINE_ALIAS
# from .math import round #DEFINE_ALIAS
# from .math import rsqrt #DEFINE_ALIAS
# from .math import scale #DEFINE_ALIAS
# from .math import sign #DEFINE_ALIAS
from .math import sin #DEFINE_ALIAS
from .math import sqrt #DEFINE_ALIAS
# from .math import square #DEFINE_ALIAS
# from .math import stanh #DEFINE_ALIAS
from .math import sum #DEFINE_ALIAS
# from .math import sums #DEFINE_ALIAS
from .math import tanh #DEFINE_ALIAS
from .math import elementwise_sum #DEFINE_ALIAS
from .math import max #DEFINE_ALIAS
from .math import min #DEFINE_ALIAS
from .math import mm #DEFINE_ALIAS
from .math import div #DEFINE_ALIAS
from .math import add #DEFINE_ALIAS
# from .math import atan #DEFINE_ALIAS
from .math import logsumexp #DEFINE_ALIAS
# from .math import inverse #DEFINE_ALIAS
from .math import log1p #DEFINE_ALIAS
# from .math import erf #DEFINE_ALIAS
from .math import addcmul #DEFINE_ALIAS
from .math import addmm #DEFINE_ALIAS
from .math import clamp #DEFINE_ALIAS
# from .attribute import rank #DEFINE_ALIAS
# from .attribute import shape #DEFINE_ALIAS
# from .io import save #DEFINE_ALIAS
# from .io import load #DEFINE_ALIAS
from .linalg import matmul #DEFINE_ALIAS
from .linalg import dot #DEFINE_ALIAS
from .linalg import bmm #DEFINE_ALIAS
# from .linalg import einsum #DEFINE_ALIAS
from .linalg import norm #DEFINE_ALIAS
# from .linalg import transpose #DEFINE_ALIAS
from .linalg import dist #DEFINE_ALIAS
from .linalg import t #DEFINE_ALIAS
from .linalg import cross #DEFINE_ALIAS
# from .linalg import cholesky #DEFINE_ALIAS
# from .manipulation import cast #DEFINE_ALIAS
# from .manipulation import concat #DEFINE_ALIAS
# from .manipulation import expand #DEFINE_ALIAS
# from .manipulation import expand_as #DEFINE_ALIAS
# from .manipulation import flatten #DEFINE_ALIAS
from .manipulation import gather #DEFINE_ALIAS
# from .manipulation import gather_nd #DEFINE_ALIAS
# from .manipulation import reshape #DEFINE_ALIAS
# from .manipulation import reverse #DEFINE_ALIAS
# from .manipulation import scatter #DEFINE_ALIAS
# from .manipulation import scatter_nd_add #DEFINE_ALIAS
# from .manipulation import scatter_nd #DEFINE_ALIAS
# from .manipulation import shard_index #DEFINE_ALIAS
# from .manipulation import slice #DEFINE_ALIAS
from .manipulation import split #DEFINE_ALIAS
from .manipulation import squeeze #DEFINE_ALIAS
from .manipulation import stack #DEFINE_ALIAS
# from .manipulation import strided_slice #DEFINE_ALIAS
# from .manipulation import transpose #DEFINE_ALIAS
# from .manipulation import unique #DEFINE_ALIAS
# from .manipulation import unique_with_counts #DEFINE_ALIAS
from .manipulation import unsqueeze #DEFINE_ALIAS
# from .manipulation import unstack #DEFINE_ALIAS
from .manipulation import flip #DEFINE_ALIAS
# from .manipulation import unbind #DEFINE_ALIAS
from .manipulation import roll #DEFINE_ALIAS
from .search import argmax #DEFINE_ALIAS
# from .search import argmin #DEFINE_ALIAS
# from .search import argsort #DEFINE_ALIAS
# from .search import has_inf #DEFINE_ALIAS
# from .search import has_nan #DEFINE_ALIAS
# from .search import masked_select #DEFINE_ALIAS
# from .search import topk #DEFINE_ALIAS
from .search import where #DEFINE_ALIAS
from .search import index_select #DEFINE_ALIAS
from .search import index_sample #DEFINE_ALIAS
from .search import nonzero #DEFINE_ALIAS
from .search import sort #DEFINE_ALIAS
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define functions to get tensor attributes
# __all__ = ['rank', 'shape']
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import warnings
from ..fluid.framework import Variable, in_dygraph_mode
from ..fluid.initializer import Constant
from ..fluid.layers import core
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
from ..fluid.framework import convert_np_dtype_to_dtype_, _varbase_creator, device_guard, OpProtoHolder
from ..fluid.layers import fill_constant
from paddle.common_ops_import import *
# TODO: define functions to create a tensor
__all__ = [
'create_tensor',
# 'create_lod_tensor',
# 'create_random_int_lodtensor',
# 'crop_tensor',
# 'diag', 'eye',
# 'fill_constant',
# 'get_tensor_from_selected_rows',
'linspace',
'ones',
'ones_like',
# 'range',
'zeros',
'zeros_like',
'arange',
'eye',
'full',
#'full_like',
'triu',
'tril',
'meshgrid',
]
def linspace(start, stop, num, dtype, out=None, device=None, name=None):
"""
This OP return fixed number of evenly spaced values within a given interval.
**NOTICE**: The output of this OP has no gradient.
Args:
start(float|Variable): The input :attr:`start` is the start of the range. It is a float scalar, \
or a tensor of shape [1] with input data type float32, float64.
stop(float|Variable): The input :attr:`stop` is the end of the range. It is a float scalar, \
or a tensor of shape [1] with input data type float32, float64.
num(int|Variable): The input :attr:`num` is the number of points in the sequence. It is an int scalar, \
or a tensor of shape [1] with type int32.
dtype(string): The data type of the output tensor, it can be 'float32' or 'float64'.
out (Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
If out is None, a new Variable will be created to store the result. Default: None.
device (string, optional): Which device to run the operator. The :attr:`device` must be
None, 'cpu' or 'gpu'. If :attr:`device` is None, the device set by the user in
the paddle program will be chosen. Default: None.
name(str, optional): Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`. Default: None.
Returns:
Variable: The 1-D tensor with fixed number of evenly spaced values; \
the data shape of this tensor is :math:`[num]`. If :attr:`num` is set to 1, the output tensor \
has just the value of :attr:`start`. The output data type is float32 or float64.
Examples:
.. code-block:: python
import paddle
data = paddle.linspace(0, 10, 5, dtype='float32') # [0.0, 2.5, 5.0, 7.5, 10.0]
data = paddle.linspace(0, 10, 1, dtype='float32') # [0.0]
"""
helper = LayerHelper("linspace", **locals())
if not isinstance(start, Variable):
start = fill_constant([1], dtype, start)
if not isinstance(stop, Variable):
stop = fill_constant([1], dtype, stop)
if not isinstance(num, Variable):
num = fill_constant([1], 'int32', num)
if out is None:
out = helper.create_variable_for_type_inference(dtype=start.dtype)
else:
check_dtype(
out.dtype, out.name,
convert_dtype(start.dtype), 'linspace',
"The out data type '%s' in linspace must be the same with '%s' seted by parameter 'dtype'."
% (out.dtype, dtype))
if name:
warning.warn(
"The output Variable name of the paddle.tensor.linspace operation can only be given by parameter out or name.\
When parameter out and name are set at the same time, out has a higher priority than name. \
Finally, the output Variable name is same as the out name %s." %
out.name,
category=UserWarning,
stacklevel=2)
if device is not None:
if device not in ['cpu', 'gpu']:
raise ValueError(
"The value of 'device' in linspace operation must be cpu or gpu, but received %s."
% (device))
else:
with device_guard(device):
helper.append_op(
type='linspace',
inputs={'Start': start,
'Stop': stop,
'Num': num},
outputs={'Out': [out]})
else:
helper.append_op(
type='linspace',
inputs={'Start': start,
'Stop': stop,
'Num': num},
outputs={'Out': [out]})
return out
def ones(shape, dtype=None, out=None, device=None):
"""
The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 1.
Args:
shape(tuple|list): Shape of output tensor.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor, it supports
bool, float16, float32, float64, int32 and int64.
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
If out is None, a new Variable will be created to store the result.
device(str, optional): Which device to run the operator. The :attr:`device` must be
None, 'cpu' or 'gpu'. If :attr:`device` is None, the device set by the user in
the paddle program will be chosen. Default value is None.
Returns:
Variable: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1.
Examples:
.. code-block:: python
import paddle
data = paddle.ones(shape=[3, 2], dtype='float32') # [[1., 1.], [1., 1.], [1., 1.]]
data = paddle.ones(shape=[2, 2], dtype='float32', device='cpu') # [[1., 1.], [1., 1.]]
"""
check_dtype(dtype, 'create data type',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
'ones')
if device is not None:
if device not in ['cpu', 'gpu']:
raise ValueError(
"The value of 'device' in zeros_op must be cpu or gpu, but received %s."
% (device))
with fluid.device_guard(device):
return fill_constant(value=1.0, shape=shape, dtype=dtype, out=out)
return fill_constant(value=1.0, shape=shape, dtype=dtype, out=out)
def ones_like(input, dtype=None, device=None, name=None):
"""
This function creates a ones tensor which has identical shape and dtype
with `input`.
Args:
input(Variable): The input tensor which specifies shape and dtype. The dtype of input can be
float32, float64, int32, int64.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type can be set bool, float32, float64, int32, int64.
The default value is None, the dtype is the same as input.
device(str, optional): Which device to run the operator. The :attr:`device` must be
None, 'cpu', 'gpu'. If :attr:`device` is None, the device set by the user in
the paddle program will be chosen. Default value is None.
name(str, optional): The name of output variable, normally there is no need for user to set this property.
Default value is None, the framework sets the name of output variable.
Returns:
out(Variable): The tensor variable storing the output.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
x = fluid.data(name='x', dtype='float32', shape=[3])
data = paddle.ones_like(x) # data=[1.0, 1.0, 1.0]
data1 = paddle.ones_like(input=x, device="gpu") data1=[1.0, 1.0. 1.0]
"""
helper = LayerHelper("zeros_like", **locals())
attrs = {"value": 1.0}
var_dtype = None
if dtype is not None:
check_dtype(
dtype, 'create data type',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
'ones_like')
var_dtype = convert_np_dtype_to_dtype_(dtype)
attrs["dtype"] = var_dtype
else:
var_dtype = input.dtype
out = helper.create_variable_for_type_inference(dtype=var_dtype)
if device is not None:
if device not in ['cpu', 'gpu']:
raise ValueError(
"The value of 'device' in zeros_op must be cpu or gpu, but received %s."
% (device))
with fluid.device_guard(device):
helper.append_op(
type='fill_any_like',
inputs={'X': [input]},
attrs=attrs,
outputs={'Out': [out]})
return out
helper.append_op(
type='fill_any_like',
inputs={'X': [input]},
attrs=attrs,
outputs={'Out': [out]})
out.stop_gradient = True
return out
def zeros(shape, dtype, out=None, device=None):
"""
The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
Args:
shape(tuple|list): Shape of output tensor.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor, it supports
bool, float16, float32, float64, int32 and int64.
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
If out is None, a new Variable will be created to store the result.
device(str, optional): Which device to run the operator. The :attr:`device` must be
None, 'cpu' or 'gpu'. If :attr:`device` is None, the device set by the user in
the paddle program will be chosen. Default value is None.
Returns:
Variable: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.
Examples:
.. code-block:: python
import paddle
data = paddle.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]
data = paddle.zeros(shape=[2, 2], dtype='float32', device='cpu') # [[0., 0.], [0., 0.]]
"""
check_dtype(dtype, 'create data type',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
'zeros')
if device is not None:
if device not in ['cpu', 'gpu']:
raise ValueError(
"The value of 'device' in zeros_op must be cpu or gpu, but received %s."
% (device))
with fluid.device_guard(device):
return fill_constant(value=0.0, shape=shape, dtype=dtype, out=out)
return fill_constant(value=0.0, shape=shape, dtype=dtype, out=out)
def zeros_like(input, dtype=None, device=None, name=None):
"""
This function creates a zeros tensor which has identical shape and dtype
with `input`.
Args:
input(Variable): The input tensor which specifies shape and dtype. The dtype of input can be
bool, float32, float64, int32, int64.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type can be set bool, float32, float64, int32, int64.
The default value is None, the dtype is the same as input.
device(str, optional): Which device to run the operator. The :attr:`device` must be
None, 'cpu', 'gpu'. If :attr:`device` is None, the device set by the user in
the paddle program will be chosen. Default value is None.
name(str, optional): The name of output variable, normally there is no need for user to set this property.
Default value is None, the framework sets the name of output variable.
Returns:
out(Variable): The tensor variable storing the output.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
x = fluid.data(name='x', dtype='float32', shape=[3])
data = paddle.zeros_like(x) # data=[0.0, 0.0, 0.0]
data1 = paddle.zeros_like(input=x, device="gpu") # data1=[0.0, 0.0, 0.0]
"""
helper = LayerHelper("zeros_like", **locals())
attrs = {"value": 0.0}
var_dtype = None
if dtype is not None:
check_dtype(dtype, 'create data type',
['bool', 'float32', 'float64', 'int32', 'int64'],
'zeros_like')
var_dtype = convert_np_dtype_to_dtype_(dtype)
attrs["dtype"] = var_dtype
else:
var_dtype = input.dtype
out = helper.create_variable_for_type_inference(dtype=var_dtype)
if device is not None:
if device not in ['cpu', 'gpu']:
raise ValueError(
"The value of 'device' in zeros_op must be cpu or gpu, but received %s."
% (device))
with fluid.device_guard(device):
helper.append_op(
type='fill_any_like',
inputs={'X': [input]},
attrs=attrs,
outputs={'Out': [out]})
return out
helper.append_op(
type='fill_any_like',
inputs={'X': [input]},
attrs=attrs,
outputs={'Out': [out]})
out.stop_gradient = True
return out
def eye(num_rows,
num_columns=None,
out=None,
dtype='float32',
stop_gradient=True,
name=None):
"""
**eye**
This function constructs an identity tensor, or a batch of identity tensors.
Args:
num_rows(int): the number of rows in each batch tensor.
num_columns(int, optional): the number of columns in each batch tensor.
If None, default: num_rows.
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
If out is None, a new Variable will be created to store the result.
dtype(string, optional): The data type of the returned tensor.
It should be int32, int64, float16, float32, float64.
stop_gradient(bool, optional): Whether stop calculating gradients. Default:True.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: An identity Tensor or LoDTensor of shape [num_rows, num_columns].
Examples:
.. code-block:: python
import paddle
data = paddle.eye(3, dtype='int32')
# [[1, 0, 0]
# [0, 1, 0]
# [0, 0, 1]]
data = paddle.eye(2, 3, dtype='int32')
# [[1, 0, 0]
# [0, 1, 0]]
"""
helper = LayerHelper("eye", **locals())
if not isinstance(num_rows, int) or num_rows < 0:
raise TypeError("num_rows should be a non-negative int")
if num_columns is not None:
if not isinstance(num_columns, int) or num_columns < 0:
raise TypeError("num_columns should be a non-negative int")
else:
num_columns = num_rows
if out is None:
out = helper.create_variable_for_type_inference(dtype=dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op(
type='eye',
inputs={},
outputs={'Out': [out]},
attrs={
'num_rows': num_rows,
'num_columns': num_columns,
'dtype': c_dtype
},
stop_gradient=True)
out.stop_gradient = stop_gradient
return out
def full(shape,
fill_value,
out=None,
dtype=None,
device=None,
stop_gradient=True,
name=None):
"""
This Op returns a Tensor filled with `fill_value`, whose size is the same as `shape`.
Args:
shape(list|tuple|Variable): Shape of the Tensor to be created.
The data type is ``int32`` or ``int64`` . If ``shape`` is a list or tuple,
the elements of it should be integers or Tensors with shape [1].
If ``shape`` is a Variable, it should be a 1-D Tensor.
fill_value(bool|float16|float32|float64|int32|int64|Variable): The constant value
used to initialize the Tensor to be created. If fill_value is a Variable, it must be a 1-D Tensor.
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
If out is None, a new Variable will be created to store the result.
dtype(np.dtype|core.VarDesc.VarType|str, optional): Data type of the output tensor
which can be float16, float32, float64, int32, int64. If dtype is `None`, the data
type of the created tensor is `float32`.
device(str, optional): On which device to run this Op. The :attr:`device` must be
None, 'cpu' or 'gpu'. If :attr:`device` is None, the device that the user set in
the paddle program will be chosen. Default value is None.
stop_gradient(bool, optional): Indicating if we stop gradient from current(out) Variable,
default value is True.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: Tensor which is created according to shape and dtype.
Raises:
TypeError: The `dtype` must be one of None, bool, float16, float32, float64, int32 and int64.
TypeError: The `out` must be a Variable.
TypeError: The `shape` must be one of Variable, list or tuple.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
data1 = paddle.full(shape=[2,1], fill_value=0, dtype='int64') # data1=[[0],[0]]
data2 = paddle.full(shape=[2,1], fill_value=5, dtype='int64', device='gpu') # data2=[[5],[5]]
# attr shape is a list which contains Variable Tensor.
positive_2 = fluid.layers.fill_constant([1], "int32", 2)
data3 = paddle.full(shape=[1, positive_2], dtype='float32', fill_value=1.5) # data3=[1.5, 1.5]
# attr shape is a Variable Tensor.
shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
data4 = paddle.full(shape=shape, dtype='bool', fill_value=True) # data4=[[True,True],[True,True]]
# attr value is a Variable Tensor.
val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
data5 = paddle.full(shape=[2,1], fill_value=val, dtype='float32') #data5=[[2.0],[2.0]]
"""
helper = LayerHelper("full", **locals())
if dtype is None:
dtype = 'float32'
check_dtype(dtype, 'create data type',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
'full')
check_type(shape, 'shape', (Variable, list, tuple), 'full')
if out is not None:
check_type(out, 'out', Variable, 'full')
if out is None:
out = helper.create_variable_for_type_inference(dtype=dtype)
out.stop_gradient = stop_gradient
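# full is a thin wrapper over fill_constant; device_guard pins the op to the
# requested device (None means the program's default placement).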
with device_guard(device):
out = fill_constant(shape=shape, dtype=dtype, value=fill_value, out=out)
return out
def _tril_triu_op(helper):
"""Base op of tril_op and triu_op
"""
op_type = helper.layer_type
x = helper.kwargs.get('input', None)
assert x is not None, 'x cannot be None in {}'.format(op_type)
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
op_type)
if len(x.shape) < 2:
raise ValueError("input shape in {} must be at least 2-D".format(
op_type))
diagonal = helper.kwargs.get('diagonal', 0)
if not isinstance(diagonal, (int, )):
raise TypeError("diagonal in {} must be a python Int".format(op_type))
name = helper.kwargs.get('name', None)
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
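# A single tril_triu kernel implements both ops; the 'lower' attribute
# selects which triangle of the matrix is kept.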
helper.append_op(
type="tril_triu",
inputs={"X": x},
attrs={
"diagonal": diagonal,
"lower": True if op_type == 'tril' else False,
},
outputs={"Out": out}, )
return out
def tril(input, diagonal=0, name=None):
"""
This op returns the lower triangular part of a matrix (2-D tensor) or batch
of matrices :attr:`input`, the other elements of the result tensor are set
to 0. The lower triangular part of the matrix is defined as the elements
on and below the diagonal.
Args:
input (Variable): The input variable which is a Tensor.
Support data types: ``float64``, ``float32``, ``int32``, ``int64``.
diagonal (int, optional): The diagonal to consider, default value is 0.
If :attr:`diagonal` = 0, all elements on and below the main diagonal are
retained. A positive value includes just as many diagonals above the main
diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
:math:`\{(i, i)\}` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
:math:`d_{1}, d_{2}` are the dimensions of the matrix.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: Tensor, results of lower triangular operation by the specified diagonal of input tensor,
it's data type is the same as input's Tensor.
Raises:
TypeError: diagonal is not an int type.
ValueError: dimension of :attr:`input` is less than 2.
Examples:
.. code-block:: python
import numpy as np
import paddle.tensor as tensor
import paddle.fluid as fluid
data = np.arange(1, 13, dtype="int64").reshape(3,-1)
# array([[ 1, 2, 3, 4],
# [ 5, 6, 7, 8],
# [ 9, 10, 11, 12]])
x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
exe = fluid.Executor(fluid.CPUPlace())
# example 1, default diagonal
tril = tensor.tril(x)
tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
fetch_list=[tril], return_numpy=True)
# array([[ 1, 0, 0, 0],
# [ 5, 6, 0, 0],
# [ 9, 10, 11, 0]])
.. code-block:: python
# example 2, positive diagonal value
tril = tensor.tril(x, diagonal=2)
tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
fetch_list=[tril], return_numpy=True)
# array([[ 1, 2, 3, 0],
# [ 5, 6, 7, 8],
# [ 9, 10, 11, 12]])
.. code-block:: python
# example 3, negative diagonal value
tril = tensor.tril(x, diagonal=-1)
tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
fetch_list=[tril], return_numpy=True)
# array([[ 0, 0, 0, 0],
# [ 5, 0, 0, 0],
# [ 9, 10, 0, 0]])
"""
return _tril_triu_op(LayerHelper('tril', **locals()))
def triu(input, diagonal=0, name=None):
"""
This op returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
:attr:`input`, the other elements of the result tensor are set to 0.
The upper triangular part of the matrix is defined as the elements on and
above the diagonal.
Args:
input (Variable): The input variable which is a Tensor.
Support data types: ``float64``, ``float32``, ``int32``, ``int64``.
diagonal (int, optional): The diagonal to consider, default value is 0.
If :attr:`diagonal` = 0, all elements on and above the main diagonal are
retained. A positive value excludes just as many diagonals above the main
diagonal, and similarly a negative value includes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
:math:`\{(i, i)\}` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
:math:`d_{1}, d_{2}` are the dimensions of the matrix.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: Tensor, results of upper triangular operation by the specified diagonal of input tensor,
it's data type is the same as input's Tensor.
Raises:
TypeError: diagonal is not an int type.
ValueError: dimension of :attr:`input` is less than 2.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
import paddle.tensor as tensor
data = np.arange(1, 13, dtype="int64").reshape(3,-1)
# array([[ 1, 2, 3, 4],
# [ 5, 6, 7, 8],
# [ 9, 10, 11, 12]])
x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
exe = fluid.Executor(fluid.CPUPlace())
# example 1, default diagonal
triu = tensor.triu(x)
triu_out, = exe.run(fluid.default_main_program(), feed={"x": data},
fetch_list=[triu], return_numpy=True)
# array([[ 1, 2, 3, 4],
# [ 0, 6, 7, 8],
# [ 0, 0, 11, 12]])
.. code-block:: python
# example 2, positive diagonal value
triu = tensor.triu(x, diagonal=2)
triu_out, = exe.run(fluid.default_main_program(), feed={"x": data},
fetch_list=[triu], return_numpy=True)
# array([[0, 0, 3, 4],
# [0, 0, 0, 8],
# [0, 0, 0, 0]])
.. code-block:: python
# example 3, negative diagonal value
triu = tensor.triu(x, diagonal=-1)
triu_out, = exe.run(fluid.default_main_program(), feed={"x": data},
fetch_list=[triu], return_numpy=True)
# array([[ 1, 2, 3, 4],
# [ 5, 6, 7, 8],
# [ 0, 10, 11, 12]])
"""
return _tril_triu_op(LayerHelper('triu', **locals()))
def meshgrid(input, name=None):
"""
This op takes a list of N tensors as input, each of which is a 1-dimensional
vector, and creates N-dimensional grids.
Args:
input(list of Variable): a list of k 1-D tensors with shapes (N1,),
(N2,), ..., (Nk,). Supported data types: ``float64``, ``float32``, ``int32``, ``int64``.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: k tensors. The shape of each tensor is (N1, N2, ..., Nk)
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
x = fluid.data(name='x', shape=[100], dtype='int32')
y = fluid.data(name='y', shape=[200], dtype='int32')
input_1 = np.random.randint(0, 100, [100, ]).astype('int32')
input_2 = np.random.randint(0, 100, [200, ]).astype('int32')
exe = fluid.Executor(place=fluid.CPUPlace())
grid_x, grid_y = paddle.tensor.meshgrid([x, y])
res_1, res_2 = exe.run(fluid.default_main_program(),
feed={'x': input_1,
'y': input_2},
fetch_list=[grid_x, grid_y])
#the shape of res_1 is (100, 200)
#the shape of res_2 is (100, 200)
.. code-block:: python
#example 2: in dygraph mode
import paddle
import paddle.fluid as fluid
import numpy as np
input_3 = np.random.randint(0, 100, [100, ]).astype('int32')
input_4 = np.random.randint(0, 100, [200, ]).astype('int32')
with fluid.dygraph.guard():
tensor_3 = fluid.dygraph.to_variable(input_3)
tensor_4 = fluid.dygraph.to_variable(input_4)
grid_x, grid_y = paddle.tensor.meshgrid([tensor_3, tensor_4])
#the shape of grid_x is (100, 200)
#the shape of grid_y is (100, 200)
"""
if in_dygraph_mode():
num = len(input)
out = core.ops.meshgrid(input, num)
return out
helper = LayerHelper('meshgrid', **locals())
if not isinstance(input, list):
raise TypeError("The type of input in meshgrid should be list.")
for idx, input_ in enumerate(input):
check_dtype(input_.dtype, 'create data type',
['float16', 'float32', 'float64', 'int32', 'int64'],
'meshgrid')
num = len(input)
out = [
helper.create_variable_for_type_inference(dtype=input[i].dtype)
for i in range(num)
]
helper.append_op(type='meshgrid', inputs={'X': input}, outputs={'Out': out})
return out
def arange(start, end, step=1, dtype=None, name=None):
"""
Return evenly spaced values within a given interval.
Values are generated within the half-open interval [start, end) (in other words,
the interval including start but excluding end).
Parameters:
start(float32 | float64 | int32 | int64 | Variable): Start of interval. The interval includes this value.
When start is a Variable, it is a 1-D Tensor with shape [1].
end(float32 | float64 | int32 | int64 | Variable): End of interval. The interval does not include this
value, except in some cases where step is not an integer
and floating point round-off affects the length of out. When end is a Variable,
it is a 1-D Tensor with shape [1].
step(float32 | float64 | int32 | int64 | Variable): Spacing between values. For any output out, this is the
distance between two adjacent values, out[i+1] - out[i].
dtype(str|core.VarDesc.VarType): the data type of the output tensor, can be float32, float64, int32, int64.
Returns: a 1-D Tensor containing evenly spaced values within the given interval. Its data type is set by dtype.
Return type: Variable
Examples:
.. code-block:: python
import paddle
# expected output: [0, 2, 4, 6, 8]
data = paddle.arange(0, 10, 2, 'int32')
#dygraph mode
import paddle
import paddle.fluid as fluid
with fluid.dygraph.guard():
x = paddle.arange(0, 6, 2)
# x: [0, 2, 4]
# x dtype: float32
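A minimal sketch of running the static-graph snippet above; the executor
setup here is an assumption for illustration, not part of arange itself:
.. code-block:: python
import paddle
import paddle.fluid as fluid
data = paddle.arange(0, 10, 2, 'int32')
exe = fluid.Executor(fluid.CPUPlace())
out, = exe.run(fluid.default_main_program(), fetch_list=[data])
# out: array([0, 2, 4, 6, 8], dtype=int32)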
"""
helper = LayerHelper("range", **locals())
if dtype is None:
dtype = 'float32'
check_dtype(dtype, 'create data type',
['float32', 'float64', 'int32', 'int64'], 'range')
dtype = convert_dtype(dtype)
if not isinstance(start, Variable):
start = fill_constant([1], dtype, start)
if not isinstance(end, Variable):
end = fill_constant([1], dtype, end)
if not isinstance(step, Variable):
step = fill_constant([1], dtype, step)
out = helper.create_variable_for_type_inference(dtype=start.dtype)
helper.append_op(
type='range',
inputs={'Start': start,
'End': end,
'Step': step},
outputs={'Out': [out]})
out.stop_gradient = True
return out
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define functions to save & load a tensor
# __all__ = ['save', 'load']
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.common_ops_import import *
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type
from ..fluid.framework import in_dygraph_mode, _varbase_creator
__all__ = [
'matmul',
'dot',
# 'einsum',
'norm',
# 'transpose',
'dist',
't',
'cross',
# 'cholesky',
# 'tensordot',
'bmm'
]
def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
"""
Applies matrix multiplication to two tensors.
Currently, the input tensors may be of any rank, but when the rank of either
input is larger than 3, the two inputs must be of equal rank.
The actual behavior depends on the shapes of :math:`x`, :math:`y` and the
flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:
- If a transpose flag is specified, the last two dimensions of the tensor
are transposed. If the tensor is rank-1 of shape :math:`[D]`, then for
:math:`x` it is treated as :math:`[1, D]` in nontransposed form and as
:math:`[D, 1]` in transposed form, whereas for :math:`y` it is the
opposite: It is treated as :math:`[D, 1]` in nontransposed form and as
:math:`[1, D]` in transposed form.
- After transpose, the two tensors are 2-D or n-D and matrix multiplication
performs in the following way.
- If both are 2-D, they are multiplied like conventional matrices.
- If either is n-D, it is treated as a stack of matrices residing in the
last two dimensions and a batched matrix multiply supporting broadcast
applies on the two tensors.
Also note that if the raw tensor :math:`x` or :math:`y` is rank-1 and
nontransposed, the prepended or appended dimension :math:`1` will be
removed after matrix multiplication.
Args:
x (Variable): The input variable which is a Tensor or LoDTensor.
y (Variable): The input variable which is a Tensor or LoDTensor.
transpose_x (bool): Whether to transpose :math:`x` before multiplication.
transpose_y (bool): Whether to transpose :math:`y` before multiplication.
alpha (float): The scale of output. Default 1.0.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Variable: The product Tensor (or LoDTensor) variable.
Examples:
.. code-block:: python
# Examples to clarify shapes of the inputs and output
# x: [B, ..., M, K], y: [B, ..., K, N]
# paddle.matmul(x, y) # out: [B, ..., M, N]
# x: [B, M, K], y: [B, K, N]
# paddle.matmul(x, y) # out: [B, M, N]
# x: [B, M, K], y: [K, N]
# paddle.matmul(x, y) # out: [B, M, N]
# x: [M, K], y: [K, N]
# paddle.matmul(x, y) # out: [M, N]
# x: [B, M, K], y: [K]
# paddle.matmul(x, y) # out: [B, M]
# x: [K], y: [K]
# paddle.matmul(x, y) # out: [1]
# x: [M], y: [N]
# paddle.matmul(x, y, True, True) # out: [M, N]
import paddle
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[2, 3], dtype='float32')
y = fluid.data(name='y', shape=[3, 2], dtype='float32')
out = paddle.matmul(x, y, True, True)
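A small dygraph sketch of the rank-1 case described above (the concrete
inputs are assumptions for illustration):
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
with fluid.dygraph.guard():
    vec_x = fluid.dygraph.to_variable(np.ones([3], dtype='float32'))
    vec_y = fluid.dygraph.to_variable(np.ones([3], dtype='float32'))
    out = paddle.matmul(vec_x, vec_y)
    # [3] x [3] is treated as [1, 3] x [3, 1]; the prepended and
    # appended dims are removed, so out has shape [1] with value [3.]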
"""
attrs = {
'transpose_X': transpose_x,
'transpose_Y': transpose_y,
'alpha': float(alpha),
}
if in_dygraph_mode():
out = _varbase_creator(dtype=x.dtype)
core.ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
transpose_y, 'alpha', float(alpha))
return out
def __check_input(x, y):
var_names = {'x': x, 'y': y}
for name, val in var_names.items():
check_variable_and_dtype(
val, name, ['float16', 'float32', 'float64'], 'matmul')
x_shape = list(x.shape)
y_shape = list(y.shape)
if len(x_shape) == 1:
x_shape = [1] + x_shape
if len(y_shape) == 1:
y_shape = y_shape + [1]
# check the inner 2 dimensions
if transpose_x:
x_shape[-2], x_shape[-1] = x_shape[-1], x_shape[-2]
if transpose_y:
y_shape[-2], y_shape[-1] = y_shape[-1], y_shape[-2]
if x_shape[-1] != y_shape[-2]:
assert (x_shape[-1] == -1) or (y_shape[-2] == -1), \
"After performing an optional transpose, Input X's width should be " \
"equal to Y's width for multiplication " \
"prerequisites. But received X's shape: %s, Y's shape: %s\n" % \
(x_shape, y_shape)
if len(y_shape) > 2 and len(x_shape) > 2:
for i, dim_x in enumerate(x_shape[:-2]):
# don't check neg shape
if dim_x < 0 or y_shape[i] < 0:
continue
if dim_x != y_shape[i]:
raise ValueError(
"When the matrix is larger than 2 dimensions, the higher "
"dimensional values of the two matrices need to be equal. "
"But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
"Y's shape: %s.\n" % (i, i, x_shape, y_shape))
__check_input(x, y)
helper = LayerHelper('matmul', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='matmul',
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs=attrs)
return out
def norm(input, p='fro', axis=None, keepdim=False, out=None, name=None):
"""
Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean
or 2-norm, and in general the p-norm for p > 0) of a given tensor.
Args:
input (Variable): The input tensor could be N-D tensor, and the input data
type could be float32 or float64.
p (float|string, optional): Order of the norm. Supported values are `fro`, `1`, `2`,
and any positive real number yielding the corresponding p-norm.
axis (int|list, optional): The axis on which to apply norm operation. If axis is int
or list with only one element, the vector norm is computed over the axis.
If axis is a list with two elements, the matrix norm is computed over the axis.
If `axis < 0`, the dimension on which to apply the norm is rank(input) + axis.
keepdim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have fewer dimensions
than the :attr:`input` unless :attr:`keepdim` is true, default
value is False.
out (Variable, optional): The output tensor, default value is None. Its data type
must be the same as the input Tensor's.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: Tensor, the result of the norm operation on the specified axis of the input tensor;
its data type is the same as the input Tensor's.
Raises:
TypeError: if the out data type is different from the input data type.
ValueError: if `p` or `axis` is invalid.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[2, 3, 5], dtype='float64')
# compute frobenius norm along last two dimensions.
out_fro = paddle.norm(x, p='fro', axis=[1,2])
# compute 2-order vector norm along last dimension.
out_pnorm = paddle.norm(x, p=2, axis=-1)
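A sketch of executing the program built above (the feed data and the
executor setup are assumptions for illustration):
.. code-block:: python
import numpy as np
np_x = np.arange(30, dtype='float64').reshape(2, 3, 5)
exe = fluid.Executor(fluid.CPUPlace())
fro_v, pnorm_v = exe.run(fluid.default_main_program(),
                         feed={'x': np_x},
                         fetch_list=[out_fro, out_pnorm])
# fro_v.shape: (2,)    -- one Frobenius norm per slice along axis 0
# pnorm_v.shape: (2, 3) -- the last axis is reduced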
"""
def frobenius_norm(input, dim=None, keepdim=False, out=None, name=None):
"""
Calculates the Frobenius norm over two given dimensions of Tensor `input`.
Args:
input (Variable): Tensor, data type float32, float64.
dim (list, optional): None for last two dimensions.
keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
out (Variable, optional): The tensor variable storing the output.
"""
if dim is not None and not (isinstance(dim, list) and len(dim) == 2):
raise ValueError(
"The dim of frobenius norm op should be None or two elements list!"
)
attrs = {
'dim': dim if dim is not None else [-2, -1],
'keep_dim': keepdim,
'reduce_all': False
}
if len(attrs['dim']) == len(input.shape):
attrs['reduce_all'] = True
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'frobenius_norm')
helper = LayerHelper('frobenius_norm', **locals())
if out is None:
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
else:
check_type(out, 'out', (Variable), 'frobenius_norm')
check_dtype(
out.dtype, out.name,
convert_dtype(input.dtype), 'frobenius_norm',
'(The out data type in frobenius_norm must be the same as the input data type.)'
)
helper.append_op(
type='frobenius_norm',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs)
return out
def vector_norm(input,
porder=None,
axis=None,
keepdim=False,
out=None,
name=None):
"""
Calculates the p-order vector norm over a given dimension of Tensor `input`.
Args:
input (Variable): Tensor, data type float32, float64.
porder (float, optional): None for porder=2.0.
axis (int, optional): None for last dimension.
keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
out (Variable, optional): The tensor variable storing the output.
"""
if porder is not None:
check_type(porder, 'porder', (float, int), 'p_norm')
if axis is not None:
check_type(axis, 'axis', (int), 'p_norm')
attrs = {
'axis': axis if axis is not None else -1,
'porder': float(porder) if porder is not None else 2.0,
'keepdim': keepdim,
'epsilon': 1e-12,
}
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'p_norm')
helper = LayerHelper('p_norm', **locals())
if out is None:
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
else:
check_type(out, 'out', (Variable), 'p_norm')
check_dtype(
out.dtype, out.name,
convert_dtype(input.dtype), 'p_norm',
'(The out data type in p_norm must be the same as the input data type.)'
)
helper.append_op(
type='p_norm',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs)
return out
if axis is None and p is not None:
if isinstance(p, str):
if p == "fro":
return frobenius_norm(
input, dim=axis, keepdim=keepdim, out=out, name=name)
else:
raise ValueError(
"only valid string values are 'fro', found {}".format(p))
elif isinstance(p, (int, float)):
return vector_norm(
input, porder=p, axis=axis, keepdim=keepdim, out=out, name=name)
else:
raise ValueError("only valid p type is string or float, found {}".
format(type(p)))
if isinstance(axis, list) and len(axis) == 1:
axis = axis[0]
#calculate vector norm, where axis is int or list with only one integer
if isinstance(axis, int):
if isinstance(p, (int, float)):
return vector_norm(
input, axis=axis, porder=p, keepdim=keepdim, out=out, name=name)
else:
raise ValueError(
"unspport p for p-order vector norm. except float, found {}".
format(p))
#calculate matrix norm, where axis is list with two integers
elif isinstance(axis, list) and len(axis) == 2:
if p == "fro":
return frobenius_norm(
input, dim=axis, keepdim=keepdim, out=out, name=name)
else:
raise ValueError(
"unspport p for matrix norm, expcept 'fro', found {}".format(p))
else:
raise ValueError(
"except axis type int or list (length of list <=2), found {}".
format(axis))
def dist(x, y, p=2):
"""
This OP returns the p-norm of (x - y). It is not a norm in the strict sense, but only a measure
of distance. The shapes of x and y must be broadcastable.
Let z = x - y.
When p = 0, defining :math:`0^0 = 0`, the zero-norm of z is simply the number of non-zero elements of z.
.. math::
||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p}
When p = inf, the inf-norm of z is the maximum element of z.
.. math::
||z||_\infty=\max_i |z_i|
When p = -inf, the negative-inf-norm of z is the minimum element of z.
.. math::
||z||_{-\infty}=\min_i |z_i|
Otherwise, the p-norm of z follows the formula,
.. math::
||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}}
Args:
x (Variable): 1-D to 6-D Tensor, its data type is float32 or float64.
y (Variable): 1-D to 6-D Tensor, its data type is float32 or float64.
p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2.
Returns:
Variable: Tensor that is the p-norm of (x - y).
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.array([[3, 3],[3, 3]]).astype(np.float32))
y = fluid.dygraph.to_variable(np.array([[3, 3],[3, 1]]).astype(np.float32))
out = paddle.dist(x, y, 0)
print(out.numpy()) # out = [1.]
out = paddle.dist(x, y, 2)
print(out.numpy()) # out = [2.]
out = paddle.dist(x, y, float("inf"))
print(out.numpy()) # out = [2.]
out = paddle.dist(x, y, float("-inf"))
print(out.numpy()) # out = [0.]
"""
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist')
check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist')
check_type(p, 'p', (float, int), 'dist')
helper = LayerHelper("dist", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
inputs = {"X": [x], "Y": [y]}
outputs = {'Out': [out]}
attrs = {"p": float(p)}
helper.append_op(
type='dist', inputs=inputs, outputs=outputs, attrs=attrs)
return out
def dot(x, y, name=None):
"""
This operator calculates inner product for vectors.
.. note::
Only support 1-d Tensor(vector).
Parameters:
x(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
y(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`
Returns:
Variable: the calculated result Tensor/LoDTensor.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.random.uniform(0.1, 1, [10]).astype(np.float32))
y = fluid.dygraph.to_variable(np.random.uniform(1, 3, [10]).astype(np.float32))
z = paddle.dot(x, y)
print(z.numpy())
"""
op_type = 'dot'
# skip var type check in dygraph mode to improve efficiency
if in_dygraph_mode():
op = getattr(core.ops, op_type)
return op(x, y)
assert x is not None, 'x cannot be None in {}'.format(op_type)
assert y is not None, 'y cannot be None in {}'.format(op_type)
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
op_type)
check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],
op_type)
helper = LayerHelper(op_type, **locals())
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="dot", inputs={'X': x,
'Y': y}, attrs={}, outputs={"Out": out})
return out
def t(input, name=None):
"""
Transposes a tensor of rank <= 2.
0-D and 1-D tensors are returned as-is, and for a 2-D tensor this is
equivalent to the fluid.layers.transpose function with perm set to [1, 0].
Args:
input (Variable): The input Tensor. It is an N-D (N<=2) Tensor of data types float32, float64, int32.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: A transposed n-D Tensor, with data type being float32, float64, int32, int64.
For Example:
.. code-block:: text
# Example 1 (0-D tensor)
x = tensor([0.79])
paddle.t(x) = tensor([0.79])
# Example 2 (1-D tensor)
x = tensor([0.79, 0.84, 0.32])
paddle.t(x) = tensor([0.79, 0.84, 0.32])
# Example 3 (2-D tensor)
x = tensor([[0.79, 0.84, 0.32],
[0.64, 0.14, 0.57]])
paddle.t(x) = tensor([[0.79, 0.64],
[0.84, 0.14],
[0.32, 0.57]])
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[2, 3],
dtype='float32')
x_transposed = paddle.t(x)
print(x_transposed.shape)
# (3, 2)
"""
if len(input.shape) > 2:
raise ValueError(
"Input(input) only support N-D (N<=2) tensor, but received "
"length of Input(input) is %s. Perhaps you can use paddle."
"tensor.transpose() instead." % len(input.shape))
if in_dygraph_mode():
if len(input.shape) == 1:
return input
# 2-D tensor
perm = [1, 0]
out, _ = core.ops.transpose2(input, 'axis', perm)
return out
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
'transpose')
helper = LayerHelper('t', **locals())
out = helper.create_variable_for_type_inference(input.dtype)
input_shape = helper.create_variable_for_type_inference(input.dtype)
if len(input.shape) == 1:
out = input
else:
helper.append_op(
type='transpose2',
inputs={'X': [input]},
outputs={'Out': [out],
'XShape': [input_shape]},
attrs={'axis': [1, 0]})
return out
def cross(input, other, dim=None):
"""
Returns the cross product of vectors in dimension `dim` of the `input` and `other` tensor.
Inputs must have the same shape, and the size of their dim-th dimension should be equal to 3.
If `dim` is not given, it defaults to the first dimension found with the size 3.
Args:
input (Variable): The first input tensor variable.
other (Variable): The second input tensor variable.
dim (int): The dimension to take the cross-product in.
Returns:
Variable: A Tensor with same data type as `input`.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
data_x = np.array([[1.0, 1.0, 1.0],
[2.0, 2.0, 2.0],
[3.0, 3.0, 3.0]])
data_y = np.array([[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(data_x)
y = fluid.dygraph.to_variable(data_y)
out_z1 = paddle.cross(x, y)
print(out_z1.numpy())
#[[-1. -1. -1.]
# [ 2. 2. 2.]
# [-1. -1. -1.]]
out_z2 = paddle.cross(x, y, dim=1)
print(out_z2.numpy())
#[[0. 0. 0.]
# [0. 0. 0.]
# [0. 0. 0.]]
"""
helper = LayerHelper("cross", **locals())
if in_dygraph_mode():
if dim is not None:
return core.ops.cross(input, other, 'dim', dim)
else:
return core.ops.cross(input, other)
out = helper.create_variable_for_type_inference(input.dtype)
attrs = dict()
if dim is not None:
attrs['dim'] = dim
helper.append_op(
type='cross',
inputs={'X': input,
'Y': other},
outputs={'Out': out},
attrs=attrs)
return out
def bmm(x, y, name=None):
"""
Applies batched matrix multiplication to two tensors.
Both input tensors must be three-dimensional and share the same batch size.
If x is a (b, m, k) tensor and y is a (b, k, n) tensor, the output will be a (b, m, n) tensor.
Args:
x (Variable): The input variable which is a Tensor or LoDTensor.
y (Variable): The input variable which is a Tensor or LoDTensor.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Variable: The product Tensor (or LoDTensor) variable.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
x = fluid.layers.data(name='x', shape=[10, 3, 4], dtype='float32')
y = fluid.layers.data(name='y', shape=[10, 4, 5], dtype='float32')
out = paddle.bmm(x, y)
# In dygraph mode:
# size input1: (2, 2, 3) and input2: (2, 3, 2)
input1 = np.array([[[1.0, 1.0, 1.0],[2.0, 2.0, 2.0]],[[3.0, 3.0, 3.0],[4.0, 4.0, 4.0]]])
input2 = np.array([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(input1)
y = fluid.dygraph.to_variable(input2)
out = paddle.bmm(x, y)
#output size: (2, 2, 2)
#output value:
#[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]]
out_np = out.numpy()
"""
helper = LayerHelper('bmm', **locals())
if in_dygraph_mode():
return core.ops.bmm(x, y)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})
return out
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_type
from ..fluid.layers.layer_function_generator import templatedoc
# TODO: define logic functions of a tensor
__all__ = [
'equal',
# 'greater_equal',
# 'greater_than',
# 'is_empty',
# 'isfinite',
# 'less_equal',
# 'less_than',
# 'logical_and',
# 'logical_not',
# 'logical_or',
# 'logical_xor',
# 'not_equal',
# 'reduce_all',
# 'reduce_any',
'allclose',
'elementwise_equal',
# 'isnan'
]
def equal(x, y, axis=-1, name=None):
"""
This OP returns the truth value of :math:`x == y`. True if two inputs have the same elements, False otherwise.
**NOTICE**: The output of this OP has no gradient, and this OP supports broadcasting by :attr:`axis`.
Args:
x(Variable): Tensor, data type is float32, float64, int32, int64.
y(Variable): Tensor, data type is float32, float64, int32, int64.
axis(int32, optional): If X.dimension != Y.dimension, Y.dimension
must be a subsequence of x.dimension. And axis is the start
dimension index for broadcasting Y onto X. For more detail,
please refer to OP:`elementwise_add`.
name(str, optional): Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`. Default: None.
Returns:
Variable: output Tensor, data type is bool, value is [False] or [True].
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
import numpy as np
label = fluid.layers.assign(np.array([3, 4], dtype="int32"))
label_1 = fluid.layers.assign(np.array([1, 2], dtype="int32"))
limit = fluid.layers.assign(np.array([3, 4], dtype="int32"))
out1 = paddle.equal(x=label, y=limit) #out1=[True]
out2 = paddle.equal(x=label_1, y=limit) #out2=[False]
.. code-block:: python
import paddle.fluid as fluid
import paddle
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((3, 4)).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[3,4], dtype='float32')
out = paddle.equal(x, y, axis=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
res = exe.run(feed=gen_data(),
fetch_list=[out])
print(res[0]) #[False]
"""
helper = LayerHelper("equal_reduce", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
attrs = {}
attrs['axis'] = axis
helper.append_op(
type='equal_reduce',
inputs={'X': [x],
'Y': [y]},
attrs=attrs,
outputs={'Out': [out]})
return out
@templatedoc()
def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
"""
${comment}
Args:
input(inputtype):{input_comment}.
other(othertype):{other_comment}.
rtol(rtoltype,optional):{rtol_comment}.
atol(atoltype,optional):{atol_comment}.
equal_nan(equalnantype,optional):{equal_nan_comment}.
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
Returns:
${out_comment}.
Return Type:
${out_type}
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
use_cuda = fluid.core.is_compiled_with_cuda()
a = fluid.data(name="a", shape=[2], dtype='float32')
b = fluid.data(name="b", shape=[2], dtype='float32')
result = paddle.allclose(a, b, rtol=1e-05, atol=1e-08,
equal_nan=False, name="ignore_nan")
result_nan = paddle.allclose(a, b, rtol=1e-05, atol=1e-08,
equal_nan=True, name="equal_nan")
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x = np.array([10000., 1e-07]).astype("float32")
y = np.array([10000.1, 1e-08]).astype("float32")
result_v, result_nan_v = exe.run(
feed={'a': x, 'b': y},
fetch_list=[result, result_nan])
print(result_v, result_nan_v)
# Output: (array([False]), array([False]))
x = np.array([10000., 1e-08]).astype("float32")
y = np.array([10000.1, 1e-09]).astype("float32")
result_v, result_nan_v = exe.run(
feed={'a': x, 'b': y},
fetch_list=[result, result_nan])
print(result_v, result_nan_v)
# Output: (array([ True]), array([ True]))
x = np.array([1.0, float('nan')]).astype("float32")
y = np.array([1.0, float('nan')]).astype("float32")
result_v, result_nan_v = exe.run(
feed={'a': x, 'b': y},
fetch_list=[result, result_nan])
print(result_v, result_nan_v)
# Output: (array([False]), array([ True]))
"""
check_type(rtol, 'rtol', float, 'allclose')
check_type(atol, 'atol', float, 'allclose')
check_type(equal_nan, 'equal_nan', bool, 'allclose')
helper = LayerHelper("allclose", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
inputs = {'Input': input, 'Other': other}
outputs = {'Out': out}
attrs = {'rtol': rtol, 'atol': atol, 'equal_nan': equal_nan}
helper.append_op(
type='allclose', inputs=inputs, outputs=outputs, attrs=attrs)
return out
def elementwise_equal(x, y, name=None):
"""
This layer returns the truth value of :math:`x == y` elementwise.
Args:
x(Variable): Tensor, data type is float32, float64, int32, int64.
y(Variable): Tensor, data type is float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: output Tensor, its shape is the same as the input Tensor's,
and the data type is bool. The output of this op has stop_gradient set to True.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
label = fluid.layers.assign(np.array([3, 3], dtype="int32"))
limit = fluid.layers.assign(np.array([3, 2], dtype="int32"))
out1 = paddle.elementwise_equal(x=label, y=limit) #out1=[True, False]
"""
helper = LayerHelper("elementwise_equal", **locals())
out = helper.create_variable_for_type_inference(dtype='bool')
out.stop_gradient = True
helper.append_op(
type='equal',
inputs={'X': [x],
'Y': [y]},
outputs={'Out': [out]},
attrs={'force_cpu': False})
return out
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ..fluid.layers import core, reshape
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import Variable, OpProtoHolder, in_dygraph_mode, convert_np_dtype_to_dtype_
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from ..fluid.layers.tensor import fill_constant
from ..fluid.layers import utils
# TODO: define functions to manipulate a tensor
__all__ = [
# 'cast',
# 'concat',
# 'expand',
# 'expand_as',
# 'flatten',
'gather',
# 'gather_nd',
# 'reshape',
# 'reverse',
# 'scatter',
# 'scatter_nd_add',
# 'scatter_nd',
# 'shard_index',
# 'slice',
'split',
'squeeze',
'stack',
# 'strided_slice',
# 'transpose',
# 'unique',
# 'unique_with_counts',
'unsqueeze',
# 'unstack',
'flip',
# 'unbind',
'roll'
]
def flip(input, dims, name=None):
"""
Reverses the order of an n-D tensor along the given axes in dims.
Args:
input (Variable): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor
should be float32, float64, int32, int64, bool.
dims (list): The axes to flip on.
name (str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: Tensor or LoDTensor calculated by flip layer. The data type is the same as the input's.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="x", shape=[-1, 2, 2], dtype='float32')
output = paddle.flip(input, dims=[0, 1])
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
img = np.arange(12).reshape((3,2,2)).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
print(res) # [[[10, 11], [8, 9]], [[6, 7], [4, 5]], [[2, 3], [0, 1]]]
"""
helper = LayerHelper("flip", **locals())
check_type(input, 'X', (Variable), 'flip')
dtype = helper.input_dtype()
check_dtype(dtype, 'X',
['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
'flip')
check_type(dims, 'dims', (list, tuple), 'flip')
assert len(dims) > 0, 'len(dims) must be greater than 0.'
if name is None:
out = helper.create_variable_for_type_inference(dtype)
else:
out = helper.create_variable(name=name, dtype=dtype, persistable=False)
helper.append_op(
type="flip",
inputs={"X": input},
outputs={"Out": out},
attrs={"dims": dims})
return out
def roll(input, shifts, dims=None):
"""
Roll the `input` tensor along the given dimension(s). Elements that are shifted beyond
the last position are re-introduced at the first position. If a dimension is not specified,
the tensor will be flattened before rolling and then restored to the original shape.
Args:
input (Variable): The input tensor variable.
shifts (int|list|tuple): The number of places by which the elements
of the `input` tensor are shifted.
dims (int|list|tuple|None): Dimensions along which to roll.
Returns:
Variable: A Tensor with same data type as `input`.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
data = np.array([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 9.0]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(data)
out_z1 = paddle.roll(x, shifts=1)
print(out_z1.numpy())
#[[9. 1. 2.]
# [3. 4. 5.]
# [6. 7. 8.]]
out_z2 = paddle.roll(x, shifts=1, dims=0)
print(out_z2.numpy())
#[[7. 8. 9.]
# [1. 2. 3.]
# [4. 5. 6.]]
"""
helper = LayerHelper("roll", **locals())
origin_shape = input.shape
if isinstance(shifts, int):
shifts = [shifts]
if isinstance(dims, int):
dims = [dims]
if dims:
check_type(dims, 'dims', (list, tuple), 'roll')
check_type(shifts, 'shifts', (list, tuple), 'roll')
if in_dygraph_mode():
if dims is None:
input = core.ops.reshape(input, 'shape', [-1, 1])
dims = [0]
out = core.ops.roll(input, 'dims', dims, 'shifts', shifts)
return core.ops.reshape(out, 'shape', origin_shape)
out = helper.create_variable_for_type_inference(input.dtype)
if dims is None:
input = reshape(input, shape=[-1, 1])
dims = [0]
helper.append_op(
type='roll',
inputs={'X': input},
outputs={'Out': out},
attrs={'dims': dims,
'shifts': shifts})
out = reshape(out, shape=origin_shape, inplace=True)
return out
def stack(x, axis=0, out=None, name=None):
"""
This OP stacks all the inputs :code:`x` along axis.
.. code-block:: text
Case 1:
Input:
x[0].shape = [1, 2]
x[0].data = [ [1.0 , 2.0 ] ]
x[1].shape = [1, 2]
x[1].data = [ [3.0 , 4.0 ] ]
x[2].shape = [1, 2]
x[2].data = [ [5.0 , 6.0 ] ]
Attrs:
axis = 0
Output:
Out.dims = [3, 1, 2]
Out.data =[ [ [1.0, 2.0] ],
[ [3.0, 4.0] ],
[ [5.0, 6.0] ] ]
Case 2:
Input:
x[0].shape = [1, 2]
x[0].data = [ [1.0 , 2.0 ] ]
x[1].shape = [1, 2]
x[1].data = [ [3.0 , 4.0 ] ]
x[2].shape = [1, 2]
x[2].data = [ [5.0 , 6.0 ] ]
Attrs:
axis = 1 or axis = -2
Output:
Out.shape = [1, 3, 2]
Out.data =[ [ [1.0, 2.0]
[3.0, 4.0]
[5.0, 6.0] ] ]
Args:
x (Variable|list(Variable)): Input :code:`x` can be a single Tensor, a :code:`list` of Tensors.
If :code:`x` is a :code:`list`, the shapes of all these Tensors
must be the same. Supposing input is N dims
Tensors :math:`[d_0, d_1, ..., d_{n-1}]`, the output is N+1 dims
Tensor :math:`[d_0, d_1, ..., d_{axis-1}, len(x), d_{axis}, ..., d_{n-1}]`.
Support data types: float32, float64, int32, int64.
axis (int, optional): The axis along which all inputs are stacked. ``axis`` range is :math:`[-(R+1), R+1)`.
where R is the rank of the first input tensor. If ``axis`` < 0, :math:`axis=axis+rank(x[0])+1`.
The default value of axis is 0.
Returns:
Variable: The stacked Tensor, has same data type with input Tensors. Output dim is :math:`rank(x[0])+1`.
Example:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
data1 = np.array([[1.0, 2.0]])
data2 = np.array([[3.0, 4.0]])
data3 = np.array([[5.0, 6.0]])
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(data1)
x2 = fluid.dygraph.to_variable(data2)
x3 = fluid.dygraph.to_variable(data3)
result = paddle.stack([x1, x2, x3], axis=0)
# result shape: [3, 1, 2]
# result value: [[[1.0, 2.0]],
# [[3.0, 4.0]],
# [[5.0, 6.0]]]
"""
helper = LayerHelper('stack', **locals())
axis = 0 if axis is None else axis
if not isinstance(x, list) and not isinstance(x, tuple):
x = [x]
out = helper.create_variable_for_type_inference(x[0].dtype)
if not in_dygraph_mode() and \
x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
assert len(x) == 1, "If the elements of 'x' in stack are Variable(LoDTensorArray), " \
"number of the elements must be 1, but received %s." % len(x)
out_index = helper.create_variable_for_type_inference(dtype="int32")
helper.append_op(
type='tensor_array_to_tensor',
inputs={'X': x[0]},
outputs={'Out': [out],
'OutIndex': [out_index]},
attrs={'axis': axis,
'use_stack': True})
else:
helper.append_op(
type='stack',
inputs={'X': x},
outputs={'Y': out},
attrs={'axis': axis})
return out
def split(input, num_or_sections, dim=-1, name=None):
"""
Split the input tensor into multiple sub-Tensors.
Args:
input (Variable): The input variable which is an N-D Tensor or LoDTensor, data type being float32, float64, int32 or int64.
num_or_sections (int|list|tuple): If :attr:`num_or_sections` is an integer,
then the integer indicates the number of equal sized sub-Tensors
that the Tensor will be divided into. If :attr:`num_or_sections`
is a list or tuple, the length of it indicates the number of
sub-Tensors and the elements in it indicate the sizes of the sub-Tensors along
:attr:`dim`, in order. The length of the list must not be larger than the Tensor's size along :attr:`dim`.
dim (int32|Variable, optional): A scalar with type ``int32`` or a ``Tensor`` with shape [1] and type ``int32``. The dimension along which to split. If :math:`dim < 0`, the
dimension to split along is :math:`rank(input) + dim`. Default is -1.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
list(Variable): The list of segmented Tensor variables.
Raises:
TypeError: num_or_sections is not int, list or tuple.
TypeError: dim is not int or Variable.
Example:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
with fluid.dygraph.guard():
input_1 = np.random.random([4, 6, 6]).astype("int32")
# input is a variable which shape is [4, 6, 6]
input = fluid.dygraph.to_variable(input_1)
x0, x1, x2 = paddle.split(input, num_or_sections=3, dim=1)
# x0.shape [4, 2, 6]
# x1.shape [4, 2, 6]
# x2.shape [4, 2, 6]
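A sketch of the list form of ``num_or_sections``; the section sizes below
are assumptions chosen to sum to the size of ``dim``:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
with fluid.dygraph.guard():
    input = fluid.dygraph.to_variable(
        np.random.random([4, 6, 6]).astype("int32"))
    x0, x1, x2 = paddle.split(input, num_or_sections=[2, 3, 1], dim=1)
    # x0.shape [4, 2, 6]
    # x1.shape [4, 3, 6]
    # x2.shape [4, 1, 6]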
"""
if in_dygraph_mode():
num = None
attrs = ()
if isinstance(dim, Variable):
dim = dim.numpy()
assert dim.shape == (1,
), "dim of type Variable should have shape [1]"
dim = dim[0]
dim = (len(input.shape) + dim) if dim < 0 else dim
attrs += ('axis', dim)
if isinstance(num_or_sections, int):
num = num_or_sections
attrs += ('num', num_or_sections)
elif isinstance(num_or_sections, (list, tuple)):
num = len(num_or_sections)
if utils._contain_var(num_or_sections):
raise TypeError(
"The type of 'num_or_sections' in split must be int or list[int] or tuple[int] in Dygraph mode, but "
"received %s, which contains Variable." %
(type(num_or_sections)))
else:
attrs += ('sections', list(num_or_sections))
else:
raise TypeError(
"The type of 'num_or_sections' in split must be int or list in Dygraph mode, but "
"received %s." % (type(num_or_sections)))
return core.ops.split(input, num, *attrs)
if not isinstance(num_or_sections, (int, list, tuple)):
raise TypeError(
"The type of 'num_or_sections' in split must be int, list or "
"tuple, but received %s." % (type(num_or_sections)))
if not isinstance(dim, (int, Variable)):
raise TypeError(
"The type of 'dim' in split must be int or Variable, but "
"received %s." % (type(dim)))
helper = LayerHelper('split', **locals())
input_shape = input.shape
inputs = {'X': input}
attrs = {'num': num_or_sections if isinstance(num_or_sections, int) else 0}
def _get_SectionsTensorList(one_list):
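# Converts a mixed list of Python ints and Variables into a list of
# int32 tensors fed to the op as SectionsTensorList; at most one entry
# may be -1, whose size is then inferred.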
tensor_list = []
unk_dim_idx = -1
for idx, dim_size in enumerate(one_list):
if isinstance(dim_size, Variable):
dim_size.stop_gradient = True
tensor_list.append(dim_size)
else:
assert (isinstance(dim_size, int))
if dim_size == -1:
assert unk_dim_idx == -1, (
"Only one value of 'num_or_section' in split can "
"be -1. But received num_or_section[%d] is also -1." %
idx)
unk_dim_idx = idx
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant(
[1], 'int32', dim_size, force_cpu=True, out=temp_out)
tensor_list.append(temp_out)
return tensor_list
if isinstance(dim, Variable):
dim.stop_gradient = True
inputs['AxisTensor'] = dim
else:
dim = (len(input_shape) + dim) if dim < 0 else dim
attrs['axis'] = dim
if isinstance(num_or_sections, int):
assert num_or_sections > 1, 'num_or_sections must be more than 1.'
if isinstance(dim, int) and input_shape[dim] > 0:
assert input_shape[dim] % num_or_sections ==0, \
"The input's size along the split dimension " \
"must be evenly divisible by Attr(num_or_sections). " \
"But %d is not evenly divisible by %d. " % (num_or_sections,input_shape[dim])
num = num_or_sections
else:
if isinstance(dim, int) and input_shape[dim] > 0:
assert len(num_or_sections) <= input_shape[
dim], 'len(num_or_sections) must not be more than input.shape[dim].'
num = len(num_or_sections)
attrs['sections'] = list(
map(lambda ele: -1 if isinstance(ele, Variable) else ele,
num_or_sections))
if utils._contain_var(num_or_sections):
inputs['SectionsTensorList'] = _get_SectionsTensorList(
num_or_sections)
outs = [
helper.create_variable_for_type_inference(dtype=helper.input_dtype())
for i in range(num)
]
helper.append_op(
type='split', inputs=inputs, outputs={'Out': outs}, attrs=attrs)
return outs
def squeeze(input, axes, out=None, name=None):
"""
This OP squeezes single-dimensional entries out of the input tensor's shape. If axes is provided,
the dims listed in axes are removed; each dim selected by axes must be of size one. If axes is
not provided, all dims of size one are removed.
.. code-block:: text
Case1:
Input:
X.shape = (1, 3, 1, 5)
axes = [0]
Output:
Out.shape = (3, 1, 5)
Case2:
Input:
X.shape = (1, 3, 1, 5)
axes = []
Output:
Out.shape = (3, 5)
Case3:
Input:
X.shape = [1,3,1,5]
axes = [-2]
Output:
Out.shape = [1,3,5]
Args:
input (Variable): The input Tensor. Support data type: float32, float64, int8, int32, int64.
axes (list): One integer or List of integers, indicating the dimensions to be squeezed.
Axes range is :math:`[-rank(input), rank(input))`.
If axes is negative, :math:`axes=axes+rank(input)`.
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Returns:
Variable: Output squeezed Tensor. Data type is same as input Tensor.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
with fluid.dygraph.guard():
input_1 = np.random.random([5, 1, 10]).astype("int32")
# input is a variable which shape is [5, 1, 10]
input = fluid.dygraph.to_variable(input_1)
output = paddle.squeeze(input, axes=[1])
# output.shape [5, 10]
"""
helper = LayerHelper("squeeze", **locals())
check_variable_and_dtype(input, 'input',
['float32', 'float64', 'int8', 'int32', 'int64'],
'squeeze')
check_type(axes, 'axes', list, 'squeeze')
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="squeeze2",
inputs={"X": input},
attrs={"axes": axes},
outputs={"Out": out,
"XShape": x_shape})
return out
def unsqueeze(input, axes, out=None, name=None):
"""
Inserts single-dimensional entries into the shape of a Tensor. Takes one
required argument, axes, a list of dimensions to be inserted.
Dimension indices in axes refer to positions in the output tensor.
For example:
.. code-block:: text
Given a tensor such that tensor with shape [3, 4, 5],
then Unsqueezed tensor with axes=[0, 4] has shape [1, 3, 4, 5, 1].
Args:
input (Variable): The input Tensor to be unsqueezed. It is a N-D Tensor of data types float32, float64, int32.
axes (int|list|tuple|Variable): Indicates the dimensions to be inserted. The data type is ``int32``. If ``axes`` is a list or tuple, its elements should be integers or Tensors with shape [1]. If ``axes`` is a Variable, it should be a 1-D Tensor.
name (str|None): Name for this layer.
Returns:
Variable: Output unsqueezed Tensor, with data type being float32, float64, int32, int64.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
with fluid.dygraph.guard():
input_1 = np.random.random([5, 10]).astype("int32")
# input is a variable which shape is [5, 10]
input = fluid.dygraph.to_variable(input_1)
output = paddle.unsqueeze(input, axes=[1])
# output.shape [5, 1, 10]
"""
if not isinstance(axes, (int, list, tuple, Variable)):
raise TypeError(
"The type of 'axes' in unsqueeze must be int, list, tuple or Variable, but "
"received %s." % (type(axes)))
helper = LayerHelper("unsqueeze2", **locals())
inputs = {"X": input}
attrs = {}
def _to_Variable_list(one_list):
Variable_list = []
for ele in one_list:
if isinstance(ele, Variable):
ele.stop_gradient = True
Variable_list.append(ele)
else:
assert (isinstance(ele, int))
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', ele, force_cpu=True, out=temp_out)
Variable_list.append(temp_out)
return Variable_list
if isinstance(axes, int):
axes = [axes]
if isinstance(axes, Variable):
axes.stop_gradient = True
inputs["AxesTensor"] = axes
elif isinstance(axes, (list, tuple)):
contain_var = any(isinstance(ele, Variable) for ele in axes)
if contain_var:
inputs["AxesTensorList"] = _to_Variable_list(axes)
else:
attrs["axes"] = axes
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="unsqueeze2",
inputs=inputs,
attrs=attrs,
outputs={"Out": out,
"XShape": x_shape})
return out
def gather(input, index, overwrite=True):
"""
**Gather Layer**
Output is obtained by gathering entries of the outer-most dimension
of X indexed by `index` and concatenating them together.
.. math::
Out = X[Index]
.. code-block:: text
Given:
X = [[1, 2],
[3, 4],
[5, 6]]
Index = [1, 2]
Then:
Out = [[3, 4],
[5, 6]]
Args:
input (Variable): The source input tensor with rank>=1. Supported data type is
int32, int64, float32, float64 and uint8 (only for CPU),
float16 (only for GPU).
index (Variable): The index input tensor with rank=1. Data type is int32 or int64.
overwrite (bool, optional): The mode that updating the grad when has same index.
If True, use the overwrite mode to update the grad of the same index,
if False, use the accumulate mode to update the grad of the same index.
Default value is True.
Returns:
output (Variable): The output is a tensor with the same rank as input.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
with fluid.dygraph.guard():
input_1 = np.array([[1,2],[3,4],[5,6]])
index_1 = np.array([0,1])
input = fluid.dygraph.to_variable(input_1)
index = fluid.dygraph.to_variable(index_1)
output = paddle.gather(input, index)
# expected output: [[1,2],[3,4]]
"""
helper = LayerHelper('gather', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="gather",
inputs={"X": input,
"Index": index},
outputs={"Out": out},
attrs={'overwrite': overwrite})
return out
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
math functions
"""
from __future__ import print_function
from paddle.common_ops_import import *
from ..fluid import layers
from ..fluid.framework import core, _varbase_creator
from ..fluid.layers.layer_function_generator import _generate_doc_string_
import sys
# TODO: define math functions
# yapf: disable
__all__ = [
# 'abs',
# 'acos',
# 'asin',
'atan',
# 'ceil',
# 'cos',
# 'cumsum',
# 'elementwise_add',
# 'elementwise_div',
# 'elementwise_floordiv',
# 'elementwise_max',
# 'elementwise_min',
# 'elementwise_mod',
# 'elementwise_mul',
# 'elementwise_pow',
# 'elementwise_sub',
# 'exp',
# 'floor',
# 'increment',
# 'log',
'mul',
# 'multiplex',
'pow',
# 'reciprocal',
# 'reduce_max',
# 'reduce_min',
# 'reduce_prod',
# 'reduce_sum',
# 'round',
# 'rsqrt',
# 'scale',
# 'sign',
'sin',
'sqrt',
# 'square',
# 'stanh',
'sum',
# 'sums',
'tanh',
'elementwise_sum',
'max',
'min',
'mm',
'div',
'add',
# 'atan',
'logsumexp',
# 'inverse',
'log1p',
# 'erf',
'addcmul',
'addmm',
'clamp',
]
# yapf: enable.
def generate_op_noattr(op_type):
"""Register the Python layer for an Operator without Attribute..
Args:
op_type: The name of the operator to be created.
This function takes in the operator type (sin, tanh etc) and
creates the operator functionality.
"""
op_proto = OpProtoHolder.instance().get_op_proto(op_type)
def func(x, name=None, out=None):
if in_dygraph_mode():
op = getattr(core.ops, op_type)
return op(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
op_type)
helper = LayerHelper(op_type, **locals())
if name and out:
warnings.warn(
"Both name and out parameters have been set in fluid.tensor.math.%s(), only out will take effect to specify the result storage. "
"You can discard either one to solve this warning." % op_type,
category=UserWarning,
stacklevel=2)
if not out:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": out})
return out
func.__name__ = op_type
func.__doc__ = _generate_doc_string_(
op_proto,
additional_args_lines=[
"name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.\n "
"out(Variable, optional): The default value is None. Optional output can be any created Variable that meets the requirements to store the result of operation. if out is None, a new Varibale will be create to store the result."
])
func.__doc__ = func.__doc__ + """
Return type
Variable
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
inputs = fluid.data(name="x", shape = [None, 4], dtype='float32')
output = paddle.%s(inputs)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
#input.shape=1X4, batch_size=1
img = np.array([[1.0, 2.0, 3.0, 4.0]]).astype(np.float32)
res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
print(res)
""" % op_type
return func
@templatedoc()
def pow(input, exponent, out=None, name=None):
"""
This is Pow Activation Operator.
:math:`out = input^{exponent}`
Args:
input(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32`` or ``float64``.
exponent(float32|Variable): A scalar with type ``float32`` or a ``Tensor`` with shape [1] and type ``float32``.
out (Variable, optional): The Variable that stores results of the operation.
If out is None, a new Variable will be created to store the results.
name(str, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``input``.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[32,32], dtype="float32")
# example 1: argument exponent is float
res = fluid.data(name="output", shape=[32,32], dtype="float32")
y_1 = paddle.pow(x, 2.0, out=res)
# y_1 is x^{2.0}
# example 2: argument exponent is Variable
exponent_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
res = fluid.data(name="output", shape=[32,32], dtype="float32")
y_2 = paddle.pow(x, exponent_tensor, out=res)
# y_2 is x^{3.0}
"""
helper = LayerHelper('pow', **locals())
inputs = {'X': input}
attrs = {}
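# A Variable exponent is wired in as the FactorTensor input; a plain
# Python scalar becomes the 'factor' attribute instead.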
if isinstance(exponent, Variable):
exponent.stop_gradient = True
inputs['FactorTensor'] = exponent
else:
attrs['factor'] = exponent
if out is None:
out = helper.create_variable_for_type_inference(dtype=input.dtype)
else:
check_dtype(
out.dtype, out.name,
convert_dtype(input.dtype), 'pow',
'(The out data type in pow must be the same as the input data type.)')
if name:
warnings.warn(
"The output Variable name of the paddle.tensor.pow operation can only be given by parameter out or name. \
When parameter out and name are set at the same time, out has a higher priority than name. \
Finally, the output Variable name is same as the out name %s"
%
out.name,
category=UserWarning,
stacklevel=2)
helper.append_op(
type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, out=None, name=None):
"""
Mul Operator.
This operator is used to perform matrix multiplication for input $x$ and $y$.
The equation is:
.. math::
Out = x * y
Both the input $x$ and $y$ can carry the LoD (Level of Details) information, or not.
But the output only shares the LoD information with input $x$.
Args:
x (Variable): The first input Tensor/LoDTensor of mul_op.
y (Variable): The second input Tensor/LoDTensor of mul_op.
x_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs.
If the input $x$ is a tensor with more than two dimensions, $x$ will be flattened into a two-dimensional
matrix first. The flattening rule is: the first `num_col_dims` will be flattened to form the first
dimension of the final matrix (the height of the matrix), and the rest `rank(x) - num_col_dims`
dimensions are flattened to form the second dimension of the final matrix (the width of the matrix).
As a result, height of the flattened matrix is equal to the product of $x$'s first `x_num_col_dims` dimensions'
sizes, and width of the flattened matrix is equal to the product of $x$'s last `rank(x) - num_col_dims`
dimensions' sizes. For example, suppose $x$ is a 5-dimensional tensor with the shape [2, 3, 4, 5, 6],
and `x_num_col_dims` = 3. Thus, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default is 1.
y_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs. If the
input $y$ is a tensor with more than two dimensions, $y$ will be flattened into a two-dimensional matrix first.
The attribute `y_num_col_dims` determines how $y$ is flattened. See comments of `x_num_col_dims` for more details.
Default is 1.
out(Variable, optional): The Variable that stores results of the operation. If out is None,
a new Variable will be created to store the results.
name (str, optional): Name of the output. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`. Default is None. If both of out and name are not None,
the output name will be same as out.
Returns:
Variable(Tensor/LoDTensor): The output Tensor/LoDTensor of mul op.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
dataX = fluid.data(name="dataX", shape=[2, 5], dtype="float32")
dataY = fluid.data(name="dataY", shape=[5, 3], dtype="float32")
res = fluid.data(name="output", shape=[2, 3], dtype="float32")
output = paddle.mul(dataX, dataY,
x_num_col_dims = 1,
y_num_col_dims = 1,
out=res)
"""
inputs = {"X": [x], "Y": [y]}
attrs = {"x_num_col_dims": x_num_col_dims, "y_num_col_dims": y_num_col_dims}
if in_dygraph_mode():
outs = core.ops.mul(inputs, attrs)
return outs['Out'][0]
helper = LayerHelper("mul", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')
if out is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
check_dtype(
out.dtype, out.name,
convert_dtype(x.dtype), 'mul',
'(The out data type in mul must be the same as the input data type.)')
if name:
warnings.warn(
"The output Variable name of the paddle.tensor.pow operation can only be given by parameter out or name.\
When parameter out and name are set at the same time, out has a higher priority than name. \
Finally, the output Variable name is same as the out name %s"
%
out.name,
category=UserWarning,
stacklevel=2)
helper.append_op(
type="mul", inputs={"X": x,
"Y": y}, attrs=attrs, outputs={"Out": out})
return out
__ops__noattr__ = [
'atan',
'sin',
'sqrt',
'tanh',
]
for _OP in set(__ops__noattr__):
globals()[_OP] = generate_op_noattr(_OP)
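# After the loop above, `atan`, `sin`, `sqrt` and `tanh` exist as plain
# module-level functions produced by generate_op_noattr. A hypothetical
# dygraph call (inputs are illustrative only):
#   with fluid.dygraph.guard():
#       y = sin(fluid.dygraph.to_variable(np.array([0., 1.], dtype='float32')))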
@dygraph_only
def _elementwise_op_in_dygraph(x,
y,
axis=-1,
act=None,
use_mkldnn=False,
op_name=None):
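# Dygraph fast path: invoke the C++ op directly and append the optional
# activation, bypassing LayerHelper/program construction entirely.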
op = getattr(core.ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
out, act, use_mkldnn=use_mkldnn)
def _elementwise_op(helper):
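# Shared static-graph body for the elementwise_* wrappers: pulls x, y,
# axis, act, name and out from the helper kwargs, validates dtypes,
# creates (or reuses) the output variable, then appends the op followed
# by the optional activation.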
op_type = helper.layer_type
original_op_type = helper.kwargs.get('original_op_type', op_type)
x = helper.kwargs.get('x', None)
y = helper.kwargs.get('y', None)
assert x is not None, 'x cannot be None in {}'.format(original_op_type)
assert y is not None, 'y cannot be None in {}'.format(original_op_type)
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'],
original_op_type)
check_variable_and_dtype(
y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'],
original_op_type)
axis = helper.kwargs.get('axis', -1)
use_mkldnn = helper.kwargs.get('use_mkldnn', False)
name = helper.kwargs.get('name', None)
out = helper.kwargs.get('out', None)
if out is None:
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type=op_type,
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs={'axis': axis,
'use_mkldnn': use_mkldnn})
return helper.append_activation(out)
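# _elementwise_op reads x/y/axis/out/name from the LayerHelper's kwargs, so the
# public wrappers below only handle the dygraph fast path and then delegate via
# _elementwise_op(LayerHelper(op_type, **locals())).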
def add(x, y, alpha=1, out=None, name=None):
"""
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z1 = paddle.add(x, y)
z2 = paddle.add(x, y, alpha=10)
# z = x + y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z1.name, z2.name])
print(z_value[0]) # [3., 8., 6.]
print(z_value[1]) # [12. 53. 24.]
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((4, 5)).astype('float32')
}
x = fluid.data(name="x", shape=[2, 3, 4, 5], dtype='float32')
y = fluid.data(name="y", shape=[4, 5], dtype='float32')
z = paddle.add(x, y, name='z')
# z = x + y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value[0])
print(z_value[0].shape) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[5], dtype='float32')
z = paddle.add(x, y)
# z = x + y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value[0])
print(z_value[0].shape) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
x = fluid.data(name="x", shape=[3], dtype="float32")
y = fluid.data(name='y', shape=[3], dtype='float32')
output = fluid.data(name="output", shape=[3], dtype="float32")
z = paddle.add(x, y, out=output)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
data1 = np.array([2, 3, 4], dtype='float32')
data2 = np.array([1, 5, 2], dtype='float32')
z_value = exe.run(feed={'x': data1,
'y': data2},
fetch_list=[z])
print(z_value[0]) # [3. 8. 6.]
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
np_x = np.array([2, 3, 4]).astype('float64')
np_y = np.array([1, 5, 2]).astype('float64')
x = fluid.dygraph.to_variable(np_x)
y = fluid.dygraph.to_variable(np_y)
z = paddle.add(x, y, alpha=-0.5)
np_z = z.numpy()
print(np_z) # [1.5, 0.5, 3. ]
"""
op_type = 'elementwise_add'
axis = -1
act = None
if alpha != 1:
y = scale(y, scale=alpha)
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
original_op_type = 'add'
if name and out:
warnings.warn(
"Both name and out parameters have been set in paddle.tensor.%s, only out will take effect to specify the result storage. "
"You can discard either one to solve this warning." %
original_op_type,
category=UserWarning,
stacklevel=2)
return _elementwise_op(LayerHelper(op_type, **locals()))
def div(x, y, out=None, name=None):
"""
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.array([2, 3, 4]).astype('float32'),
"y": np.array([1, 5, 2]).astype('float32')
}
x = fluid.data(name="x", shape=[3], dtype='float32')
y = fluid.data(name="y", shape=[3], dtype='float32')
z = paddle.div(x, y)
# z = x / y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value) # [2., 0.6, 2.]
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.ones((2, 3, 4, 5)).astype('float32'),
"y": np.zeros((4, 5)).astype('float32')
}
x = fluid.data(name="x", shape=[2, 3, 4, 5], dtype='float32')
y = fluid.data(name="y", shape=[4, 5], dtype='float32')
z = paddle.div(x, y, name='z')
# z = x / y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value[0])
print(z_value[0].shape) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
def gen_data():
return {
"x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
"y": np.random.randint(1, 5, size=[5]).astype('float32')
}
x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
y = fluid.data(name="y", shape=[5], dtype='float32')
output = fluid.data(name="output", shape=[2,3,4,5], dtype="float32")
z = paddle.div(x, y, out=output)
# z = x / y
place = fluid.CPUPlace()
exe = fluid.Executor(place)
z_value = exe.run(feed=gen_data(),
fetch_list=[z.name])
print(z_value[0])
print(z_value[0].shape) # z.shape=[2,3,4,5]
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard(fluid.CPUPlace()):
np_x = np.array([2, 3, 4]).astype('float64')
np_y = np.array([1, 5, 2]).astype('float64')
x = fluid.dygraph.to_variable(np_x)
y = fluid.dygraph.to_variable(np_y)
z = paddle.div(x, y)
np_z = z.numpy()
print(np_z) # [2., 0.6, 2.]
"""
op_type = 'elementwise_div'
axis = -1
act = None
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
original_op_type = 'div'
if name and out:
warnings.warn(
"Both name and out parameters have been set in paddle.tensor.%s, only out will take effect to specify the result storage. "
"You can discard either one to solve this warning." %
original_op_type,
category=UserWarning,
stacklevel=2)
return _elementwise_op(LayerHelper(op_type, **locals()))
for func in [
add,
div,
]:
proto_dict = {'add': 'elementwise_add', 'div': 'elementwise_div'}
op_proto = OpProtoHolder.instance().get_op_proto(proto_dict[func.__name__])
if func.__name__ in ['add']:
additional_args_lines = [
"alpha (int|float, optional): The alpha factor of the input. Default is 1. If alpha is not 1, the equation becomes Out = X + alpha * Y.",
"out (Variable, optinal): The Variable that stores results of the operation. Default is None. If out is None, \
a new Variable will be created to store the results."
,
"name (string, optional): Name of the output. \
Default is None. It's used to print debug info for developers. Details: \
:ref:`api_guide_Name` "
]
else:
additional_args_lines = [
"out (Variable, optinal): The Variable that stores results of the operation. If out is None, \
a new Variable will be created to store the results."
,
"name (string, optional): Name of the output. \
Default is None. It's used to print debug info for developers. Details: \
:ref:`api_guide_Name` "
]
func.__doc__ = _generate_doc_string_(
op_proto,
additional_args_lines=additional_args_lines,
skip_attrs_set={"x_data_format", "y_data_format", "axis"
}) + """\n""" + str(func.__doc__)
def sum(input, dim=None, dtype=None, keep_dim=False, name=None):
"""
Computes the sum of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimensions along which the sum is performed. If
:attr:`None`, sum all elements of :attr:`input` and return a
Tensor variable with a single element, otherwise must be in the
range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
the dimension to reduce is :math:`rank + dim[i]`.
dtype(str, optional): The dtype of output tensor. The default value is None, in which case
the dtype of the output is the same as that of the input tensor.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, results of summation operation on the specified dim of input tensor,
it's data type is the same as input's Tensor.
Raises:
ValueError: If the :attr:`dtype` is not float64 or int64.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
out1 = paddle.sum(x) # [3.5]
out2 = paddle.sum(x, dim=0) # [0.3, 0.5, 1.1, 1.6]
out3 = paddle.sum(x, dim=-1) # [1.9, 1.6]
out4 = paddle.sum(x, dim=1, keep_dim=True) # [[1.9], [1.6]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1, 2], [3, 4]],
# [[5, 6], [7, 8]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
out5 = paddle.sum(y, dim=[1, 2]) # [10, 26]
out6 = paddle.sum(y, dim=[0, 1]) # [16, 20]
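# cast while summing (a sketch; only the widening casts float32 -> 'float64'
# and int32 -> 'int64' are accepted by this op):
out7 = paddle.sum(x, dim=0, dtype='float64')  # same values as out2, in float64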
"""
if dim is not None and not isinstance(dim, list):
dim = [dim]
attrs = {
'dim': dim if dim != None and dim != [] else [0],
'keep_dim': keep_dim,
'reduce_all': True if dim == None or dim == [] else False,
}
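# dtype_flag marks a requested widening cast (float32 -> float64 or
# int32 -> int64) that is fused into reduce_sum via in_dtype/out_dtype.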
dtype_flag = False
if dtype is not None:
if dtype in ['float64', 'int64']:
if (convert_dtype(input.dtype) == "float32" and dtype == "float64") or \
(convert_dtype(input.dtype) == "int32" and dtype == "int64"):
attrs.update({
'in_dtype': input.dtype,
'out_dtype': convert_np_dtype_to_dtype_(dtype)
})
dtype_flag = True
else:
raise ValueError(
"The value of 'dtype' in sum op must be float64, int64, but received of {}".
format(dtype))
if in_dygraph_mode():
reduce_all = True if dim == None or dim == [] else False
dim = dim if dim != None and dim != [] else [0]
if dtype_flag:
return core.ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
'reduce_all', reduce_all, 'in_dtype',
input.dtype, 'out_dtype',
convert_np_dtype_to_dtype_(dtype))
else:
return core.ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
'reduce_all', reduce_all)
check_variable_and_dtype(
input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_sum')
helper = LayerHelper('sum', **locals())
if dtype_flag:
out = helper.create_variable_for_type_inference(
dtype=convert_np_dtype_to_dtype_(dtype))
else:
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='reduce_sum',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs)
return out
@templatedoc(op_type="sum")
def elementwise_sum(inputs, name=None):
"""
${comment}
Case 1:
::
Input:
Input. Shape = [2, 3]
Input = [[1, 2, 3],
[4, 5, 6]]
Output:
The output. Shape = [2, 3]
Output = [[1, 2, 3],
[4, 5, 6]]
Case 2:
::
Input:
First input:
Input1. Shape = [2, 3]
Input1 = [[1, 2, 3],
[4, 5, 6]]
The second input:
Input2. Shape = [2, 3]
Input2 = [[7, 8, 9],
[10, 11, 12]]
Output:
The output. Shape = [2, 3]
Output = [[8, 10, 12],
[14, 16, 18]]
Args:
inputs (Variable|list(Variable)): A Variable list. The shape and data type of the list elements should be consistent.
Variable can be a multi-dimensional Tensor or LoDTensor, and data types can be: float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: the sum of input :math:`inputs` . Its shape and data type are consistent with :math:`inputs` .
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
input0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5)
input1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3)
sum = paddle.elementwise_sum([input0, input1])
# You can print out 'sum' via executor.
out = fluid.layers.Print(sum, message="the sum of input0 and input1: ")
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_main_program())
# The printed result is:
# 1570701754 the sum of input0 and input1: The place is:CPUPlace
# Tensor[elementwise_sum_0.tmp_0]
# shape: [2,3,]
# dtype: l
# data: 8,8,8,8,8,8,
# the sum of input0 and input1 is 2-D Tensor with shape [2,3].
# dtype is the corresponding C++ data type, which may vary in different environments.
# Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
# so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
# and '__int64' on Windows. They all represent 64-bit integer variables.
"""
helper = LayerHelper('elementwise_sum', **locals())
check_type(inputs, 'inputs', (Variable, tuple, list), 'elementwise_sum')
if isinstance(inputs, list) or isinstance(inputs, tuple):
if len(inputs) > 0:
for input in inputs:
check_variable_and_dtype(input, "inputs", \
['float32', 'float64', 'int32', 'int64'], 'elementwise_sum')
else:
check_variable_and_dtype(inputs, "inputs", \
['float32', 'float64', 'int32', 'int64'], 'elementwise_sum')
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('inputs'))
helper.append_op(
type='sum',
inputs={'X': inputs},
outputs={'Out': out},
attrs={'use_mkldnn': False})
return out
def mm(input, mat2, out=None, name=None):
"""
Applies matrix multiplication to two tensors.
Currently, the input tensors' rank can be any, but when the rank of
either input is larger than 3, the two inputs' ranks must be equal.
Also note that if the raw tensor :math:`input` or :math:`mat2` is rank-1 and
nontransposed, the prepended or appended dimension :math:`1` will be
removed after matrix multiplication.
Args:
input (Variable): The input variable which is a Tensor or LoDTensor.
mat2 (Variable): The input variable which is a Tensor or LoDTensor.
out (Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of the operation.
If out is None, a new Variable will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The product Tensor (or LoDTensor) variable.
Examples:
.. code-block:: python
# Examples to clarify shapes of the inputs and output
# input: [B, ..., M, K], mat2: [B, ..., K, N]
# paddle.mm(input, mat2)  # out: [B, ..., M, N]
# input: [B, M, K], mat2: [B, K, N]
# paddle.mm(input, mat2)  # out: [B, M, N]
# input: [B, M, K], mat2: [K, N]
# paddle.mm(input, mat2)  # out: [B, M, N]
# input: [M, K], mat2: [K, N]
# paddle.mm(input, mat2)  # out: [M, N]
# input: [B, M, K], mat2: [K]
# paddle.mm(input, mat2)  # out: [B, M]
# input: [K], mat2: [K]
# paddle.mm(input, mat2)  # out: [1]
import paddle
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[2, 3], dtype='float32')
mat2 = fluid.data(name='mat2', shape=[3, 2], dtype='float32')
out = paddle.mm(x, mat2) # out shape is [2, 2]
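# A minimal run sketch (hypothetical feed data):
import numpy as np
exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(fluid.default_main_program(),
feed={'x': np.ones((2, 3), dtype='float32'),
'mat2': np.ones((3, 2), dtype='float32')},
fetch_list=[out])
print(res)  # [[3. 3.] [3. 3.]] for all-ones inputs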
"""
if in_dygraph_mode():
if out is None:
out = _varbase_creator(dtype=input.dtype)
core.ops.matmul(input, mat2, out)
return out
def __check_input(x, y):
var_names = {'x': x, 'y': y}
for name, val in var_names.items():
check_variable_and_dtype(val, name,
['float16', 'float32', 'float64'], 'mm')
x_shape = list(x.shape)
y_shape = list(y.shape)
if len(x_shape) == 1:
x_shape = [1] + x_shape
if len(y_shape) == 1:
y_shape = y_shape + [1]
# check the inner 2 dimensions
if x_shape[-1] != y_shape[-2]:
if not ((x_shape[-1] == -1) or (y_shape[-2] == -1)):
raise ValueError(
"After performing an optional transpose, Input X's width should be "
"equal to Y's width for multiplication "
"prerequisites. But received X's shape: %s, Y's shape: %s\n"
% (x_shape, y_shape))
if len(y_shape) > 2 and len(x_shape) > 2:
for i, dim_x in enumerate(x_shape[:-2]):
# don't check neg shape
if dim_x < 0 or y_shape[i] < 0:
continue
if dim_x != y_shape[i]:
raise ValueError(
"When the matrix is larger than 2 dimensions, the higher "
"dimensional values of the two matrices need to be equal. "
"But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
"Y's shape: %s.\n" % (i, i, x_shape, y_shape))
__check_input(input, mat2)
helper = LayerHelper('mm', **locals())
if out is None:
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='matmul', inputs={'X': input,
'Y': mat2}, outputs={'Out': out})
return out
def addmm(input, x, y, alpha=1.0, beta=1.0, name=None):
"""
**addmm**
This operator is used to perform matrix multiplication for input $x$ and $y$.
$input$ is added to the final result.
The equation is:
.. math::
Out = alpha * x * y + beta * input
$Input$, $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $input$.
Args:
input (Variable): The input Tensor/LoDTensor to be added to the final result.
x (Variable): The first input Tensor/LoDTensor for matrix multiplication.
y (Variable): The second input Tensor/LoDTensor for matrix multiplication.
alpha (float, optional): Coefficient of $x*y$. Default is 1.0.
beta (float, optional): Coefficient of $input$. Default is 1.0.
name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.
Returns:
Variable(Tensor/LoDTensor): The output Tensor/LoDTensor of addmm op.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[2, 2], dtype='float32')
x = fluid.data(name='x', shape=[2, 2], dtype='float32')
y = fluid.data(name='y', shape=[2, 2], dtype='float32')
out = paddle.addmm( input=input, x=x, y=y, alpha=5.0, beta=0.5 )
data_x = np.ones((2, 2)).astype(np.float32)
data_y = np.ones((2, 2)).astype(np.float32)
data_input = np.ones((2, 2)).astype(np.float32)
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place)
results = exe.run(fluid.default_main_program(),
fetch_list=[out], feed={"input": data_input, 'x': data_x, "y": data_y})
print( np.array(results[0]) )
# [[10.5 10.5]
# [10.5 10.5]]
"""
if in_dygraph_mode():
out = core.ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
return out
inputs = {'Input': input, "X": x, "Y": y}
attrs = {'Alpha': alpha, 'Beta': beta}
helper = LayerHelper("addmm", **locals())
check_variable_and_dtype(input, 'Input', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out})
return out
def logsumexp(x, dim=None, keepdim=False, out=None, name=None):
"""
This operator calculates the log of the sum of exponentials of the input Tensor.
.. math::
logsumexp(x) = \log \sum \exp(x)
Parameters:
x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
dim (list|int, optional): The dimensions along which the sum is performed. If :attr:`None`,
sum all elements of :attr:`x` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(x), rank(x))`. If :math:`dim[i] < 0`,
the dimension to reduce is :math:`rank + dim[i]`.
keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor.
The result tensor will have one fewer dimension than :attr:`x` unless :attr:`keepdim`
is true, default value is False.
out (Variable, optional): Enable user to explicitly specify an output variable to save the result.
name (str, optional): The default value is None. Normally there is no need for user to
set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The calculated result Tensor/LoDTensor.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
np_x = np.random.uniform(0.1, 1, [10]).astype(np.float32)
x = fluid.dygraph.to_variable(np_x)
print(paddle.logsumexp(x).numpy())
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
np_x = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
x = fluid.dygraph.to_variable(np_x)
print(paddle.logsumexp(x, dim=1).numpy())
print(paddle.logsumexp(x, dim=[0, 2]).numpy())
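# cross-check against numpy (illustrative):
print(np.log(np.exp(np_x).sum(axis=1)))  # should match paddle.logsumexp(x, dim=1)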
"""
op_type = 'logsumexp'
assert x is not None, 'x cannot be None in {}'.format(op_type)
# reduce_sum does not support float16
check_variable_and_dtype(x, 'x', ['float32', 'float64'], op_type)
exp_out = layers.exp(x)
sum_out = layers.reduce_sum(exp_out, dim, keepdim)
if out is not None:
check_variable_and_dtype(out, 'out', [x.dtype], op_type)
helper = LayerHelper(op_type, **locals())
helper.append_op(type="log", inputs={"X": sum_out}, outputs={"Out": out})
return out
return layers.log(sum_out, name)
def max(input, dim=None, keep_dim=False, out=None, name=None):
"""
Computes the maximum of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimension along which the maximum is computed.
If :attr:`None`, compute the maximum over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
if out is None, a new Variable will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, results of maximum on the specified dim of input tensor,
it's data type is the same as input's Tensor.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
paddle.max(x) # [0.9]
paddle.max(x, dim=0) # [0.2, 0.3, 0.6, 0.9]
paddle.max(x, dim=-1) # [0.9, 0.7]
paddle.max(x, dim=1, keep_dim=True) # [[0.9], [0.7]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1.0, 2.0], [3.0, 4.0]],
# [[5.0, 6.0], [7.0, 8.0]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
paddle.max(y, dim=[1, 2]) # [4.0, 8.0]
paddle.max(y, dim=[0, 1]) # [7.0, 8.0]
"""
helper = LayerHelper('max', **locals())
if out is None:
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
check_variable_and_dtype(
input, 'input', ['float32', 'float64', 'int32', 'int64'], 'max')
reduce_all = True if dim == None or dim == [] else False
dim = dim if dim != None and dim != [] else [0]
if in_dygraph_mode():
return core.ops.reduce_max(input, 'dim', dim, 'keep_dim', keep_dim,
'reduce_all', reduce_all)
helper.append_op(
type='reduce_max',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim,
'keep_dim': keep_dim,
'reduce_all': reduce_all
})
return out
def min(input, dim=None, keep_dim=False, out=None, name=None):
"""
Computes the minimum of tensor elements over the given dimension.
Args:
input (Variable): The input variable which is a Tensor, the data type is float32,
float64, int32, int64.
dim (list|int, optional): The dimensions along which the minimum is computed.
If :attr:`None`, compute the minimum over all elements of
:attr:`input` and return a Tensor variable with a single element,
otherwise must be in the range :math:`[-rank(input), rank(input))`.
If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
keep_dim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have one fewer dimension
than the :attr:`input` unless :attr:`keep_dim` is true, default
value is False.
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
if out is None, a new Variable will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: Tensor, result of minimum on the specified dim of input tensor,
it's data type is the same as input's Tensor.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
paddle.min(x) # [0.1]
paddle.min(x, dim=0) # [0.1, 0.2, 0.5, 0.7]
paddle.min(x, dim=-1) # [0.2, 0.1]
paddle.min(x, dim=1, keep_dim=True) # [[0.2], [0.1]]
# y is a Tensor variable with shape [2, 2, 2] and elements as below:
# [[[1.0, 2.0], [3.0, 4.0]],
# [[5.0, 6.0], [7.0, 8.0]]]
# Each example is followed by the corresponding output tensor.
y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
paddle.min(y, dim=[1, 2]) # [1.0, 5.0]
paddle.min(y, dim=[0, 1]) # [1.0, 2.0]
"""
helper = LayerHelper('min', **locals())
if out is None:
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
check_variable_and_dtype(
input, 'input', ['float32', 'float64', 'int32', 'int64'], 'min')
reduce_all = True if dim == None or dim == [] else False
dim = dim if dim != None and dim != [] else [0]
if in_dygraph_mode():
return core.ops.reduce_min(input, 'dim', dim, 'keep_dim', keep_dim,
'reduce_all', reduce_all)
helper.append_op(
type='reduce_min',
inputs={'X': input},
outputs={'Out': out},
attrs={
'dim': dim,
'keep_dim': keep_dim,
'reduce_all': reduce_all
})
return out
def log1p(x, out=None, name=None):
"""
Calculates the natural log of one plus the given input tensor, element-wise.
.. math::
Out = \\ln(x+1)
Args:
x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
if out is None, a new Variable will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: The natural log of one plus the input LoDTensor or Tensor, computed element-wise.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
# Graph Organizing
x = fluid.data(name="x", shape=[2,1], dtype="float32")
res = paddle.log1p(x)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[0], [1]]).astype(np.float32)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
print(res_val) # [[0.], [0.6931472]]
"""
if in_dygraph_mode():
return core.ops.log1p(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
inputs = {'X': [x]}
helper = LayerHelper('log1p', **locals())
dtype = helper.input_dtype(input_param_name='x')
if out is None:
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
return out
def addcmul(input, tensor1, tensor2, value=1.0, out=None, name=None):
"""
Calculate the element-wise multiplication of tensor1 and tensor2,
then multiply the result by value, and add it to input. The shapes of input,
tensor1 and tensor2 should be broadcastable.
The equation is:
.. math::
out = input + value * tensor1 * tensor2
Args:
input(Variable): The input to be added. A Tensor with type float32, float64, int32, int64.
tensor1(Variable): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
tensor2(Variable): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
value(int|float): The multiplier for tensor1*tensor2. For float32 and float64 type input, value must be float, otherwise an integer.
out(Variable, Optional): The variable that specifies the output of the
operator, which can be Variable that has been created in the
program. The default value is None, and a new Variable will be
created to save the output. Default: None.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
out(Variable): The output result. A Tensor with the same data type as input's.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
input = fluid.data(name='input', dtype='float32', shape=[3, 4])
tensor1 = fluid.data(name='tensor1', dtype='float32', shape=[1, 4])
tensor2 = fluid.data(name='tensor2', dtype='float32', shape=[3, 4])
data = paddle.addcmul(input, tensor1, tensor2, value=1.0)
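# A minimal dygraph sketch (hypothetical data; all operands share one shape):
import numpy as np
with fluid.dygraph.guard():
    a = fluid.dygraph.to_variable(np.ones((3, 4)).astype('float32'))
    t1 = fluid.dygraph.to_variable(np.full((3, 4), 2.0).astype('float32'))
    t2 = fluid.dygraph.to_variable(np.full((3, 4), 3.0).astype('float32'))
    res = paddle.addcmul(a, t1, t2, value=1.0)
    print(res.numpy())  # every entry is 1 + 1.0 * 2 * 3 = 7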
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
check_variable_and_dtype(tensor1, 'tensor1', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
check_variable_and_dtype(tensor2, 'tensor2', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
if convert_dtype(input.dtype) in ['float32', 'float64']:
check_type(value, 'value', float, 'addcmul')
if convert_dtype(input.dtype) in ['int32', 'int64']:
check_type(value, 'value', int, 'addcmul')
if out is not None:
layers.assign(layers.elementwise_add(input, layers.elementwise_mul(tensor1, tensor2) * value), out)
else:
out = layers.elementwise_add(input, layers.elementwise_mul(tensor1, tensor2) * value)
return out
def clamp(input, min=None, max=None, output=None, name=None):
"""
**clamp layer**
This operator clamps all elements in input into the range [ min, max ] and returns
a resulting tensor as the following equation:
.. math::
Out = MIN(MAX(x, min), max)
Args:
input (Variable): An input N-D Tensor or LoDTensor
with data type float32, float64.
min (float32|Variable): The lower bound with type ``float32`` or a ``Tensor``
with shape [1] and type ``int32``, ``float32``, ``float64``.
max (float32|Variable): The upper bound with type ``float32`` or a ``Tensor``
with shape [1] and type ``int32``, ``float32``, ``float64``.
output (Variable, optional): A tensor or LoDTensor. If :attr:`output` is None,
a new tensor will be created as :attr:`output`. Default: None.
name (str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Variable: A Tensor or LoDTensor with the same data type and data shape as input's.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[1.2,3.5],
[4.5,6.4]]).astype('float32')
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(in1)
out1 = paddle.tensor.clamp(x1, min=3.5, max=5.0)
out2 = paddle.tensor.clamp(x1, min=2.5)
print(out1.numpy())
# [[3.5, 3.5]
# [4.5, 5.0]]
print(out2.numpy())
# [[2.5, 3.5]
# [4.5, 6.4]]
"""
assert min is not None or max is not None, "either min or max should be defined."
if min is not None:
check_type(min, 'min', (float, Variable), 'clamp')
if isinstance(min, Variable):
check_dtype(min.dtype, 'min', ['float32', 'float64', 'int32'],
'clamp', '(When the type of min in clamp is Variable.)')
if max is not None:
check_type(max, 'max', (float, Variable), 'clamp')
if isinstance(max, Variable):
check_dtype(max.dtype, 'max', ['float32', 'float64', 'int32'],
'clamp', '(When the type of max in clamp is Variable.)')
inputs = {'X': input}
attrs = {'min': sys.float_info.min, 'max': sys.float_info.max}
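# The attrs start at the sys.float_info sentinels and are overwritten below when
# a Python-float bound is supplied; Variable bounds are wired through the
# 'Min'/'Max' inputs instead. Note sys.float_info.min is the smallest positive
# float, not the most negative one, so pass min explicitly when clamping from below.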
if isinstance(min, Variable):
min.stop_gradient = True
inputs['Min'] = min
elif min is not None:
attrs['min'] = min
if isinstance(max, Variable):
max.stop_gradient = True
inputs['Max'] = max
elif max is not None:
attrs['max'] = max
helper = LayerHelper('clamp', **locals())
if output is None:
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
helper.append_op(
type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs)
return output
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define random functions
# __all__ = ['gaussin',
# 'uniform',
# 'shuffle',
# 'randn',
# 'rand',
# 'randint']
import numpy as np
from ..fluid import core
from ..fluid.framework import device_guard, in_dygraph_mode, _varbase_creator, Variable, convert_np_dtype_to_dtype_
from ..fluid.layers.layer_function_generator import templatedoc
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from ..fluid.layers import utils
from ..fluid.layers.tensor import fill_constant
__all__ = ['randperm', 'randint']
def randint(low,
high=None,
shape=None,
out=None,
dtype=None,
device=None,
stop_gradient=False,
seed=0,
name=None):
"""
This function returns a Tensor filled with random integers from the "discrete uniform" distribution of the
specified data type in the interval [low, high). If high is None (the default), then results are from [0, low).
Args:
low (int): The lower bound on the range of random values to generate; low is included in the range
(unless high=None, in which case this parameter acts as the exclusive upper bound).
high (int, optional): The upper bound on the range of random values to generate; high is excluded
from the range. Default is None (see above for the behavior if high=None).
shape (list|tuple|Variable, optional): The shape of the output Tensor, if the shape is a list or tuple,
its elements can be an integer
or a Tensor with the shape [1], and the type of the Tensor must be int32 or int64.
If the shape is a Variable, it is a 1-D Tensor, and the type of the Tensor must be
int32 or int64. Default is None, in which case the shape is [1].
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
if out is None, a new Variable will be created to store the result.
dtype(np.dtype|core.VarDesc.VarType|str, optional): Data type of the output Tensor
which can be int32, int64. If dtype is `None`, the data
type of the created Tensor is `int64`.
device(str, optional): This parameter specifies whether the Tensor is created
on the GPU or the CPU.
stop_gradient(bool, optional): Indicating if we stop gradient from current(out) Variable,
default value is False.
seed (int, optional): Random seed used for permute samples. If seed is
equal to 0, it means use a seed generated by the system. Note that
if seed is not 0, this operator will always generate the same random
permutation every time. Default: 0.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: A Tensor of the specified shape filled with random integers.
Raises:
ValueError: Randint's low must be less than high.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
# example 1:
# attr shape is a list which doesn't contain tensor Variable.
result_1 = paddle.randint(low=-5, high=5, shape=[3, 4], dtype="int64")
# example 2:
# attr shape is a list which contains tensor Variable.
dim_1 = fluid.layers.fill_constant([1],"int64",3)
dim_2 = fluid.layers.fill_constant([1],"int32",5)
result_2 = paddle.randint(low=-5, high=5, shape=[dim_1, dim_2], dtype="int32")
# example 3:
# attr shape is a Variable, the data type must be int64 or int32.
var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
result_3 = paddle.randint(low=-5, high=5, shape=var_shape, dtype="int32")
var_shape_int32 = fluid.data(name='var_shape_int32', shape=[2], dtype="int32")
result_4 = paddle.randint(low=-5, high=5, shape=var_shape_int32, dtype="int64")
# example 4:
# Input only one parameter
# low=0, high=10, shape=[1], dtype='int64'
result_5 = paddle.randint(10)
"""
def get_new_shape_tensor(list_shape):
new_shape_tensor = []
for dim in list_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_shape_tensor.append(dim)
else:
assert isinstance(dim, int)
temp_out = helper.create_variable_for_type_inference('int64')
fill_constant([1], 'int64', dim, force_cpu=True, out=temp_out)
new_shape_tensor.append(temp_out)
return new_shape_tensor
def get_attr_shape(list_shape):
unk_dim_idx = -1
attrs_shape = []
for dim_idx, dim_size in enumerate(list_shape):
if isinstance(dim_size, Variable):
attrs_shape.append(-1)
else:
attrs_shape.append(dim_size)
assert dim_size > 0, (
"Each dimension size given in shape must not be negative "
"except one unknown dimension.")
return attrs_shape
if dtype is None:
dtype = 'int64'
check_dtype(dtype, 'dtype', ['int32', 'int64'], 'randint')
inputs = dict()
attrs = dict()
if shape is None:
shape = [1]
assert len(shape) > 0, ("The size of argument(shape) can't be zero.")
helper = LayerHelper("randint", **locals())
if in_dygraph_mode():
attrs['shape'] = shape
else:
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs["ShapeTensor"] = shape
elif isinstance(shape, (list, tuple)):
assert len(shape) > 0, (
"The size of argument(shape) can't be zero.")
if utils._contain_var(shape):
inputs['ShapeTensorList'] = get_new_shape_tensor(shape)
else:
attrs["shape"] = get_attr_shape(shape)
check_type(shape, 'shape', (list, tuple, Variable), 'randint')
if high is None:
high = low
low = 0
attrs['low'] = low
attrs['high'] = high
attrs['seed'] = seed
if (low >= high):
raise ValueError(
"randint's low must less then high, but received low = {0}, "
"high = {1}".format(low, high))
if out is None:
if name is None:
out = helper.create_variable_for_type_inference(dtype=dtype)
else:
out = helper.create_variable(
name=name, dtype=dtype, persistable=False)
else:
check_dtype(dtype, 'dtype',
convert_dtype(out.dtype), 'randint',
"(The dtype in randint must be the same with out's dtype.)")
attrs['dtype'] = out.dtype
out.stop_gradient = stop_gradient
if device is None:
helper.append_op(
type='randint', inputs=inputs, outputs={'Out': out}, attrs=attrs)
else:
with device_guard(device):
helper.append_op(
type='randint',
inputs=inputs,
outputs={'Out': out},
attrs=attrs)
return out
def randn(shape,
out=None,
dtype=None,
device=None,
stop_gradient=True,
name=None):
"""
This function returns a tensor filled with random numbers from a normal
distribution with mean 0 and variance 1 (also called the standard normal
distribution).
Args:
shape(list|tuple): Shape of the generated random tensor.
out(Variable, optional): Optional output which can be any created Variable
that meets the requirements to store the result of the operation. If
out is `None`, a new Variable will be created to store the result.
Default is None.
dtype(np.dtype|core.VarDesc.VarType|str, optional): Data type of the output
tensor, which can be float32, float64. if dtype is `None` , the data
type of output tensor is `float32` .
Default is None.
device(str, optional): Specify whether the output variable is saved in CPU
or GPU memory. Supported values: None, 'cpu', 'gpu'. If it is None, the output
variable will be automatically assigned a device.
Default: None.
stop_gradient(bool, optional): Indicating if we stop gradient from current(out)
Variable. Default is True.
name(str, optional): Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Default is None.
Returns:
Random tensor whose data is drawn from a Gaussian distribution,
dtype: float32 or float64 as specified.
Return type:
Variable
Raises:
TypeError: If the type of `shape` is not list or tuple.
TypeError: If the data type of `dtype` is not float32 or float64.
ValueError: If the length of `shape` is zero.
Examples:
.. code-block:: python
# declarative mode
import paddle
import paddle.fluid as fluid
data = paddle.randn([2, 4])
place = fluid.CPUPlace()
exe = fluid.Executor(place)
res, = exe.run(fluid.default_main_program(), feed={}, fetch_list=[data])
print(res)
# [[-1.4187592 0.7368311 -0.53748125 -0.0146909 ]
# [-0.66294265 -1.3090698 0.1898754 -0.14065823]]
.. code-block:: python
# imperative mode
import paddle
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
place = fluid.CPUPlace()
with dg.guard(place) as g:
x = paddle.randn([2, 4])
x_np = x.numpy()
print(x_np)
# [[ 1.5149173 -0.26234224 -0.592486 1.4523455 ]
# [ 0.04581212 -0.85345626 1.1687907 -0.02512913]]
"""
helper = LayerHelper("randn", **locals())
check_type(shape, 'shape', (list, tuple), 'randn')
assert len(shape) > 0, ("The size of argument(shape) can't be zero.")
if dtype is None:
dtype = 'float32'
check_dtype(dtype, 'create data type', ['float32', 'float64'], 'randn')
if out is None:
out = helper.create_variable_for_type_inference(dtype=dtype)
else:
check_variable_and_dtype(out, 'out', [dtype], 'randn')
out.stop_gradient = stop_gradient
dtype = convert_np_dtype_to_dtype_(dtype)
seed = np.random.randint(0, 100)
with device_guard(device):
helper.append_op(
type='gaussian_random',
outputs={'Out': out},
attrs={
'shape': shape,
'mean': 0.0,
'std': 1.0,
'seed': seed,
'dtype': dtype,
'use_mkldnn': False
})
return out
@templatedoc()
def randperm(n,
out=None,
dtype="int64",
device=None,
stop_gradient=True,
seed=0):
"""
${comment}
Args:
n (int): The upper bound (exclusive), and it should be greater than 0.
out (Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
If out is None, a new Variable will be created to store the result.
Default: None.
dtype (np.dtype|core.VarDesc.VarType|str, optional): The type of the
output Tensor. Supported data types: int64, int32. Default: int64.
device (str, optional): Specify whether the output variable is saved in CPU
or GPU memory. Supported values: None, 'cpu', 'gpu'. If it is None, the output
variable will be automatically assigned a device.
Default: None.
stop_gradient (bool, optional): Whether grad should record operations
on the returned tensor. Default: True.
seed (int, optional): Random seed used for permute samples. If seed is
equal to 0, it means use a seed generated by the system. Note that
if seed is not 0, this operator will always generate the same random
permutation every time. Default: 0.
Returns:
${out_comment}.
Return Type:
${out_type}
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
num = 6
is_use_gpu = False
data_1 = paddle.randperm(num)
fluid.layers.Print(data_1)
data_2 = paddle.randperm(num, dtype="int32", seed=1)
fluid.layers.Print(data_2)
data_3 = paddle.randperm(num, stop_gradient=False, device="cpu")
fluid.layers.Print(data_3)
paddle.randperm(num, out=data_3)
fluid.layers.Print(data_3)
place = fluid.CUDAPlace(0) if is_use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
exe.run()
"""
if n < 1:
raise ValueError("The input n should be greater than 0 in randperm op.")
check_dtype(dtype, 'dtype', ['int64', 'int32'], 'randperm')
dtype = convert_dtype(dtype)
if device not in [None, 'cpu', 'gpu']:
raise ValueError("The input device should in [None, 'cpu', 'gpu'].")
check_type(stop_gradient, 'stop_gradient', bool, 'randperm')
helper = LayerHelper("randperm", **locals())
if out is None:
out = helper.create_variable_for_type_inference(dtype=dtype)
else:
check_variable_and_dtype(out, 'out', [dtype], 'randperm')
if stop_gradient:
out.stop_gradient = True
inputs = dict()
outputs = {'Out': [out]}
attrs = {'n': n, 'dtype': out.dtype, 'seed': seed}
with device_guard(device):
helper.append_op(
type='randperm', inputs=inputs, outputs=outputs, attrs=attrs)
return out
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
from ..fluid import core, layers
# TODO: define searching & indexing functions of a tensor
__all__ = [
'argmax',
# 'argmin',
# 'argsort',
# 'has_inf',
# 'has_nan',
# 'masked_select',
# 'topk',
'where',
'index_select',
'nonzero',
'sort',
'index_sample'
]
from paddle.common_ops_import import *
def argmax(input, axis=None, dtype=None, out=None, keepdims=False, name=None):
"""
This OP computes the indices of the max elements of the input tensor
along the provided axis.
Args:
input(Variable): An input N-D Tensor with type float32, float64, int16,
int32, int64, uint8.
axis(int, optional): Axis to compute indices along. The effective range
is [-R, R), where R is Rank(input). When axis<0, it works the same way
as axis+R. Default is None, in which case the last dim is used to select indices of the max value.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor which can
be int32, int64. The default value is None, and it will
return the int64 indices.
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
if out is None, a new Variable will be created to store the result. Default is None.
keepdims(bool, optional): Whether to keep the reduced axis in the output. Default is False.
name(str, optional): The name of the output variable. Normally there is no need for user to set this property.
Default value is None, in which case the framework sets the name of the output variable.
Returns:
Variable: A Tensor with data type int64.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[[5,8,9,5],
[0,0,1,7],
[6,9,2,4]],
[[5,2,4,2],
[4,7,7,9],
[1,7,0,6]]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(in1)
out1 = paddle.argmax(input=x, axis=-1)
out2 = paddle.argmax(input=x, axis=0)
out3 = paddle.argmax(input=x, axis=1)
out4 = paddle.argmax(input=x, axis=2)
out5 = paddle.argmax(input=x, axis=2, keepdims=True)
print(out1.numpy())
# [[2 3 1]
# [0 3 1]]
print(out2.numpy())
# [[0 0 0 0]
# [1 1 1 1]
# [0 0 0 1]]
print(out3.numpy())
# [[2 2 0 1]
# [0 1 1 1]]
print(out4.numpy())
# [[2 3 1]
# [0 3 1]]
print(out5.numpy())
#array([[[2],
# [3],
# [1]],
# [[0],
# [3],
# [1]]])
"""
helper = LayerHelper("arg_max", **locals())
var_dtype = None
attrs = {}
if dtype is not None:
check_dtype(dtype, 'create data type', ['int32', 'int64'], 'arg_max')
var_dtype = convert_np_dtype_to_dtype_(dtype)
attrs["dtype"] = var_dtype
else:
var_dtype = VarDesc.VarType.INT64
if out is None:
out = helper.create_variable_for_type_inference(var_dtype)
if axis is None:
axis = -1
attrs['keepdims'] = keepdims
attrs['axis'] = axis
helper.append_op(
type='arg_max',
inputs={'X': input},
outputs={'Out': [out]},
attrs=attrs)
out.stop_gradient = True
return out
def index_select(input, index, dim=0):
"""
Returns a new tensor which indexes the `input` tensor along dimension `dim` using
the entries in `index` which is a Tensor. The returned tensor has the same number
of dimensions as the original `input` tensor. The dim-th dimension has the same
size as the length of `index`; other dimensions have the same size as in the `input` tensor.
Args:
input (Variable): The input tensor variable.
index (Variable): The 1-D tensor containing the indices to index.
dim (int): The dimension in which we index.
Returns:
Variable: A Tensor with same data type as `input`.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
data = np.array([[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0]])
data_index = np.array([0, 1, 1]).astype('int32')
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(data)
index = fluid.dygraph.to_variable(data_index)
out_z1 = paddle.index_select(x, index)
print(out_z1.numpy())
#[[1. 2. 3. 4.]
# [5. 6. 7. 8.]
# [5. 6. 7. 8.]]
out_z2 = paddle.index_select(x, index, dim=1)
print(out_z2.numpy())
#[[ 1. 2. 2.]
# [ 5. 6. 6.]
# [ 9. 10. 10.]]
"""
helper = LayerHelper("index_select", **locals())
if in_dygraph_mode():
return core.ops.index_select(input, index, 'dim', dim)
check_variable_and_dtype(input, 'input',
['float32', 'float64', 'int32', 'int64'],
'paddle.tensor.search.index_select')
check_variable_and_dtype(index, 'index', ['int32', 'int64'],
'paddle.tensor.search.index_select')
out = helper.create_variable_for_type_inference(input.dtype)
helper.append_op(
type='index_select',
inputs={'X': input,
'Index': index},
outputs={'Out': out},
attrs={'dim': dim})
return out
def nonzero(input, as_tuple=False):
"""
Return a tensor containing the indices of all non-zero elements of the `input`
tensor. If as_tuple is True, return a tuple of 1-D tensors, one for each dimension
in `input`, each containing the indices (in that dimension) of all non-zero elements
of `input`. Given an n-dimensional `input` tensor with shape [x_1, x_2, ..., x_n], if
as_tuple is False, we can get an output tensor with shape [z, n], where `z` is the
number of all non-zero elements in the `input` tensor. If as_tuple is True, we can get
a 1-D tensor tuple of length `n`, and the shape of each 1-D tensor is [z, 1].
Args:
input (Variable): The input tensor variable.
as_tuple (bool): Return type, Tensor or tuple of Tensor.
Returns:
Variable. The data type is int64.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
data1 = np.array([[1.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 3.0]])
data2 = np.array([0.0, 1.0, 0.0, 3.0])
data3 = np.array([0.0, 0.0, 0.0])
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(data1)
x2 = fluid.dygraph.to_variable(data2)
x3 = fluid.dygraph.to_variable(data3)
out_z1 = paddle.nonzero(x1)
print(out_z1.numpy())
#[[0 0]
# [1 1]
# [2 2]]
out_z1_tuple = paddle.nonzero(x1, as_tuple=True)
for out in out_z1_tuple:
print(out.numpy())
#[[0]
# [1]
# [2]]
#[[0]
# [1]
# [2]]
out_z2 = paddle.nonzero(x2)
print(out_z2.numpy())
#[[1]
# [3]]
out_z2_tuple = paddle.nonzero(x2, as_tuple=True)
for out in out_z2_tuple:
print(out.numpy())
#[[1]
# [3]]
out_z3 = paddle.nonzero(x3)
print(out_z3.numpy())
#[]
out_z3_tuple = paddle.nonzero(x3, as_tuple=True)
for out in out_z3_tuple:
print(out.numpy())
#[]
"""
list_out = []
shape = input.shape
rank = len(shape)
if in_dygraph_mode():
outs = core.ops.where_index(input)
else:
outs = layers.where(input)
if not as_tuple:
return outs
elif rank == 1:
return tuple([outs])
else:
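# rank > 1: split the [z, rank] index matrix along its last axis into
# `rank` tensors of shape [z, 1], one per input dimension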
for i in range(rank):
list_out.append(
layers.slice(
outs, axes=[rank - 1], starts=[i], ends=[i + 1]))
return tuple(list_out)
def sort(input, axis=-1, descending=False, out=None, name=None):
"""
This OP sorts the input along the given axis, and returns the sorted output
data Variable and its corresponding index Variable with the same shape as
:attr:`input`.
**NOTICE**: The Variables in the output of this OP carry gradient. You can
set :attr:`stop_gradient` on them if that is not desired.
Args:
input(Variable): An input N-D Tensor with type float32, float64, int16,
int32, int64, uint8.
axis(int, optional): Axis along which to sort. The effective range
is [-R, R), where R is Rank(x). When axis<0, it works the same way
as axis+R. Default is -1.
descending(bool, optional) : Descending is a flag, if set to true,
algorithm will sort by descending order, else sort by
ascending order. Default is false.
out(Variable, optional): The default value is None. Optional output
which can be any created Variable that meets the requirements to
store the result of the operation. If out is None, a new Variable will
be created to store the result.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
tuple: A tuple of sorted data Variable(with the same shape and data
type as input) and the sorted indices(with the same shape as input's
and with data type int64).
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[[5,8,9,5],
[0,0,1,7],
[6,9,2,4]],
[[5,2,4,2],
[4,7,7,9],
[1,7,0,6]]]).astype(np.float32)
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(in1)
out1 = paddle.sort(input=x, axis=-1)
out2 = paddle.sort(input=x, axis=0)
out3 = paddle.sort(input=x, axis=1)
print(out1[0].numpy())
# [[[5. 5. 8. 9.]
# [0. 0. 1. 7.]
# [2. 4. 6. 9.]]
# [[2. 2. 4. 5.]
# [4. 7. 7. 9.]
# [0. 1. 6. 7.]]]
print(out1[1].numpy())
# [[[0 3 1 2]
# [0 1 2 3]
# [2 3 0 1]]
# [[1 3 2 0]
# [0 1 2 3]
# [2 0 3 1]]]
print(out2[0].numpy())
# [[[5. 2. 4. 2.]
# [0. 0. 1. 7.]
# [1. 7. 0. 4.]]
# [[5. 8. 9. 5.]
# [4. 7. 7. 9.]
# [6. 9. 2. 6.]]]
print(out3[0].numpy())
# [[[0. 0. 1. 4.]
# [5. 8. 2. 5.]
# [6. 9. 9. 7.]]
# [[1. 2. 0. 2.]
# [4. 7. 4. 6.]
# [5. 7. 7. 9.]]]
"""
helper = LayerHelper("sort", **locals())
if out is None:
out = helper.create_variable_for_type_inference(
dtype=input.dtype, stop_gradient=False)
ids = helper.create_variable_for_type_inference(
VarDesc.VarType.INT64, stop_gradient=True)
helper.append_op(
type='argsort',
inputs={'X': input},
outputs={'Out': out,
'Indices': ids},
attrs={'axis': axis,
'descending': descending})
return out, ids
def where(condition, x, y, name=None):
"""
Return a tensor of elements selected from either $x$ or $y$, depending on $condition$.
.. math::
out_i =
\\begin{cases}
x_i, \quad \\text{if} \\ condition_i \\ is \\ True \\\\
y_i, \quad \\text{if} \\ condition_i \\ is \\ False \\\\
\\end{cases}
Args:
condition(Variable): The condition to choose x or y.
x(Variable): x is a Tensor Variable with data type float32, float64, int32, int64.
y(Variable): y is a Tensor Variable with data type float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Variable: A Tensor with the same data type as x.
Examples:
.. code-block:: python
import paddle
import numpy as np
import paddle.fluid as fluid
x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float32")
y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float32")
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(x_i)
y = fluid.dygraph.to_variable(y_i)
out = paddle.where(x>1, x, y)
print(out.numpy())
#out: [1.0, 1.0, 3.2, 1.2]
"""
if not in_dygraph_mode():
check_variable_and_dtype(condition, 'condition', ['bool'], 'where')
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'where')
check_variable_and_dtype(
y, 'y', ['float32', 'float64', 'int32', 'int64'], 'where')
x_shape = list(x.shape)
y_shape = list(y.shape)
if x_shape == y_shape:
if in_dygraph_mode():
return core.ops.where(condition, x, y)
else:
helper = LayerHelper("where", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='where',
inputs={'Condition': condition,
'X': x,
'Y': y},
outputs={'Out': [out]})
return out
else:
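# shapes differ: emulate the select with 0/1 masks so that the broadcasting
# rules of elementwise_mul/elementwise_add apply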
cond_int = layers.cast(condition, x.dtype)
cond_not_int = layers.cast(layers.logical_not(condition), x.dtype)
out1 = layers.elementwise_mul(x, cond_int)
out2 = layers.elementwise_mul(y, cond_not_int)
out = layers.elementwise_add(out1, out2)
return out
def index_sample(x, index):
"""
**IndexSample Layer**
    The IndexSample OP returns elements of X at the locations specified
    by Index.
.. code-block:: text
Given:
X = [[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]]
Index = [[0, 1, 3],
[0, 2, 4]]
Then:
Out = [[1, 2, 4],
[6, 8, 10]]
Args:
x (Variable): The source input tensor with 2-D shape. Supported data type is
int32, int64, float32, float64.
        index (Variable): The index input tensor with 2-D shape, whose first
            dimension must be the same as that of X. Data type is int32 or int64.
Returns:
        output (Variable): A tensor with the same shape as index and the same
            data type as x.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
data = np.array([[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0]]).astype('float32')
data_index = np.array([[0, 1, 2],
[1, 2, 3],
[0, 0, 0]]).astype('int32')
target_data = np.array([[100, 200, 300, 400],
[500, 600, 700, 800],
[900, 1000, 1100, 1200]]).astype('int32')
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(data)
index = fluid.dygraph.to_variable(data_index)
target = fluid.dygraph.to_variable(target_data)
out_z1 = paddle.index_sample(x, index)
print(out_z1.numpy())
#[[1. 2. 3.]
# [6. 7. 8.]
# [9. 9. 9.]]
# Use the index of the maximum value by topk op
# get the value of the element of the corresponding index in other tensors
top_value, top_index = fluid.layers.topk(x, k=2)
out_z2 = paddle.index_sample(target, top_index)
print(top_value.numpy())
#[[ 4. 3.]
# [ 8. 7.]
# [12. 11.]]
print(top_index.numpy())
#[[3 2]
# [3 2]
# [3 2]]
print(out_z2.numpy())
#[[ 400 300]
# [ 800 700]
# [1200 1100]]
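            # Note: index_sample picks Out[i][j] = X[i][Index[i][j]], which
            # mirrors NumPy's np.take_along_axis(data, data_index, axis=1).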
"""
helper = LayerHelper("index_sample", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'paddle.tensor.search.index_sample')
check_variable_and_dtype(index, 'index', ['int32', 'int64'],
'paddle.tensor.search.index_sample')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='index_sample',
inputs={'X': x,
'Index': index},
outputs={'Out': out})
return out
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define statistical functions of a tensor
__all__ = [ #'mean',
#'reduce_mean',
#'std',
'var'
]
import numpy as np
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import in_dygraph_mode
from ..fluid import layers
from .search import where
from ..fluid.data_feeder import convert_dtype
def var(input, axis=None, keepdim=False, unbiased=True, out=None, name=None):
"""
Computes the variance of the input Variable's elements along the specified
axis.
Args:
        input (Variable): The input Variable whose variance is computed, with
            data types float32 and float64 supported.
axis (list|int, optional): The axis along which the variance is computed.
If `None`, compute the variance over all elements of :attr:`input`
and return a Variable with a single element, otherwise it must be in
the range :math:`[-rank(input), rank(input))`. If :math:`axis[i] < 0`,
the axis to compute is :math:`rank(input) + axis[i]`.
        keepdim (bool, optional): Whether to keep the reduced dimensions in
the output Variable. The dimensions in :attr:`axis` will be squeezed
and the result Variable will have :attr:`len(axis)` fewer dimensions
than the :attr:`input` unless :attr:`keepdim` is true, default False.
unbiased (bool, optional): Whether to compute variance via the unbiased
estimator, in which the divisor used in the computation is
:math:`N - 1`, where :math:`N` represents the number of elements
along :attr:`axis`, otherwise the divisor is :math:`N`. Default True.
out (Variable, optional): Alternate output Variable to store the result
variance. Default None.
name (str, optional): The name for this layer. Normally there is no
            need for the user to set this property. For more information, please
refer to :ref:`api_guide_Name`. Default None.
Returns:
Variable: The result variance with the same dtype as :attr:`input`.
If :attr:`out = None`, returns a new Variable containing the
variance, otherwise returns a reference to the output Variable.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid.dygraph as dg
a = np.array([[1.0, 2.0], [3.0, 4.0]]).astype("float32")
with dg.guard():
data = dg.to_variable(a)
variance = paddle.var(data, axis=[1])
print(variance.numpy())
# [0.5 0.5]
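            # For reference, with unbiased=True (the default) this matches
            # np.var(a, axis=1, ddof=1) -> [0.5 0.5]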
"""
dtype = convert_dtype(input.dtype)
if dtype not in ["float32", "float64"]:
raise ValueError("Layer tensor.var() only supports floating-point "
"dtypes, but received {}.".format(dtype))
    rank = len(input.shape)
    # The docstring allows axis to be an int; normalize it to a list so the
    # negative-axis adjustment below works in both cases.
    if isinstance(axis, int):
        axis = [axis]
    axes = axis if axis is not None and axis != [] else range(rank)
    axes = [e if e >= 0 else e + rank for e in axes]
inp_shape = input.shape if in_dygraph_mode() else layers.shape(input)
mean = layers.reduce_mean(input, dim=axis, keep_dim=True, name=name)
tmp = layers.reduce_mean(
(input - mean)**2, dim=axis, keep_dim=keepdim, name=name)
if unbiased:
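        # Bessel's correction: rescale the biased (divide-by-N) variance by
        # N / (N - 1). In static-graph mode N is a tensor (sliced from
        # layers.shape), so the factor is computed with tensor ops and `where`
        # maps the N <= 1 case to 0 instead of dividing by zero.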
n = 1
for i in axes:
n *= inp_shape[i]
if not in_dygraph_mode():
n = layers.cast(n, dtype)
zero_const = layers.fill_constant(shape=[1], dtype=dtype, value=0.0)
factor = where(n > 1.0, n / (n - 1.0), zero_const)
else:
factor = n / (n - 1.0) if n > 1.0 else 0.0
tmp *= factor
if out:
layers.assign(input=tmp, output=out)
return out
else:
return tmp
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define the basic tensor classes
# __all__ = ['Tensor', 'LoDTensor', 'LoDTensorArray']
...
@@ -182,7 +182,6 @@ packages=['paddle',
         'paddle.nn.functional',
         'paddle.nn.layer',
         'paddle.imperative',
-        'paddle.tensor',
         ]
with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f:
...