Commit 58cd4fda authored by ying

add wrapper for transpose operator.

Parent c6b78e56
@@ -22,13 +22,38 @@ from ..param_attr import ParamAttr
 from tensor import concat
 __all__ = [
-    'fc', 'embedding', 'dynamic_lstm', 'gru_unit', 'linear_chain_crf',
-    'crf_decoding', 'cos_sim', 'cross_entropy', 'square_error_cost', 'accuracy',
-    'chunk_eval', 'sequence_conv', 'conv2d', 'sequence_pool', 'pool2d',
-    'batch_norm', 'beam_search_decode', 'conv2d_transpose', 'sequence_expand',
-    'lstm_unit', 'reduce_sum', 'reduce_mean', 'reduce_max', 'reduce_min',
-    'sequence_first_step', 'sequence_last_step', 'dropout', 'split',
-    'l2_normalize', 'matmul', 'warpctc', 'sequence_reshape'
+    'fc',
+    'embedding',
+    'dynamic_lstm',
+    'gru_unit',
+    'linear_chain_crf',
+    'crf_decoding',
+    'cos_sim',
+    'cross_entropy',
+    'square_error_cost',
+    'accuracy',
+    'chunk_eval',
+    'sequence_conv',
+    'conv2d',
+    'sequence_pool',
+    'pool2d',
+    'batch_norm',
+    'beam_search_decode',
+    'conv2d_transpose',
+    'sequence_expand',
+    'lstm_unit',
+    'reduce_sum',
+    'reduce_mean',
+    'reduce_max',
+    'reduce_min',
+    'sequence_first_step',
+    'sequence_last_step',
+    'dropout',
+    'split',
+    'l2_normalize',
+    'matmul',
+    'warpctc',
+    'sequence_reshape',
 ]
@@ -43,14 +68,14 @@ def fc(input,
     **Fully Connected Layer**
     The fully connected layer can take multiple tensors as its inputs. It
-    creates a variable (one for each input tensor) called weights for each input
-    tensor, which represents a fully connected weight matrix from each input
-    unit to each output unit. The fully connected layer multiplies each input
-    tensor with its coresponding weight to produce an output Tensor. If
-    multiple input tensors are given, the results of multiple multiplications
-    will be sumed up. If bias_attr is not None, a biases variable will be
-    created and added to the output. Finally, if activation is not None,
-    it will be applied to the output as well.
+    creates a variable (one for each input tensor) called weights for each
+    input tensor, which represents a fully connected weight matrix from
+    each input unit to each output unit. The fully connected layer
+    multiplies each input tensor with its coresponding weight to produce
+    an output Tensor. If multiple input tensors are given, the results of
+    multiple multiplications will be sumed up. If bias_attr is not None,
+    a biases variable will be created and added to the output. Finally,
+    if activation is not None, it will be applied to the output as well.
     This process can be formulated as follows:
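For readers skimming this hunk, here is a minimal NumPy sketch of the computation the docstring describes (illustrative only, not the fluid implementation; the function and variable names below are assumptions):

    import numpy as np

    def fc_reference(inputs, weights, bias=None, act=None):
        # Multiply each input tensor with its corresponding weight matrix
        # and sum the results over all inputs.
        out = sum(x.dot(w) for x, w in zip(inputs, weights))
        if bias is not None:
            # Mirrors bias_attr: add a bias variable to the summed output.
            out = out + bias
        if act is not None:
            # Mirrors the optional activation applied to the final output.
            out = act(out)
        return out

    x0 = np.random.rand(4, 8)    # first input: batch of 4, 8 features
    x1 = np.random.rand(4, 6)    # second input: batch of 4, 6 features
    w0 = np.random.rand(8, 16)   # weight matrix for the first input
    w1 = np.random.rand(6, 16)   # weight matrix for the second input
    y = fc_reference([x0, x1], [w0, w1], bias=np.zeros(16),
                     act=lambda t: np.maximum(t, 0.0))
    print(y.shape)               # (4, 16)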
@@ -1971,3 +1996,41 @@ def sequence_reshape(input, new_dim):
         outputs={'Out': [out]},
         attrs={'new_dim': new_dim})
     return out
+
+
+def transpose(input, perm, name=None):
+    """
+    **transpose Layer**
+
+    Permute the dimensions of `input` according to `perm`.
+
+    The `i`-th dimension of the returned tensor will correspond to the
+    perm[i]-th dimension of `input`.
+
+    Args:
+        input (Variable): (Tensor), A Tensor.
+        perm (list): A permutation of the dimensions of `input`.
+
+    Returns:
+        Variable: A transposed Tensor.
+
+    Examples:
+        .. code-block:: python
+
+            x = fluid.layers.data(name='x', shape=[5, 10, 15], dtype='float32')
+            x_transposed = layers.transpose(input=x, perm=[1, 0, 2])
+    """
+    if len(perm) != len(input.shape):
+        raise ValueError(
+            "Input(perm) is the permutation of dimensions of Input(input). "
+            "It's length shoud be equal to Input(input)'s rank.")
+    helper = LayerHelper('transpose', **locals())
+    out = helper.create_tmp_variable(helper.input_dtype())
+    helper.append_op(
+        type='transpose',
+        inputs={'X': [input]},
+        outputs={'Out': [out]},
+        attrs={'axis': perm})
+    return out
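A small sketch (not part of the commit) of the permutation semantics the new wrapper exposes; NumPy is used here only to illustrate what forwarding `perm` as the operator's `axis` attribute means:

    import numpy as np

    x = np.random.rand(5, 10, 15)
    # Dimension i of the result is dimension perm[i] of the input,
    # so perm=[1, 0, 2] swaps the first two axes.
    y = np.transpose(x, axes=[1, 0, 2])
    print(y.shape)  # (10, 5, 15)

    # The wrapper above also validates the permutation: a perm whose length
    # differs from the input's rank (e.g. perm=[1, 0] for this 3-D tensor)
    # raises ValueError before any op is appended.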