Unverified commit 1a6c7ba2 authored by Xing Wu, committed by GitHub

cherry pick error info (rnn apis) pr #23891 and #24346 to release/1.8 (#24391)

* Fix error info (#23891)

* fix error info for rnn related api

* passed local test, test=develop

* double check the code

* double check the code, test=develop

* update 'shape' check in RNNCell, test=develop

* add long dtype to RNNCell

* fix long type in python3
Co-authored-by: XingWu01 <wuxing@iie.ac.cn>

* fix rnn check_type list error (#24346)

* fix rnn check_type list error

* tigger ci, test=release/1.8

* update modify, test=release/1.8
Co-authored-by: XingWu01 <wuxing@iie.ac.cn>
Parent a93d9e88
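
For context, the net effect of these changes is that the touched APIs now reject bad inputs at call time instead of failing inside the C++ operators. A minimal sketch (hypothetical session; based only on the gather_tree check in the diff below):

import numpy as np
import paddle.fluid as fluid

# gather_tree now validates its arguments before appending the op:
# passing numpy arrays instead of Variables, or a dtype other than
# int32/int64, raises a TypeError naming the offending argument.
np_ids = np.random.randint(0, 10, size=(5, 2, 2)).astype('int64')
np_parents = np.random.randint(0, 10, size=(5, 2, 2)).astype('int64')
fluid.layers.gather_tree(np_ids, np_parents)  # raises TypeError
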
@@ -15639,6 +15639,9 @@ def gather_tree(ids, parents):
final_sequences = fluid.layers.gather_tree(ids, parents)
"""
helper = LayerHelper('gather_tree', **locals())
check_variable_and_dtype(ids, 'ids', ['int32', 'int64'], 'gather_tree')
check_variable_and_dtype(parents, 'parents', ['int32', 'int64'],
'gather_tree')
out = helper.create_variable_for_type_inference(dtype=ids.dtype)
helper.append_op(
......
@@ -29,6 +29,7 @@ from ..data_feeder import convert_dtype
from ..layer_helper import LayerHelper
from ..framework import in_dygraph_mode
from ..param_attr import ParamAttr
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
__all__ = [
'RNNCell',
@@ -89,7 +90,7 @@ class RNNCell(object):
def get_initial_states(self,
batch_ref,
shape=None,
dtype=None,
dtype='float32',
init_value=0,
batch_dim_idx=0):
"""
@@ -106,9 +107,9 @@
property `state_shape` will be used. The default value is None.
dtype: A (possibly nested structure of) data type[s]. The structure
must be the same as that of `shape`, except when all tensors in states
have the same data type, a single data type can be used. If None and
have the same data type, a single data type can be used. If
property `cell.state_shape` is not available, float32 will be used
as the data type. The default value is None.
as the data type. The default value is float32.
init_value: A float value used to initialize states.
batch_dim_idx: An integer indicating which dimension of the tensor in
inputs represents batch size. The default value is 0.
@@ -117,6 +118,26 @@
Variable: tensor variable[s] packed in the same structure provided \
by shape, representing the initialized states.
"""
if sys.version_info < (3, ):
integer_types = (
int,
long, )
else:
integer_types = (int, )
check_variable_and_dtype(batch_ref, 'batch_ref',
['float32', 'float64'], 'RNNCell')
check_type(shape, 'shape', (list, tuple, type(None), integer_types),
'RNNCell')
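# `shape` may be None, a bare integer, or a list/tuple; list entries
# are validated individually below.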
if isinstance(shape, (list, tuple)):
shapes = map_structure(lambda x: x, shape)
if isinstance(shape, list):
for i, _shape in enumerate(shapes):
check_type(_shape, 'shapes[' + str(i) + ']', integer_types,
'RNNCell')
else:
check_type(shapes, 'shapes', integer_types, 'RNNCell')
check_dtype(dtype, 'dtype', ['float32', 'float64'], 'RNNCell')
# TODO: use inputs and batch_size
batch_ref = flatten(batch_ref)[0]
@@ -250,6 +271,8 @@ class GRUCell(RNNCell):
dtype(string, optional): The data type used in this cell. Default float32.
name(string, optional) : The name scope used to identify parameters and biases.
"""
check_type(hidden_size, 'hidden_size', int, 'GRUCell')
check_dtype(dtype, 'dtype', ['float32', 'float64'], 'GRUCell')
self.hidden_size = hidden_size
from .. import contrib # TODO: resolve recurrent import
self.gru_unit = contrib.layers.rnn_impl.BasicGRUUnit(
@@ -263,10 +286,10 @@
Parameters:
inputs(Variable): A tensor with shape `[batch_size, input_size]`,
corresponding to :math:`x_t` in the formula. The data type
should be float32.
should be float32 or float64.
states(Variable): A tensor with shape `[batch_size, hidden_size]`.
corresponding to :math:`h_{t-1}` in the formula. The data type
should be float32.
should be float32 or float64.
Returns:
tuple: A tuple( :code:`(outputs, new_states)` ), where `outputs` and \
@@ -274,6 +297,11 @@ class GRUCell(RNNCell):
corresponding to :math:`h_t` in the formula. The data type of the \
tensor is same as that of `states`.
"""
check_variable_and_dtype(inputs, 'inputs', ['float32', 'float64'],
'GRUCell')
check_variable_and_dtype(states, 'states', ['float32', 'float64'],
'GRUCell')
new_hidden = self.gru_unit(inputs, states)
return new_hidden, new_hidden
@@ -343,6 +371,9 @@ class LSTMCell(RNNCell):
dtype(string, optional): The data type used in this cell. Default float32.
name(string, optional) : The name scope used to identify parameters and biases.
"""
check_type(hidden_size, 'hidden_size', int, 'LSTMCell')
check_dtype(dtype, 'dtype', ['float32', 'float64'], 'LSTMCell')
self.hidden_size = hidden_size
from .. import contrib # TODO: resolve recurrent import
self.lstm_unit = contrib.layers.rnn_impl.BasicLSTMUnit(
@@ -356,10 +387,10 @@
Parameters:
inputs(Variable): A tensor with shape `[batch_size, input_size]`,
corresponding to :math:`x_t` in the formula. The data type
should be float32.
should be float32 or float64.
states(Variable): A list containing two tensors, each shaped
`[batch_size, hidden_size]`, corresponding to :math:`h_{t-1}, c_{t-1}`
in the formula. The data type should be float32.
in the formula. The data type should be float32 or float64.
Returns:
tuple: A tuple( :code:`(outputs, new_states)` ), where `outputs` is \
@@ -369,6 +400,15 @@
to :math:`h_{t}, c_{t}` in the formula. The data type of these \
tensors all is same as that of `states`.
"""
check_variable_and_dtype(inputs, 'inputs', ['float32', 'float64'],
'LSTMCell')
check_type(states, 'states', list, 'LSTMCell')
if isinstance(states, list):
for i, state in enumerate(states):
check_variable_and_dtype(state, 'state[' + str(i) + ']',
['float32', 'float64'], 'LSTMCell')
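# `states` is expected to be [hidden, cell] from the previous time step.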
pre_hidden, pre_cell = states
new_hidden, new_cell = self.lstm_unit(inputs, pre_hidden, pre_cell)
return new_hidden, [new_hidden, new_cell]
@@ -444,6 +484,26 @@ def rnn(cell,
cell = fluid.layers.GRUCell(hidden_size=128)
outputs = fluid.layers.rnn(cell=cell, inputs=inputs)
"""
check_type(inputs, 'inputs', (Variable, list, tuple), 'rnn')
if isinstance(inputs, (list, tuple)):
for i, input_x in enumerate(inputs):
check_variable_and_dtype(input_x, 'inputs[' + str(i) + ']',
['float32', 'float64'], 'rnn')
check_type(initial_states, 'initial_states',
(Variable, list, tuple, type(None)), 'rnn')
if isinstance(initial_states, (list, tuple)):
states = map_structure(lambda x: x, initial_states)[0]
for i, state in enumerate(states):
if isinstance(state, (list, tuple)):
for j, state_j in enumerate(state):
check_variable_and_dtype(state_j, 'states[' + str(i) + '][' + str(j) + ']',
['float32', 'float64'], 'rnn')
else:
check_variable_and_dtype(state, 'states[' + str(i) + ']',
['float32', 'float64'], 'rnn')
check_type(sequence_length, 'sequence_length', (Variable, type(None)),
'rnn')
def _maybe_copy(state, new_state, step_mask):
# TODO: use where_op
@@ -753,7 +813,7 @@ class BeamSearchDecoder(Decoder):
Parameters:
probs(Variable): A tensor with shape `[batch_size, ...]`, representing
the log probabilities. Its data type should be float32.
the log probabilities. Its data type should be float32 or float64.
finished(Variable): A tensor with shape `[batch_size, beam_size]`,
representing the finished status for all beams. Its data type
should be bool.
@@ -775,7 +835,7 @@ class BeamSearchDecoder(Decoder):
Parameters:
probs(Variable): A tensor with shape `[batch_size, beam_size, vocab_size]`,
representing the log probabilities. Its data type should be float32.
representing the log probabilities. Its data type should be float32 or float64.
finished(Variable): A tensor with shape `[batch_size, beam_size]`,
representing the finished status for all beams. Its data type
should be bool.
@@ -2127,7 +2187,12 @@ def lstm(input,
"""
helper = LayerHelper('cudnn_lstm', **locals())
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'lstm')
check_variable_and_dtype(init_h, 'init_h', ['float32', 'float64'], 'lstm')
check_variable_and_dtype(init_c, 'init_c', ['float32', 'float64'], 'lstm')
check_type(max_len, 'max_len', int, 'lstm')
check_type(hidden_size, 'hidden_size', int, 'lstm')
check_type(num_layers, 'num_layers', int, 'lstm')
dtype = input.dtype
input_shape = list(input.shape)
input_size = input_shape[-1]
@@ -2652,6 +2717,10 @@ def gru_unit(input,
input=x, hidden=pre_hidden, size=hidden_dim * 3)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'gru_unit')
check_variable_and_dtype(hidden, 'hidden', ['float32', 'float64'],
'gru_unit')
check_type(size, 'size', int, 'gru_unit')
activation_dict = dict(
identity=0,
sigmoid=1,
@@ -2741,7 +2810,7 @@ def beam_search(pre_ids,
pre_scores(Variable): A LodTensor variable that has the same shape and lod
as ``pre_ids`` , representing the accumulated scores corresponding
to the selected ids of previous step. It is the output of
beam_search at previous step. The data type should be float32.
beam_search at previous step. The data type should be float32 or float64.
ids(Variable|None): A LodTensor variable containing the candidates ids.
It has the same lod with ``pre_ids`` and its shape should be
`[batch_size * beam_size, K]`, where `K` supposed to be greater than
@@ -2751,7 +2820,7 @@
ids.
scores(Variable): A LodTensor variable containing the accumulated
scores corresponding to ``ids`` . Both its shape and lod are same as
those of ``ids`` . The data type should be float32.
those of ``ids`` . The data type should be float32 or float64.
beam_size(int): The beam width used in beam search.
end_id(int): The id of end token.
level(int): **It can be ignored and mustn't change currently.**
@@ -3007,7 +3076,11 @@ def lstm_unit(x_t,
cell_t_prev=pre_cell)
"""
helper = LayerHelper('lstm_unit', **locals())
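# Verify x_t, hidden_t_prev and cell_t_prev are Variables with a
# float32/float64 dtype before the shape checks below.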
check_variable_and_dtype(x_t, 'x_t', ['float32', 'float64'], 'lstm_unit')
check_variable_and_dtype(hidden_t_prev, 'hidden_t_prev',
['float32', 'float64'], 'lstm_unit')
check_variable_and_dtype(cell_t_prev, 'cell_t_prev',
['float32', 'float64'], 'lstm_unit')
if len(x_t.shape) != 2:
raise ValueError("Rank of x_t must be 2.")
......
@@ -18,6 +18,7 @@ import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid.framework import program_guard, Program
class TestGatherTreeOp(OpTest):
@@ -61,5 +62,56 @@ class TestGatherTreeOpAPI(unittest.TestCase):
final_sequences = fluid.layers.gather_tree(ids, parents)
class TestGatherTreeOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
ids = fluid.layers.data(
name='ids',
shape=[5, 2, 2],
dtype='int64',
append_batch_size=False)
parents = fluid.layers.data(
name='parents',
shape=[5, 2, 2],
dtype='int64',
append_batch_size=False)
def test_Variable_ids():
# the input type must be Variable
np_ids = np.random.random((5, 2, 2)).astype('int64')
fluid.layers.gather_tree(np_ids, parents)
self.assertRaises(TypeError, test_Variable_ids)
def test_Variable_parents():
# the input type must be Variable
np_parents = np.random.random((5, 2, 2)).astype('int64')
fluid.layers.gather_tree(ids, np_parents)
self.assertRaises(TypeError, test_Variable_parents)
def test_type_ids():
# dtype must be int32 or int64
bad_ids = fluid.layers.data(
name='bad_ids',
shape=[5, 2, 2],
dtype='float32',
append_batch_size=False)
fluid.layers.gather_tree(bad_ids, parents)
self.assertRaises(TypeError, test_type_ids)
def test_type_parents():
# dtype must be int32 or int64
bad_parents = fluid.layers.data(
name='bad_parents',
shape=[5, 2, 2],
dtype='float32',
append_batch_size=False)
fluid.layers.gather_tree(ids, bad_parents)
self.assertRaises(TypeError, test_type_parents)
if __name__ == "__main__":
unittest.main()
@@ -19,6 +19,9 @@ import unittest
import numpy as np
import paddle.fluid as fluid
from op_test import OpTest
from paddle import fluid
from paddle.fluid.layers import gru_unit
from paddle.fluid.framework import program_guard, Program
class TestGRUUnitAPIError(unittest.TestCase):
@@ -59,6 +62,49 @@ def relu(x):
return np.maximum(x, 0)
class TestGRUUnitOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
batch_size = 5
hidden_dim = 40
input = fluid.data(
name='input', shape=[None, hidden_dim * 3], dtype='float32')
pre_hidden = fluid.data(
name='pre_hidden', shape=[None, hidden_dim], dtype='float32')
np_input = np.random.uniform(
-0.1, 0.1, (batch_size, hidden_dim * 3)).astype('float64')
np_pre_hidden = np.random.uniform(
-0.1, 0.1, (batch_size, hidden_dim)).astype('float64')
def test_input_Variable():
gru_unit(np_input, pre_hidden, hidden_dim * 3)
self.assertRaises(TypeError, test_input_Variable)
def test_pre_hidden_Variable():
gru_unit(input, np_pre_hidden, hidden_dim * 3)
self.assertRaises(TypeError, test_pre_hidden_Variable)
def test_input_type():
error_input = fluid.data(
name='error_input',
shape=[None, hidden_dim * 3],
dtype='int32')
gru_unit(error_input, pre_hidden, hidden_dim * 3)
self.assertRaises(TypeError, test_input_type)
def test_pre_hidden_type():
error_pre_hidden = fluid.data(
name='error_pre_hidden',
shape=[None, hidden_dim],
dtype='int32')
gru_unit(input, error_pre_hidden, hidden_dim * 3)
self.assertRaises(TypeError, test_pre_hidden_type)
class TestGRUUnitOp(OpTest):
batch_size = 5
frame_size = 40
......
@@ -17,6 +17,9 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
from paddle import fluid
from paddle.fluid.layers import lstm as fluid_lstm, fill_constant  # aliased: a numpy lstm() helper below shadows the name
from paddle.fluid.framework import program_guard, Program
SIGMOID_THRESHOLD_MIN = -40.0
SIGMOID_THRESHOLD_MAX = 13.0
@@ -126,6 +129,87 @@ def lstm(
return hidden, cell
class TestLstmOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
batch_size = 20
seq_len = 100
dropout_prob = 0.2
hidden_size = 150
num_layers = 1
input = fluid.data(
name='input',
shape=[batch_size, seq_len, hidden_size],
dtype='float32')
pre_hidden = fill_constant([num_layers, batch_size, hidden_size],
'float32', 0.0)
pre_cell = fill_constant([num_layers, batch_size, hidden_size],
'float32', 0.0)
np_input = np.random.uniform(
-0.1, 0.1, (batch_size, seq_len, hidden_size)).astype('float64')
np_pre_hidden = np.random.uniform(
-0.1, 0.1,
(num_layers, batch_size, hidden_size)).astype('float64')
np_pre_cell = np.random.uniform(
-0.1, 0.1,
(num_layers, batch_size, hidden_size)).astype('float64')
def test_input_Variable():
fluid_lstm(np_input, pre_hidden, pre_cell, \
seq_len, hidden_size, num_layers, \
dropout_prob=dropout_prob)
self.assertRaises(TypeError, test_input_Variable)
def test_pre_hidden_Variable():
fluid_lstm(input, np_pre_hidden, pre_cell, \
seq_len, hidden_size, num_layers, \
dropout_prob=dropout_prob)
self.assertRaises(TypeError, test_pre_hidden_Variable)
def test_pre_cell_Variable():
fluid_lstm(input, pre_hidden, np_pre_cell, \
seq_len, hidden_size, num_layers, \
dropout_prob=dropout_prob)
self.assertRaises(TypeError, test_pre_cell_Variable)
def test_input_type():
error_input = fluid.data(
name='error_input',
shape=[None, hidden_size * 3],
dtype='int32')
fluid_lstm(error_input, pre_hidden, pre_cell, \
seq_len, hidden_size, num_layers, \
dropout_prob=dropout_prob)
self.assertRaises(TypeError, test_input_type)
def test_pre_hidden_type():
error_pre_hidden = fluid.data(
name='error_pre_hidden',
shape=[None, hidden_size],
dtype='int32')
fluid_lstm(input, error_pre_hidden, pre_cell, \
seq_len, hidden_size, num_layers, \
dropout_prob=dropout_prob)
self.assertRaises(TypeError, test_pre_hidden_type)
def test_pre_cell_type():
error_pre_cell = fluid.data(
name='error_pre_cell',
shape=[None, hidden_size],
dtype='int32')
fluid_lstm(input, pre_hidden, error_pre_cell, \
seq_len, hidden_size, num_layers, \
dropout_prob=dropout_prob)
self.assertRaises(TypeError, test_pre_cell_type)
class TestLstmOp(OpTest):
def set_lod(self):
self.lod = [[2, 3, 2]]
......
@@ -17,6 +17,9 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
from paddle import fluid
from paddle.fluid.layers import lstm_unit
from paddle.fluid.framework import program_guard, Program
def sigmoid_np(x):
@@ -27,11 +30,77 @@ def tanh_np(x):
return 2 * sigmoid_np(2. * x) - 1.
class LstmUnitTestError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
batch_size, dict_dim, emb_dim, hidden_dim = 32, 128, 64, 512
data = fluid.data(
name='step_data', shape=[batch_size], dtype='int64')
inputs = fluid.embedding(input=data, size=[dict_dim, emb_dim])
pre_hidden = fluid.data(
name='pre_hidden',
shape=[batch_size, hidden_dim],
dtype='float32')
pre_cell = fluid.data(
name='pre_cell',
shape=[batch_size, hidden_dim],
dtype='float32')
np_input = np.random.uniform(
-0.1, 0.1, (batch_size, emb_dim)).astype('float64')
np_pre_hidden = np.random.uniform(
-0.1, 0.1, (batch_size, hidden_dim)).astype('float64')
np_pre_cell = np.random.uniform(
-0.1, 0.1, (batch_size, hidden_dim)).astype('float64')
def test_input_Variable():
lstm_unit(np_input, pre_hidden, pre_cell)
self.assertRaises(TypeError, test_input_Variable)
def test_pre_hidden_Variable():
lstm_unit(inputs, np_pre_hidden, pre_cell)
self.assertRaises(TypeError, test_pre_hidden_Variable)
def test_pre_cell_Variable():
lstm_unit(inputs, pre_hidden, np_pre_cell)
self.assertRaises(TypeError, test_pre_cell_Variable)
def test_input_type():
error_input = fluid.data(
name='error_input',
shape=[batch_size, emb_dim],
dtype='int32')
lstm_unit(error_input, pre_hidden, pre_cell)
self.assertRaises(TypeError, test_input_type)
def test_pre_hidden_type():
error_pre_hidden = fluid.data(
name='error_pre_hidden',
shape=[batch_size, hidden_dim],
dtype='int32')
lstm_unit(inputs, error_pre_hidden, pre_cell)
self.assertRaises(TypeError, test_pre_hidden_type)
def test_pre_cell_type():
error_pre_cell = fluid.data(
name='error_pre_cell',
shape=[batch_size, hidden_dim],
dtype='int32')
lstm_unit(inputs, pre_hidden, error_pre_cell)
self.assertRaises(TypeError, test_pre_cell_type)
class LstmUnitTest(OpTest):
def setUp(self):
self.op_type = "lstm_unit"
x_np = np.random.normal(size=(5, 16)).astype("float64")
c_np = np.random.normal(size=(5, 4)).astype("float64")
x_np = np.random.normal(size=(15, 160)).astype("float64")
c_np = np.random.normal(size=(15, 40)).astype("float64")
i_np, f_np, o_np, j_np = np.split(x_np, 4, axis=1)
forget_bias_np = 0.
self.attrs = {'forget_bias': 0.}
......
@@ -20,6 +20,7 @@ import numpy
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from paddle.fluid.framework import program_guard, Program
from paddle.fluid.executor import Executor
from paddle.fluid import framework
@@ -33,6 +34,73 @@ import paddle.fluid.layers.utils as utils
import numpy as np
class TestLSTMCellError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
batch_size, input_size, hidden_size = 4, 16, 16
inputs = fluid.data(
name='inputs', shape=[None, input_size], dtype='float32')
pre_hidden = fluid.data(
name='pre_hidden', shape=[None, hidden_size], dtype='float32')
pre_cell = fluid.data(
name='pre_cell', shape=[None, hidden_size], dtype='float32')
cell = LSTMCell(hidden_size)
def test_input_Variable():
np_input = np.random.random(
(batch_size, input_size)).astype("float32")
cell(np_input, [pre_hidden, pre_cell])
self.assertRaises(TypeError, test_input_Variable)
def test_pre_hidden_Variable():
np_pre_hidden = np.random.random(
(batch_size, hidden_size)).astype("float32")
cell(inputs, [np_pre_hidden, pre_cell])
self.assertRaises(TypeError, test_pre_hidden_Variable)
def test_pre_cell_Variable():
np_pre_cell = np.random.random(
(batch_size, hidden_size)).astype("float32")
cell(inputs, [pre_hidden, np_pre_cell])
self.assertRaises(TypeError, test_pre_cell_Variable)
def test_input_type():
error_inputs = fluid.data(
name='error_inputs',
shape=[None, input_size],
dtype='int32')
cell(error_inputs, [pre_hidden, pre_cell])
self.assertRaises(TypeError, test_input_type)
def test_pre_hidden_type():
error_pre_hidden = fluid.data(
name='error_pre_hidden',
shape=[None, hidden_size],
dtype='int32')
cell(inputs, [error_pre_hidden, pre_cell])
self.assertRaises(TypeError, test_pre_hidden_type)
def test_pre_cell_type():
error_pre_cell = fluid.data(
name='error_pre_cell',
shape=[None, hidden_size],
dtype='int32')
cell(inputs, [pre_hidden, error_pre_cell])
self.assertRaises(TypeError, test_pre_cell_type)
def test_dtype():
# the dtype must be float32 or float64
LSTMCell(hidden_size, dtype="int32")
self.assertRaises(TypeError, test_dtype)
class TestLSTMCell(unittest.TestCase):
def setUp(self):
self.batch_size = 4
@@ -93,6 +161,58 @@ class TestLSTMCell(unittest.TestCase):
self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4, atol=0))
class TestGRUCellError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
batch_size, input_size, hidden_size = 4, 16, 16
inputs = fluid.data(
name='inputs', shape=[None, input_size], dtype='float32')
pre_hidden = layers.data(
name='pre_hidden',
shape=[None, hidden_size],
append_batch_size=False,
dtype='float32')
cell = GRUCell(hidden_size)
def test_input_Variable():
np_input = np.random.random(
(batch_size, input_size)).astype("float32")
cell(np_input, pre_hidden)
self.assertRaises(TypeError, test_input_Variable)
def test_pre_hidden_Variable():
np_pre_hidden = np.random.random(
(batch_size, hidden_size)).astype("float32")
cell(inputs, np_pre_hidden)
self.assertRaises(TypeError, test_pre_hidden_Variable)
def test_input_type():
error_inputs = fluid.data(
name='error_inputs',
shape=[None, input_size],
dtype='int32')
cell(error_inputs, pre_hidden)
self.assertRaises(TypeError, test_input_type)
def test_pre_hidden_type():
error_pre_hidden = fluid.data(
name='error_pre_hidden',
shape=[None, hidden_size],
dtype='int32')
cell(inputs, error_pre_hidden)
self.assertRaises(TypeError, test_pre_hidden_type)
def test_dtype():
# the dtype must be float32 or float64
GRUCell(hidden_size, dtype="int32")
self.assertRaises(TypeError, test_dtype)
class TestGRUCell(unittest.TestCase):
def setUp(self):
self.batch_size = 4
@@ -151,6 +271,92 @@ class TestGRUCell(unittest.TestCase):
self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4, atol=0))
class TestRnnError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
batch_size = 4
input_size = 16
hidden_size = 16
seq_len = 4
inputs = fluid.data(
name='inputs', shape=[None, input_size], dtype='float32')
pre_hidden = layers.data(
name='pre_hidden',
shape=[None, hidden_size],
append_batch_size=False,
dtype='float32')
inputs_basic_lstm = fluid.data(
name='inputs_basic_lstm',
shape=[None, None, input_size],
dtype='float32')
sequence_length = fluid.data(
name="sequence_length", shape=[None], dtype='int64')
inputs_dynamic_rnn = layers.transpose(
inputs_basic_lstm, perm=[1, 0, 2])
cell = LSTMCell(hidden_size, name="LSTMCell_for_rnn")
np_inputs_dynamic_rnn = np.random.random(
(seq_len, batch_size, input_size)).astype("float32")
def test_input_Variable():
dynamic_rnn(
cell=cell,
inputs=np_inputs_dynamic_rnn,
sequence_length=sequence_length,
is_reverse=False)
self.assertRaises(TypeError, test_input_Variable)
def test_input_list():
dynamic_rnn(
cell=cell,
inputs=[np_inputs_dynamic_rnn],
sequence_length=sequence_length,
is_reverse=False)
self.assertRaises(TypeError, test_input_list)
def test_initial_states_type():
cell = GRUCell(hidden_size, name="GRUCell_for_rnn")
error_initial_states = np.random.random(
(batch_size, hidden_size)).astype("float32")
dynamic_rnn(
cell=cell,
inputs=inputs_dynamic_rnn,
initial_states=error_initial_states,
sequence_length=sequence_length,
is_reverse=False)
self.assertRaises(TypeError, test_initial_states_type)
def test_initial_states_list():
error_initial_states = [
np.random.random(
(batch_size, hidden_size)).astype("float32"),
np.random.random(
(batch_size, hidden_size)).astype("float32")
]
dynamic_rnn(
cell=cell,
inputs=inputs_dynamic_rnn,
initial_states=error_initial_states,
sequence_length=sequence_length,
is_reverse=False)
self.assertRaises(TypeError, test_initial_states_list)
def test_sequence_length_type():
np_sequence_length = np.random.random(
(batch_size)).astype("float32")
dynamic_rnn(
cell=cell,
inputs=inputs_dynamic_rnn,
sequence_length=np_sequence_length,
is_reverse=False)
self.assertRaises(TypeError, test_sequence_length_type)
class TestRnn(unittest.TestCase):
def setUp(self):
self.batch_size = 4
......