Unverified · Commit 58f6d459 · authored by z8hanghuan · committed by GitHub

[cherry-pick]support multi_layer of bilstm,*test=kunlun (#42076)

* modify xpu.cmake,*test=kunlun (#41832)

* modify xpu.cmake,*test=kunlun

* modify xpu.cmake,*test=kunlun

* modify xpu.cmake,*test=kunlun

* modify xpu.cmake,*test=kunlun

* support bilstm,*test=kunlun

* [cherry-pick]support multi_layer of bilstm,*test=kunlun
Parent 8a12f459
@@ -296,6 +296,7 @@ XPUOpMap& get_kl2_ops() {
                     pOpKernelType(vartype::BOOL, XPUPlace()),
                     pOpKernelType(vartype::FP32, XPUPlace())})},
     {"rnn", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
+    {"rnn_grad", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
     {"roi_align", XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
     {"roi_align_grad",
      XPUKernelSet({pOpKernelType(vartype::FP32, XPUPlace())})},
......
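The hunk above registers an FP32 `rnn_grad` kernel in the KL2 (Kunlun 2) op map; without that entry, the gradient test enabled below would have no device kernel to dispatch to. As a minimal sketch (assuming the working directory is Paddle's `python/paddle/fluid/tests/unittests/xpu/`, where `get_test_cover_info.py` lives), this is how the test framework queries that map:

```python
# Minimal sketch: query the dtypes registered for an op in the XPU op map.
# Assumes Paddle's XPU unittest directory is on sys.path (an assumption,
# not something this diff sets up).
from get_test_cover_info import get_xpu_op_support_types

# With the hunk above applied, 'rnn' (and now 'rnn_grad') report FP32.
print(get_xpu_op_support_types('rnn'))  # e.g. ['float32']
```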
@@ -48,6 +48,10 @@ class XPUTestRNNOp(XPUOpTestWrapper):
             self.place = paddle.XPUPlace(0)
             self.sequence_length = np.ones(
                 (self.batch_size, ), dtype=np.int32) * self.seq_length
+            #self.sequence_length = np.array(
+            #    [12, 11, 10, 9, 8], dtype=np.int32)
+            self.num_layers = 1
+            self.is_bidirec = False
             self.set_attrs()
             self.mode = "LSTM"
             self.is_test = False
@@ -61,6 +65,10 @@ class XPUTestRNNOp(XPUOpTestWrapper):
                 high=0.1,
                 size=(self.seq_length, self.batch_size,
                       self.input_size)).astype(self.dtype)
+            input[11][1:][:] = 0
+            input[10][2:][:] = 0
+            input[9][3:][:] = 0
+            input[8][4:][:] = 0
 
             rnn1 = LSTM(
                 self.input_size,
@@ -117,7 +125,7 @@ class XPUTestRNNOp(XPUOpTestWrapper):
         def set_xpu(self):
             self.__class__.use_xpu = True
-            self.__class__.no_need_check_grad = True
+            self.__class__.no_need_check_grad = False
             self.__class__.op_type = self.in_type
 
         def test_check_output(self):
@@ -125,11 +133,20 @@ class XPUTestRNNOp(XPUOpTestWrapper):
                 self.place, atol=0.01,
                 no_check_set=['Reserve', 'DropoutState'])
 
+        def test_grad(self):
+            if not self.is_test:
+                var_name_list = self.get_weight_names()
+                grad_check_list = ['Input', 'init_h', 'init_c']
+                grad_check_list.extend(var_name_list)
+                self.check_grad_with_place(self.place,
+                                           set(grad_check_list),
+                                           ['Out', 'last_hidden', 'last_cell'])
+
         def init_size(self):
-            self.seq_length = 1
-            self.batch_size = 1
-            self.input_size = 5
-            self.hidden_size = 16
+            self.seq_length = 12
+            self.batch_size = 5
+            self.input_size = 3
+            self.hidden_size = 2
 
         def get_weight_names(self):
             weight_names = []
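With `no_need_check_grad` flipped to False above, the new `test_grad` compares the analytic gradients produced by the freshly registered `rnn_grad` kernel against numeric finite differences via `check_grad_with_place`. A toy sketch of that comparison idea (generic NumPy, not the OpTest implementation):

```python
import numpy as np

def numeric_grad(f, x, eps=1e-4):
    # Central finite differences -- the same idea OpTest's gradient
    # checker uses to build the reference gradient.
    g = np.zeros_like(x)
    for idx in np.ndindex(*x.shape):
        orig = x[idx]
        x[idx] = orig + eps
        fp = f(x)
        x[idx] = orig - eps
        fm = f(x)
        x[idx] = orig          # restore the perturbed element
        g[idx] = (fp - fm) / (2 * eps)
    return g

# Example: f(x) = sum(x**2); the analytic gradient is 2x.
x = np.random.rand(3, 2)
assert np.allclose(numeric_grad(lambda v: (v ** 2).sum(), x), 2 * x, atol=1e-3)
```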
@@ -142,38 +159,24 @@ class XPUTestRNNOp(XPUOpTestWrapper):
             return weight_names
 
         def set_attrs(self):
-            self.num_layers = 1
-            self.is_bidirec = False
+            self.num_layers = 2
+            self.is_bidirec = True
 
-    class TestRNNOp1(TestRNNOp):
-        def init_size(self):
-            self.seq_length = 2
-            self.batch_size = 4
-            self.input_size = 10
-            self.hidden_size = 32
+    class TestRNNOp0(TestRNNOp):
+        def set_attrs(self):
+            self.sequence_length = None
+
+    class TestRNNOp1(TestRNNOp):
+        def set_attrs(self):
+            self.num_layers = 1
+            self.is_bidirec = False
 
     class TestRNNOp2(TestRNNOp):
-        def init_size(self):
-            self.seq_length = 5
-            self.batch_size = 16
-            self.input_size = 30
-            self.hidden_size = 64
-
         def set_attrs(self):
             self.num_layers = 1
             self.is_bidirec = True
 
     class TestRNNOp3(TestRNNOp):
-        def init_size(self):
-            self.seq_length = 10
-            self.batch_size = 64
-            self.input_size = 50
-            self.hidden_size = 64
-
         def set_attrs(self):
             self.num_layers = 2
             self.is_bidirec = False
@@ -188,6 +191,17 @@ class XPUTestRNNOp(XPUOpTestWrapper):
             self.num_layers = 2
             self.is_bidirec = True
 
+    class TestRNNOp6(TestRNNOp):
+        def set_attrs(self):
+            self.num_layers = 2
+            self.is_bidirec = True
+            self.sequence_length = None
+
+    class TestRNNOp7(TestRNNOp):
+        def set_attrs(self):
+            self.num_layers = 3
+            self.is_bidirec = True
+
support_types = get_xpu_op_support_types('rnn')
for stype in support_types:
......
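The added TestRNNOp6/TestRNNOp7 cases are what the commit title refers to: stacked bidirectional LSTMs (`num_layers > 1`, `is_bidirec = True`). A minimal sketch of that configuration through the public `paddle.nn.LSTM` API, with shapes matching the new `init_size` defaults (seq_length=12, batch_size=5, input_size=3, hidden_size=2); the device selection below is an assumption for illustration, not part of the test:

```python
import paddle

# 'xpu' assumes a Kunlun device is available; fall back to CPU otherwise.
paddle.set_device('xpu' if paddle.is_compiled_with_xpu() else 'cpu')

# Two stacked layers, both directions: the "multi_layer bilstm" case.
lstm = paddle.nn.LSTM(input_size=3, hidden_size=2,
                      num_layers=2, direction='bidirect')

x = paddle.randn([5, 12, 3])        # [batch, seq_len, input_size]
y, (h, c) = lstm(x)

print(y.shape)  # [5, 12, 4]: hidden_size * 2 directions
print(h.shape)  # [4, 5, 2]:  num_layers * 2 directions
```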