提交 48d9fd08 编写于 作者: D dzhwinter

fix default value. test=develop

上级 dfb21219
...@@ -117,7 +117,7 @@ class TestIrMemOptBase(BuildIrMemOptBase): ...@@ -117,7 +117,7 @@ class TestIrMemOptBase(BuildIrMemOptBase):
self.network = None self.network = None
def test_network(self): def test_network(self):
if self.network is None: if self.network is None or not core.is_compiled_with_cuda():
return return
baseline_first_loss, baseline_last_loss = None, None baseline_first_loss, baseline_last_loss = None, None
...@@ -139,7 +139,12 @@ class TestIrMemOptBase(BuildIrMemOptBase): ...@@ -139,7 +139,12 @@ class TestIrMemOptBase(BuildIrMemOptBase):
self.network, self.network,
use_cuda=use_cuda, use_cuda=use_cuda,
memory_opt=use_python_mem_opt) memory_opt=use_python_mem_opt)
self.assertAlmostEquals(np.mean(baseline_last_loss),
np.mean(cur_last_loss), delta=1e-2) self.assertAlmostEquals(
self.assertAlmostEquals(np.mean(baseline_first_loss), np.mean(baseline_last_loss),
np.mean(cur_first_loss), delta=1e-2) np.mean(cur_last_loss),
delta=1e-2)
self.assertAlmostEquals(
np.mean(baseline_first_loss),
np.mean(cur_first_loss),
delta=1e-2)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle.fluid as fluid
import unittest
from ir_memory_optimize_net_base import TestIrMemOptBase
from paddle.fluid.layers.control_flow import ConditionalBlock
def lstm_net(data,
             label,
             dict_dim,
             emb_dim=128,
             hid_dim=128,
             hid_dim2=96,
             class_dim=2,
             emb_lr=30.0):
    """Build a single-layer LSTM text-classification network.

    Pipeline: embedding lookup -> fc projection -> dynamic LSTM ->
    max sequence pooling -> tanh -> fc -> softmax classifier.

    Args:
        data: input token-id variable (sequence data).
        label: ground-truth label variable.
        dict_dim: vocabulary size for the embedding table.
        emb_dim: embedding width.
        hid_dim: LSTM hidden width.
        hid_dim2: width of the post-pooling fc layer.
        class_dim: number of output classes.
        emb_lr: learning-rate multiplier for the embedding parameters.

    Returns:
        The mean cross-entropy loss over the batch.
    """
    # Token ids -> dense vectors; embedding params get a boosted LR.
    embedded = fluid.layers.embedding(
        input=data,
        size=[dict_dim, emb_dim],
        param_attr=fluid.ParamAttr(learning_rate=emb_lr))
    # dynamic_lstm expects its input pre-projected to 4 * hid_dim
    # (one slice per LSTM gate).
    projected = fluid.layers.fc(input=embedded, size=hid_dim * 4)
    hidden_seq, _cell = fluid.layers.dynamic_lstm(
        input=projected, size=hid_dim * 4, is_reverse=False)
    # Collapse the variable-length sequence to a fixed-size vector.
    pooled = fluid.layers.sequence_pool(input=hidden_seq, pool_type='max')
    activated = fluid.layers.tanh(pooled)
    dense = fluid.layers.fc(input=activated, size=hid_dim2, act='tanh')
    prediction = fluid.layers.fc(input=dense, size=class_dim, act='softmax')
    per_example_loss = fluid.layers.cross_entropy(
        input=prediction, label=label)
    return fluid.layers.mean(x=per_example_loss)
class TestIrMemOptRNN(TestIrMemOptBase):
    """IR memory-optimization regression test driven by the LSTM net.

    The base class's test_network() consumes ``self.network`` and
    ``self.iter`` to run the training comparison.
    """

    def setUp(self):
        # Keep the run short: two training iterations are enough for
        # the first/last-loss comparison performed by the base class.
        self.iter = 2
        self.network = lstm_net
# Standard unittest entry point: discover and run the tests in this
# module when it is executed directly.
if __name__ == "__main__":
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册