Commit b8d1f503 authored by Zhen Wang

Add the executor test for the graph clone API. test=develop

Parent ac6ef06f
No related merge requests
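In outline, the new test builds a small MNIST conv network, wraps the main program in an IrGraph, clones it, and then compiles and trains both the original and the cloned graph to confirm the clone is independently executable. Below is a minimal sketch of that flow, assuming the same paddle.fluid 1.x API the diff itself uses; the tiny fc network here is a stand-in for the test's conv_block, not part of the commit.

    # Minimal sketch of the clone-then-execute flow exercised by the test.
    # The toy network is an illustrative stand-in for conv_block().
    import paddle.fluid as fluid
    from paddle.fluid import core
    from paddle.fluid.framework import IrGraph

    main, startup = fluid.Program(), fluid.Program()
    with fluid.program_guard(main, startup):
        x = fluid.layers.data(name='x', shape=[4], dtype='float32')
        loss = fluid.layers.mean(fluid.layers.fc(input=x, size=1))
        fluid.optimizer.Adam(learning_rate=0.001).minimize(loss)

    graph = IrGraph(core.Graph(main.desc), for_test=False)
    backup_graph = graph.clone()  # deep copy of the IR graph
    assert len(graph.all_nodes()) == len(backup_graph.all_nodes())

    # Both the original and the cloned graph compile into runnable binaries,
    # which is what the executor part of the test then trains and checks.
    origin_binary = fluid.CompiledProgram(graph.graph).with_data_parallel(
        loss_name=loss.name)
    backup_binary = fluid.CompiledProgram(backup_graph.graph).with_data_parallel(
        loss_name=loss.name)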
@@ -13,59 +13,92 @@
 # limitations under the license.
 from __future__ import print_function
+import os
+import six
 import unittest
+import paddle
 import paddle.fluid as fluid
-import six
 from paddle.fluid.framework import IrGraph
 from paddle.fluid import core
 
+os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+os.environ["CPU_NUM"] = "1"
 
-def residual_block(num):
-    def conv_bn_layer(input,
-                      ch_out,
-                      filter_size,
-                      stride,
-                      padding,
-                      act='relu',
-                      bias_attr=False):
-        tmp = fluid.layers.conv2d(
-            input=input,
-            filter_size=filter_size,
-            num_filters=ch_out,
-            stride=stride,
-            padding=padding,
-            act=None,
-            bias_attr=bias_attr)
-        return fluid.layers.batch_norm(input=tmp, act=act)
 
-    data = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
+def conv_block():
+    img = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32')
     label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-    hidden = data
-    for _ in six.moves.xrange(num):
-        conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
-        short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
-        hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
-    fc = fluid.layers.fc(input=hidden, size=10)
-    loss = fluid.layers.cross_entropy(input=fc, label=label)
-    loss = fluid.layers.mean(loss)
-    return loss
+    conv_pool_1 = fluid.nets.simple_img_conv_pool(
+        input=img,
+        filter_size=5,
+        num_filters=20,
+        pool_size=2,
+        pool_stride=2,
+        act="relu")
+    conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
+    conv_pool_2 = fluid.nets.simple_img_conv_pool(
+        input=conv_pool_1,
+        filter_size=5,
+        num_filters=50,
+        pool_size=2,
+        pool_stride=2,
+        act="relu")
+    prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax')
+    loss = fluid.layers.cross_entropy(input=prediction, label=label)
+    avg_loss = fluid.layers.mean(loss)
+    return [img, label], avg_loss
 
 
 class TestGraph(unittest.TestCase):
-    def test_graph_functions(self, for_ci=True):
+    def graph_apis(self, use_cuda=False, for_ci=True):
         main = fluid.Program()
         startup = fluid.Program()
         with fluid.program_guard(main, startup):
-            loss = residual_block(2)
+            feeds, loss = conv_block()
             opt = fluid.optimizer.Adam(learning_rate=0.001)
             opt.minimize(loss)
         graph = IrGraph(core.Graph(main.desc), for_test=False)
+        backup_graph = graph.clone()
+        self.assertEqual(len(graph.all_nodes()), len(backup_graph.all_nodes()))
+        build_strategy = fluid.BuildStrategy()
+        build_strategy.memory_optimize = False
+        build_strategy.enable_inplace = False
+        origin_binary = fluid.CompiledProgram(graph.graph).with_data_parallel(
+            loss_name=loss.name, build_strategy=build_strategy)
+        backup_binary = fluid.CompiledProgram(
+            backup_graph.graph).with_data_parallel(
+                loss_name=loss.name, build_strategy=build_strategy)
+        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        exe.run(startup)
+        iters = 5
+        batch_size = 8
+        train_reader = paddle.batch(
+            paddle.dataset.mnist.train(), batch_size=batch_size)
+        feeder = fluid.DataFeeder(feed_list=feeds, place=place)
+
+        def train(binary):
+            for _ in range(iters):
+                data = next(train_reader())
+                loss_v = exe.run(binary,
+                                 feed=feeder.feed(data),
+                                 fetch_list=[loss.name])
+                print('{}: {}'.format('loss', loss_v))
+
+        train(origin_binary)
+        train(backup_binary)
+
         marked_nodes = set()
         for op in graph.all_op_nodes():
             if op.name().find('conv2d') > -1:
                 marked_nodes.add(op)
         if not for_ci:
             graph.draw('.', 'residual', marked_nodes)
+            backup_marked_nodes = set()
+            for op in backup_graph.all_op_nodes():
+                if op.name().find('conv2d') > -1:
+                    backup_marked_nodes.add(op)
+            backup_graph.draw('.', 'backup', backup_marked_nodes)
         self.assertFalse(graph.has_circle())
         self.assertEqual(graph.graph_num(), 1)
         nodes = graph.topology_sort()
@@ -75,14 +108,13 @@ class TestGraph(unittest.TestCase):
         nodes_num = len(graph.all_nodes())
         graph.safe_remove_nodes(marked_nodes)
         self.assertEqual(len(graph.all_nodes()), nodes_num - len(marked_nodes))
-        backup_graph = graph.clone()
-        self.assertEqual(len(graph.all_nodes()), len(backup_graph.all_nodes()))
-        if not for_ci:
-            backup_marked_nodes = set()
-            for op in backup_graph.all_op_nodes():
-                if op.name().find('conv2d') > -1:
-                    backup_marked_nodes.add(op)
-            backup_graph.draw('.', 'backup', backup_marked_nodes)
+
+    def test_graph_apis_cpu(self):
+        self.graph_apis(use_cuda=False, for_ci=True)
+
+    def test_graph_apis_cuda(self):
+        if fluid.core.is_compiled_with_cuda():
+            self.graph_apis(use_cuda=True, for_ci=True)
 
 
 if __name__ == '__main__':
...
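Note the design choice: the clone check is now driven through two unittest entry points, test_graph_apis_cpu and test_graph_apis_cuda, with the CUDA case guarded by fluid.core.is_compiled_with_cuda(), so the executor-level check still runs on CPU-only builds. Assuming the module keeps the test_graph name (the filename is not shown in this excerpt), it can be run standalone with:

    python -m unittest test_graph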