Unverified commit b7cac50b, authored by Yiqun Liu and committed by GitHub

Implement a common python unittest to test the ir passes. (#22209)

* Implement a common python unittest to test the ir passes.
test=develop

* Save the results in np.array and support startup on CPU.
test=develop

* Fix the unittest.
test=develop

* Add check_program to check whether the optimized program is different from the origin one.
test=develop

* Remove the interface all_ops.
test=develop

* Add exception test in pass_test.
test=develop
Parent 99f5907e
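In outline, a pass unittest subclasses the new PassTest helper, builds a small network in setUp, declares the pass names and the expected fused op type, and then calls check_output (or check_output_with_place). The fc_fuse_pass test added at the end of this diff follows exactly this pattern; the sketch below only illustrates the intended usage, and the network as well as the my_fuse_pass name are hypothetical placeholders, not part of this commit.

import numpy as np
import paddle.fluid as fluid
from pass_test import PassTest


class MyFusePassTest(PassTest):
    def setUp(self):
        # Build the network whose graph the pass is expected to rewrite.
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(name="data", shape=[4, 16], dtype="float32")
            out = fluid.layers.fc(input=data, size=8, act="relu")

        self.feeds = {"data": np.random.random((4, 16)).astype("float32")}
        self.fetch_list = [out]
        self.pass_names = "my_fuse_pass"  # hypothetical pass name
        self.fused_op_type = "fc"         # op type expected after fusion
        self.num_fused_ops = 1

    def test_check_output(self):
        # Runs both the origin and the optimized program on every available
        # place and compares the fetched outputs.
        self.check_output()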
......@@ -165,4 +165,5 @@ int FCFusePass::ApplyFCPattern(Graph* graph, bool with_relu) const {
} // namespace framework
} // namespace paddle
REGISTER_PASS(fc_fuse_pass, paddle::framework::ir::FCFusePass);
REGISTER_PASS(fc_fuse_pass, paddle::framework::ir::FCFusePass)
.RequirePassAttr("use_gpu");
......@@ -27,12 +27,15 @@ Graph* Pass::Apply(Graph* graph) const {
CheckPrevPass();
PADDLE_ENFORCE(graph, "graph passed to Pass::Apply() cannot be empty.");
for (const std::string& attr : required_pass_attrs_) {
PADDLE_ENFORCE(attrs_.find(attr) != attrs_.end(),
"Required pass atrribute %s not set.", attr);
PADDLE_ENFORCE_NE(
attrs_.find(attr), attrs_.end(),
platform::errors::InvalidArgument(
"Required atrribute %s for pass < %s > is not set.", attr, Type()));
}
for (const std::string& attr : required_graph_attrs_) {
PADDLE_ENFORCE(graph->Has(attr), "Required graph atrribute %s not set.",
attr);
PADDLE_ENFORCE_EQ(graph->Has(attr), true,
platform::errors::InvalidArgument(
"Required atrribute %s for graph is not set.", attr));
}
ApplyImpl(graph);
// TODO(panyx0718): Add more verifications.
......
......@@ -60,10 +60,25 @@ class Pass {
try {
return *boost::any_cast<AttrType *>(attrs_.at(attr_name));
} catch (boost::bad_any_cast &) {
PADDLE_THROW(
"Invalid attribute type of %s error, expected: %s, actual: %s",
attr_name, typeid(AttrType *).name(),
attrs_.at(attr_name).type().name());
auto TypeToString = [](const std::type_info &info) -> std::string {
if (std::type_index(info) == std::type_index(typeid(bool *))) {
return "bool";
} else if (std::type_index(info) == std::type_index(typeid(int *))) {
return "int";
} else if (std::type_index(info) ==
std::type_index(typeid(const int *))) {
return "const int";
} else if (std::type_index(info) ==
std::type_index(typeid(std::string *))) {
return "std::string";
}
return info.name();
};
PADDLE_THROW(platform::errors::InvalidArgument(
"Invalid type for attritube %s, expected: %s, actual: %s", attr_name,
TypeToString(typeid(AttrType *)),
TypeToString(attrs_.at(attr_name).type())));
}
}
......
......@@ -63,18 +63,38 @@ TEST(PassTest, TestPassAttrCheck) {
} catch (paddle::platform::EnforceNotMet& e) {
exception = std::string(e.what());
}
ASSERT_TRUE(exception.find("test_pass_attr not set") != exception.npos);
ASSERT_TRUE(exception.find("Required atrribute test_pass_attr for pass < "
"test_pass > is not set") != exception.npos);
int val = 1;
graph.reset(new Graph(prog));
pass->SetNotOwned<int>("test_pass_attr", &val);
for (std::string try_type : {"bool", "const int", "std::string"}) {
try {
if (try_type == "bool") {
pass->Get<bool>("test_pass_attr");
} else if (try_type == "const int") {
pass->Get<const int>("test_pass_attr");
} else if (try_type == "std::string") {
pass->Get<std::string>("test_pass_attr");
}
} catch (paddle::platform::EnforceNotMet& e) {
exception = std::string(e.what());
}
std::string msg = "Invalid type for attritube test_pass_attr, expected: " +
try_type + ", actual: int";
ASSERT_TRUE(exception.find(msg) != exception.npos);
}
try {
graph.reset(pass->Apply(graph.release()));
} catch (paddle::platform::EnforceNotMet& e) {
exception = std::string(e.what());
}
ASSERT_TRUE(exception.find("test_graph_attr not set") != exception.npos);
ASSERT_TRUE(exception.find(
"Required atrribute test_graph_attr for graph is not set") !=
exception.npos);
graph.reset(new Graph(prog));
graph->Set<int>("test_graph_attr", new int);
......
......@@ -1597,6 +1597,8 @@ All parameter, weight, gradient are variables in Paddle.
[](ir::Pass &self, const std::string &name, const std::string &attr) {
self.Set<std::string>(name, new std::string(attr));
})
.def("set", [](ir::Pass &self, const std::string &name,
bool val) { self.Set<bool>(name, new bool(val)); })
.def("set", [](ir::Pass &self, const std::string &name,
int val) { self.Set<const int>(name, new int(val)); })
.def("set",
......
......@@ -336,6 +336,8 @@ if (WITH_MKLDNN)
add_subdirectory(mkldnn)
endif()
add_subdirectory(ir)
if (WITH_TESTING)
set_property(TEST test_parallel_executor_mnist PROPERTY ENVIRONMENT GLOG_vmodule=all_reduce_deps_pass=10)
endif()
......
file(GLOB TEST_IR_PASSES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
string(REPLACE ".py" "" TEST_IR_PASSES "${TEST_IR_PASSES}")
foreach(target ${TEST_IR_PASSES})
py_test_modules(${target} MODULES ${target})
endforeach()
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import six
import random
import unittest
import warnings
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.framework import Program, Block
from paddle.fluid.backward import append_backward
class PassTest(unittest.TestCase):
@classmethod
def setUpClass(self):
self.main_program = fluid.Program()
self.startup_program = fluid.Program()
self.feeds = None
self.fetch_list = None
self.pass_names = None
self.pass_attrs = {}
self.fused_op_type = None
self.num_fused_ops = -1
np.random.seed(123)
random.seed(124)
def _get_places(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
return places
def check_output(self, startup_on_cpu=False, atol=1e-5):
'''
Check whether the fetched outputs of the origin program and the
optimized program are the same.
        For an inference model, the parameters are loaded on CPUPlace first;
        after all the specified passes are applied, the parameters are copied
        to GPUPlace. Set startup_on_cpu to True to test an inference pass.
'''
places = self._get_places()
for place in places:
self.check_output_with_place(place, startup_on_cpu, atol)
def _run_program(self, executor, program):
outs = executor.run(program=program,
feed=self.feeds,
fetch_list=self.fetch_list,
return_numpy=False)
outs_np = []
outs_lod = []
for out in outs:
outs_np.append(np.array(out))
outs_lod.append(out.lod())
return outs_np, outs_lod
def _apply_ir_passes(self):
graph = core.Graph(self.main_program.desc)
graph.set_not_owned("__param_scope__", fluid.global_scope())
if not isinstance(self.pass_names, list):
self.pass_names = [self.pass_names]
pass_builder = core.PassBuilder()
for name in self.pass_names:
ir_pass = pass_builder.append_pass(name)
# Set attr for pass
if self.pass_attrs.get(name, None) is not None:
attrs = self.pass_attrs[name]
for key in attrs:
ir_pass.set(key, attrs[key])
trans_pass = pass_builder.append_pass("graph_to_program_pass")
opt_program = fluid.Program()
trans_pass.set_not_owned("program", opt_program.desc)
for p in pass_builder.all_passes():
p.apply(graph)
opt_program.blocks = [
Block(opt_program, i)
for i in six.moves.range(opt_program.desc.num_blocks())
]
opt_program._sync_with_cpp()
return opt_program
def check_output_with_place(self, place, startup_on_cpu=False, atol=1e-5):
'''
Check whether the fetched outputs of the origin program and the
optimized program are the same.
        For an inference model, the parameters are loaded on CPUPlace first;
        after all the specified passes are applied, the parameters are copied
        to GPUPlace. Set startup_on_cpu to True to test an inference pass.
'''
executor = fluid.Executor(place)
if startup_on_cpu:
# Initialize parameters on CPU
cpu_executor = fluid.Executor(fluid.CPUPlace())
cpu_executor.run(self.startup_program)
outs, lods = self._run_program(cpu_executor, self.main_program)
else:
executor.run(self.startup_program)
outs, lods = self._run_program(executor, self.main_program)
self.assertTrue(
len(self.fetch_list) == len(outs),
"Checking the number of fetchs failed. Expected: {}, Received: {}".
format(len(self.fetch_list), len(outs)))
# Parameters may be changed in ir passes.
opt_program = self._apply_ir_passes()
self.check_program(opt_program)
if startup_on_cpu and not isinstance(place, fluid.CPUPlace):
warnings.warn(
"Parameters are on CPU, and will be transfered to GPU "
"automatically by data transform.")
outs_opt, lods_opt = self._run_program(executor, opt_program)
self.assertTrue(
len(self.fetch_list) == len(outs_opt),
"Checking the number of fetchs failed. Expected: {}, Received: {}".
format(len(self.fetch_list), len(outs_opt)))
for i in six.moves.xrange(len(self.fetch_list)):
self.assertTrue(
np.allclose(
outs_opt[i], outs[i], atol=atol),
"Output < {} > has diff at {}".format(self.fetch_list[i].name,
str(place)))
def _check_fused_ops(self, program):
'''
        Check that the number of ops of the specified fused type equals the
        expected number.
'''
if self.fused_op_type is None or self.num_fused_ops < 0:
return
if program is None or program == self.main_program:
program = self._apply_ir_passes()
        actual_num_fused_ops = 0
        # IR passes can only be applied to block 0.
        for op in program.block(0).ops:
            if op.type == self.fused_op_type:
                actual_num_fused_ops += 1
        self.assertTrue(
            self.num_fused_ops == actual_num_fused_ops,
            "Checking the number of fused operators < {} > failed. "
            "Expected: {}, Received: {}".format(
                self.fused_op_type, self.num_fused_ops, actual_num_fused_ops))
def check_program(self, program=None):
'''
Check whether the optimized program is different from the origin
program.
'''
if program is None or program == self.main_program:
program = self._apply_ir_passes()
self._check_fused_ops(program)
self.assertTrue(
self.main_program.desc != program.desc,
"The optimized program and the origin main_program hold the same "
"desc.")
self.assertTrue(
self.main_program.num_blocks == program.num_blocks,
"The number of blocks of the origin program and the optimized "
"program are different ({} vs {}).".format(
self.main_program.num_blocks, program.num_blocks))
is_different = False
for i in six.moves.xrange(program.num_blocks):
if len(self.main_program.block(i).ops) != len(program.block(i).ops):
# The number of ops in the block i of the origin program and
# the optimized program is different.
is_different = True
break
            # Check whether each op of the origin program also exists in the optimized one.
for op in self.main_program.block(i).ops:
if not self._find_op(op, program, i):
is_different = True
break
if len(self.main_program.block(i).vars) != len(
program.block(i).vars):
# The number of vars in the block i of the origin program and
# the optimized program is different.
is_different = True
break
            # Check whether each var of the origin program also exists in the optimized one.
for name in self.main_program.block(i).vars:
var = self.main_program.block(i).var(name)
if not self._find_var(var, program, i):
is_different = True
break
self.assertTrue(
is_different,
"The optimized program is logically the same with the origin "
"program.")
    def _find_op(self, specified_op, program, block_id):
        # An op matches only if its type, inputs, outputs and attrs all match.
        for op in program.block(block_id).ops:
            if specified_op.type != op.type:
                continue
            is_matched = True
            for name in op.input_names:
                if op.input(name) != specified_op.input(name):
                    is_matched = False
                    break
            for name in op.output_names:
                if op.output(name) != specified_op.output(name):
                    is_matched = False
                    break
            for name in op.attr_names:
                if op.attr(name) != specified_op.attr(name):
                    is_matched = False
                    break
            if is_matched:
                return True
        return False
def _find_var(self, specified_var, program, block_id):
if not program.block(block_id).has_var(specified_var.name):
return False
var = program.block(block_id).var(specified_var.name)
if var.type != specified_var.type:
return False
if var.dtype != specified_var.dtype:
return False
if var.lod_level != specified_var.lod_level:
return False
if var.shape != specified_var.shape:
return False
if var.persistable != specified_var.persistable:
return False
return True
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from pass_test import PassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
class FCFusePassTest(PassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[32, 128], dtype="float32", lod_level=0)
tmp_0 = fluid.layers.fc(input=data,
size=128,
num_flatten_dims=1,
act="relu")
tmp_1 = fluid.layers.fc(input=tmp_0, size=32, num_flatten_dims=1)
tmp_2 = fluid.layers.softmax(input=tmp_1)
self.feeds = {"data": np.random.random((32, 128)).astype("float32")}
self.fetch_list = [tmp_0, tmp_1, tmp_2]
self.pass_names = "fc_fuse_pass"
self.fused_op_type = "fc"
self.num_fused_ops = 2
def test_check_output(self):
use_gpu_set = [False]
if core.is_compiled_with_cuda():
use_gpu_set.append(True)
for use_gpu in use_gpu_set:
self.pass_attrs = {"fc_fuse_pass": {"use_gpu": use_gpu}}
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
self.check_output_with_place(place, startup_on_cpu=True)
if __name__ == "__main__":
unittest.main()