Commit 316bc9bf authored by Tao Luo

fix typo and warning in analyzer_resnet50_test

test=develop
Parent 5d6783f8
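
Note: most of the hunks below make the same small fix. A gtest comparison between a size_t expression and a plain int literal (for example ASSERT_EQ(GraphNum(g), 0)) can trigger a signed/unsigned comparison warning, and suffixing the literal with UL/U keeps both operands unsigned. A minimal standalone sketch of the pattern, assuming gtest and a compiler with -Wsign-compare (or -Wextra) enabled; the test name is made up:

// Sketch only: illustrates the warning class fixed in the hunks below.
#include <vector>

#include <gtest/gtest.h>

TEST(SignCompareSketch, UnsignedLiteral) {
  std::vector<int> v{1, 2, 3};
  // EXPECT_EQ(v.size(), 3);   // may warn: comparison between signed and
  //                           // unsigned integer expressions
  EXPECT_EQ(v.size(), 3UL);    // both operands are unsigned, no warning
}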
@@ -42,12 +42,10 @@ if(WITH_MKLDNN)
 pass_library(mkldnn_placement_pass base)
 pass_library(conv_bias_mkldnn_fuse_pass inference)
 pass_library(conv_relu_mkldnn_fuse_pass inference)
+pass_library(conv_elementwise_add_mkldnn_fuse_pass inference)
 endif()
 cc_library(fuse_elewise_add_act_pass SRCS fuse_elewise_add_act_pass.cc DEPS pass graph_pattern_detector )
-if(WITH_MKLDNN)
-pass_library(conv_elementwise_add_mkldnn_fuse_pass inference)
-endif()
 set(GLOB_PASS_LIB ${PASS_LIBRARY} CACHE INTERNAL "Global PASS library")
......
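Note: the CMakeLists.txt hunk above is a separate cleanup. It moves the conv_elementwise_add_mkldnn_fuse_pass registration into the existing if(WITH_MKLDNN) block, so the second if(WITH_MKLDNN)/endif() wrapper around that single pass_library call is no longer needed.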
@@ -200,15 +200,15 @@ TEST(GraphHelperTest, GraphNum) {
 Graph g(prog);
 BuildZeroGraph(&g);
-ASSERT_EQ(GraphNum(g), 0);
+ASSERT_EQ(GraphNum(g), 0UL);
 Graph g2(prog);
 BuildOneGraph(&g2);
-ASSERT_EQ(GraphNum(g2), 1);
+ASSERT_EQ(GraphNum(g2), 1UL);
 Graph g3(prog);
 BuildTwoGraphs(&g3);
-ASSERT_EQ(GraphNum(g3), 2);
+ASSERT_EQ(GraphNum(g3), 2UL);
 }
 }  // namespace ir
......
@@ -124,7 +124,7 @@ TEST(GraphTest, Basic) {
 ASSERT_EQ(n->outputs.size(), 0UL);
 }
 }
-ASSERT_EQ(nodes.size(), 5);
+ASSERT_EQ(nodes.size(), 5UL);
 }
 TEST(GraphTest, WriteAfterRead) {
......
@@ -103,7 +103,7 @@ TEST(ProgramDesc, copy_ctor) {
 ASSERT_EQ(1, op->GetBlockAttrId("sub_block"));
 found_sub_block = true;
-ASSERT_EQ(2, op->GetBlocksAttrIds("sub_blocks").size());
+ASSERT_EQ(2UL, op->GetBlocksAttrIds("sub_blocks").size());
 found_sub_blocks = true;
 }
 }
......
@@ -40,7 +40,7 @@ TEST(READER, decorate_chain) {
 auto endpoints = root->GetEndPoints();
 ASSERT_EQ(endpoints.size(), 2U);
 ASSERT_NE(endpoints.count(end_point1.get()), 0UL);
-ASSERT_NE(endpoints.count(end_point2.get()), 0);
+ASSERT_NE(endpoints.count(end_point2.get()), 0UL);
 }
 {
......
@@ -71,7 +71,7 @@ void profile(bool use_mkldnn = false) {
 }
 TEST(Analyzer_resnet50, profile) { profile(); }
-#ifndef PADDLE_WITH_MKLDNN
+#ifdef PADDLE_WITH_MKLDNN
 TEST(Analyzer_resnet50, profile_mkldnn) { profile(true /* use_mkldnn */); }
 #endif
......
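Note: the hunk above is the typo named in the commit title. Guarding the MKLDNN-specific test with #ifndef PADDLE_WITH_MKLDNN meant it was only compiled when MKLDNN support was absent, so profile_mkldnn could never exercise an MKLDNN build; #ifdef compiles it exactly when the feature macro is defined. A small standalone sketch of the guard direction (the macro is defined here purely for illustration):

// Sketch only: shows why the guard direction matters for a feature-specific test.
#define PADDLE_WITH_MKLDNN  // assume an MKLDNN-enabled build for this example

#include <iostream>

int main() {
#ifdef PADDLE_WITH_MKLDNN
  // Reached when the feature macro is defined, which is the intended
  // condition for registering an MKLDNN-specific test.
  std::cout << "MKLDNN build: profile_mkldnn would be compiled\n";
#else
  // The original #ifndef guard selected this branch instead, i.e. builds
  // without MKLDNN, which is why the test never ran where it mattered.
  std::cout << "non-MKLDNN build: profile_mkldnn would be skipped\n";
#endif
  return 0;
}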
@@ -50,7 +50,7 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
 auto &ref_out = ref_outputs[i];
 size_t size = VecReduceToInt(out.shape);
 size_t ref_size = VecReduceToInt(ref_out.shape);
-EXPECT_GT(size, 0);
+EXPECT_GT(size, 0UL);
 EXPECT_EQ(size, ref_size);
 EXPECT_EQ(out.dtype, ref_out.dtype);
 switch (out.dtype) {
......
@@ -237,7 +237,7 @@ TEST(BlockingQueue, speed_test_mode) {
 }
 for (size_t i = 0; i < queue_size; ++i) {
 q2.Receive(&b);
-EXPECT_EQ(b, 0);
+EXPECT_EQ(b, 0UL);
 }
 EXPECT_EQ(q2.Size(), queue_size);
 }