Unverified commit aa8cef4a, authored by Paulina Gacek, committed by GitHub

Rewrite conv testers from cpp to python (#49582)

* conv_bias_mkldnn_fuse_pass_tester rewritten

* conv_concat_relu_mkldnn_fuse_pass_tester rewritten

* conv_elementwise_add_fuse_pass_tester rewritten

* mkldnn changed to onednn

* tests added to CMakeLists, style fix

* got rid of unnecessary UT, some style changes

* changes in naming convention

* max_examples reduced

* timeout added
Parent commit: 50c43dd3
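For context: the C++ gtest testers deleted below were replaced by Python tests built on Paddle's PassAutoScanTest, whose contract is visible in the diffs further down. A minimal illustrative sketch of that pattern (the pass and op names here are placeholders, not part of this commit):

import unittest
from auto_scan_test import PassAutoScanTest
from program_config import OpConfig, ProgramConfig, TensorConfig

class TestSomeOneDNNFusePass(PassAutoScanTest):
    def sample_program_config(self, draw):
        # Draw one random-but-legal program for the pass to rewrite.
        conv_op = OpConfig(
            type='conv2d',
            inputs={'Input': ['input_x'], 'Filter': ['filter']},
            outputs={'Output': ['conv_out']},
            attrs={
                'data_format': 'NCHW',
                'dilations': [1, 1],
                'padding_algorithm': 'EXPLICIT',
                'groups': 1,
                'paddings': [0, 0],
                'strides': [1, 1],
            },
        )
        return ProgramConfig(
            ops=[conv_op],
            weights={'filter': TensorConfig(shape=[3, 3, 3, 3])},
            inputs={'input_x': TensorConfig(shape=[1, 3, 32, 32])},
            outputs=['conv_out'],
        )

    def sample_predictor_configs(self, program_config):
        # Expected op types after the pass runs, plus (atol, rtol).
        config = self.create_inference_config(use_mkldnn=True)
        yield config, ['conv2d'], (1e-5, 1e-5)

    def test(self):
        self.run_and_statis(quant=False, passes=['some_fuse_pass'])

if __name__ == '__main__':
    unittest.main()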
@@ -387,22 +387,10 @@ if(WITH_MKLDNN)
test_depthwise_conv_mkldnn_pass
SRCS mkldnn/depthwise_conv_mkldnn_pass_tester.cc
DEPS depthwise_conv_mkldnn_pass)
-cc_test(
-test_conv_bias_mkldnn_fuse_pass_cc
-SRCS mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc
-DEPS conv_bias_mkldnn_fuse_pass naive_executor)
-cc_test(
-test_conv_activation_mkldnn_fuse_pass
-SRCS mkldnn/conv_activation_mkldnn_fuse_pass_tester.cc
-DEPS conv_activation_mkldnn_fuse_pass)
-cc_test(
-test_conv_concat_relu_mkldnn_fuse_pass
-SRCS mkldnn/conv_concat_relu_mkldnn_fuse_pass_tester.cc
-DEPS conv_activation_mkldnn_fuse_pass)
cc_test_old(
test_conv_elementwise_add_mkldnn_fuse_pass SRCS
mkldnn/conv_elementwise_add_mkldnn_fuse_pass_tester.cc DEPS
conv_elementwise_add_mkldnn_fuse_pass pass_test_util)
cc_test_old(
test_int8_scale_calculation_mkldnn_pass SRCS
mkldnn/int8_scale_calculation_mkldnn_pass_tester.cc DEPS
paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc (deleted):
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/imperative/type_defs.h"
#include "paddle/phi/common/place.h"
namespace paddle {
namespace framework {
namespace ir {
void SetOp(ProgramDesc* prog,
const std::string& type,
const std::string& name,
const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs) {
auto* op = prog->MutableBlock(0)->AppendOp();
op->SetType(type);
if (type == "conv2d") {
const std::vector<int> strides({1, 1});
const std::vector<int> paddings({0, 0});
const std::vector<int> dilations({1, 1});
op->SetAttr("use_mkldnn", true);
op->SetAttr("name", name);
op->SetAttr("strides", strides);
op->SetAttr("groups", 1);
op->SetAttr("paddings", paddings);
op->SetAttr("padding_algorithm", std::string("EXPLICIT"));
op->SetAttr("dilations", dilations);
op->SetAttr("data_format", std::string("NCHW"));
op->SetOutput("Output", outputs);
op->SetInput("Input", {inputs[0]});
op->SetInput("Filter", {inputs[1]});
if (inputs.size() > 2)
op->SetInput("Bias", {inputs[2]});
else
op->SetInput("Bias", {});
} else if (type == "elementwise_add") {
op->SetAttr("use_mkldnn", true);
op->SetAttr("axis", 1);
op->SetInput("X", {inputs[0]});
op->SetInput("Y", {inputs[1]});
op->SetOutput("Out", outputs);
}
op->SetAttr(OpProtoAndCheckerMaker::OpRoleAttrName(),
static_cast<int>(OpRole::kForward));
}
// (c, weights)->conv->f
// (f)->elementwise_add->g
ProgramDesc BuildProgramDesc(bool convWithExistingBias) {
ProgramDesc prog;
std::vector<std::string> nodes{"c", "weights", "f", "eltwise_bias", "g"};
if (convWithExistingBias) nodes.push_back("conv_bias");
for (auto& v : nodes) {
auto* var = prog.MutableBlock(0)->Var(v);
var->SetType(proto::VarType::LOD_TENSOR);
if (v == "weights" || v == "conv_bias" || v == "eltwise_bias") {
var->SetPersistable(true);
}
}
// conv+bias, both with MKL-DNN
if (convWithExistingBias) {
SetOp(&prog,
"conv2d",
"conv",
std::vector<std::string>({"c", "weights", "conv_bias"}),
std::vector<std::string>({"f"}));
} else {
SetOp(&prog,
"conv2d",
"conv",
std::vector<std::string>({"c", "weights"}),
std::vector<std::string>({"f"}));
}
SetOp(&prog,
"elementwise_add",
"eltwise",
std::vector<std::string>({"f", "eltwise_bias"}),
std::vector<std::string>({"g"}));
return prog;
}
void InitTensorHolder(Scope* scope,
const paddle::platform::Place& place,
const char* var_name) {
auto x = scope->Var(var_name);
auto tensor = x->GetMutable<phi::DenseTensor>();
tensor->mutable_data(
place, framework::TransToPhiDataType(proto::VarType::FP32), 1);
}
void MainTest(bool convWithExistingBias) {
auto prog = BuildProgramDesc(convWithExistingBias);
std::unique_ptr<ir::Graph> graph(new ir::Graph(prog));
auto place = phi::CPUPlace();
NaiveExecutor exe{place};
Scope scope;
// Init scope, as it is used in pass
exe.CreateVariables(prog, 0, true, &scope);
if (convWithExistingBias) {
InitTensorHolder(&scope, place, "conv_bias");
InitTensorHolder(&scope, place, "eltwise_bias");
}
graph->SetNotOwned(kParamScopeAttr, &scope);
auto pass = PassRegistry::Instance().Get("conv_bias_mkldnn_fuse_pass");
int original_nodes_num = graph->Nodes().size();
graph.reset(pass->Apply(graph.release()));
int current_nodes_num = graph->Nodes().size();
// Remove 3 nodes (conv op, bias var, conv_out var) and add 1 fused node,
// so the graph shrinks by 2 nodes net
EXPECT_EQ(original_nodes_num - 2, current_nodes_num);
// Assert conv_bias op in newly generated graph
int conv_bias_count = 0;
for (auto* node : graph->Nodes()) {
if (node->IsOp() && (node->Op()->Type() == "conv2d" ||
node->Op()->Type() == "fused_conv2d")) {
auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
// check if "conv" convolution is fused
auto op_name = PADDLE_GET_CONST(std::string, op->GetAttr("name"));
if (op_name == "conv") {
auto input_names = op->InputNames();
ASSERT_TRUE(std::find(input_names.begin(), input_names.end(), "Bias") !=
input_names.end());
auto bias = op->Input("Bias");
if (bias.size()) {
++conv_bias_count;
}
}
}
}
EXPECT_EQ(conv_bias_count, 1);
}
TEST(ConvBiasFusePass, bias_free_conv) { MainTest(false); }
TEST(ConvBiasFusePass, conv_with_existing_bias) { MainTest(true); }
TEST(ConvBiasFusePass, conv3d) {
Conv3DBiasFusePass pass;
ASSERT_EQ(pass.type(), std::string("conv3d"));
}
TEST(ConvBiasFusePass, conv2d_transpose) {
Conv2DTransposeBiasFusePass pass;
ASSERT_EQ(pass.type(), std::string("conv2d_transpose"));
}
TEST(ConvBiasFusePass, pass_op_version_check) {
ASSERT_TRUE(
paddle::framework::compatible::PassVersionCheckerRegistrar::GetInstance()
.IsPassCompatible("conv_bias_mkldnn_fuse_pass"));
}
} // namespace ir
} // namespace framework
} // namespace paddle
USE_PASS(conv_bias_mkldnn_fuse_pass);
paddle/fluid/framework/ir/mkldnn/conv_concat_relu_mkldnn_fuse_pass_tester.cc (deleted):
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.h"
#include "paddle/fluid/framework/op_proto_maker.h"
namespace paddle {
namespace framework {
namespace ir {
void SetOp(ProgramDesc* prog,
const std::string& type,
const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs,
bool use_mkldnn = true) {
auto* op = prog->MutableBlock(0)->AppendOp();
op->SetType(type);
if (type == "conv2d") {
op->SetAttr("use_mkldnn", use_mkldnn);
op->SetAttr("fuse_activation", std::string(""));
op->SetInput("Input", {inputs[0]});
op->SetInput("Filter", {inputs[1]});
if (inputs.size() > 2) {
op->SetInput("Bias", {inputs[2]});
}
op->SetOutput("Output", outputs);
} else if (type == "relu") {
op->SetAttr("use_mkldnn", use_mkldnn);
op->SetInput("X", inputs);
op->SetOutput("Out", outputs);
} else if (type == "pool2d") {
op->SetAttr("use_mkldnn", use_mkldnn);
op->SetInput("X", inputs);
op->SetOutput("Out", outputs);
} else if (type == "concat") {
op->SetAttr("use_mkldnn", use_mkldnn);
op->SetAttr("axis", 0);
op->SetInput("X", inputs);
op->SetOutput("Out", outputs);
}
op->SetAttr(OpProtoAndCheckerMaker::OpRoleAttrName(),
static_cast<int>(OpRole::kForward));
}
// (a1,w1)->conv1->c1
// (a2,w2,b2)->conv2->c2
// if put_only_convs_before_concat=true
// (a3,w3)->conv3->c3
// else
// a3->pool1->c3
//
// (c1,c2,c3)->concat1->d
// d->relu1->e
ProgramDesc BuildProgramDesc(bool put_only_convs_before_concat,
bool all_convs_use_mkldnn) {
ProgramDesc prog;
for (auto& v : std::initializer_list<std::string>({"a1",
"w1",
"c1",
"a2",
"w2",
"b2",
"c2",
"a3",
"w3",
"c3",
"d",
"e"})) {
auto* var = prog.MutableBlock(0)->Var(v);
var->SetType(proto::VarType::SELECTED_ROWS);
if (v.find("w") == 0 || v.find("b") == 0) {
var->SetPersistable(true);
}
}
SetOp(&prog, "conv2d", {"a1", "w1", "b1"}, {"c1"}, all_convs_use_mkldnn);
SetOp(&prog, "conv2d", {"a2", "w2", "b2"}, {"c2"});
if (put_only_convs_before_concat) {
SetOp(&prog, "conv2d", {"a3", "w3", "b3"}, {"c3"});
} else {
SetOp(&prog, "pool2d", {"a3"}, {"c3"});
}
SetOp(&prog, "concat", {"c1", "c2", "c3"}, {"d"});
SetOp(&prog, "relu", {"d"}, {"e"});
return prog;
}
void MainTest(const ProgramDesc& prog, bool fuse_relu) {
std::unique_ptr<ir::Graph> graph(new ir::Graph(prog));
int original_nodes_num = graph->Nodes().size();
auto pass = PassRegistry::Instance().Get("conv_activation_mkldnn_fuse_pass");
graph.reset(pass->Apply(graph.release()));
int current_nodes_num = graph->Nodes().size();
if (fuse_relu) {
// Remove 2 nodes: concat_out, relu
EXPECT_EQ(original_nodes_num - 2, current_nodes_num);
} else {
EXPECT_EQ(original_nodes_num, current_nodes_num);
}
int relu_count = 0;
for (auto* node : graph->Nodes()) {
if (node->IsOp()) {
auto* op = node->Op();
if (op->Type() == "conv2d") {
ASSERT_TRUE(op->HasAttr("fuse_activation"));
bool fuse_relu_attr =
(PADDLE_GET_CONST(std::string, op->GetAttr("fuse_activation")) ==
"relu");
EXPECT_EQ(fuse_relu, fuse_relu_attr);
} else if (op->Type() == "relu") {
relu_count++;
}
}
}
EXPECT_EQ(relu_count, fuse_relu ? 0 : 1);
}
TEST(ConvConcatReLUFusePass, only_convs_before_concat) {
bool all_convs_use_mkldnn = true;
bool put_only_convs_before_concat = true;
auto prog =
BuildProgramDesc(put_only_convs_before_concat, all_convs_use_mkldnn);
bool expect_relu_fuse = true;
MainTest(prog, expect_relu_fuse);
}
TEST(ConvConcatReLUFusePass, only_convs_before_concat_but_one_non_mkldnn) {
bool all_convs_use_mkldnn = false;
bool put_only_convs_before_concat = true;
auto prog =
BuildProgramDesc(put_only_convs_before_concat, all_convs_use_mkldnn);
bool expect_relu_fuse = false;
MainTest(prog, expect_relu_fuse);
}
TEST(ConvConcatReLUFusePass, convs_and_pool_before_concat) {
bool all_convs_use_mkldnn = true;
bool put_only_convs_before_concat = false;
auto prog =
BuildProgramDesc(put_only_convs_before_concat, all_convs_use_mkldnn);
bool expect_relu_fuse = false;
MainTest(prog, expect_relu_fuse);
}
} // namespace ir
} // namespace framework
} // namespace paddle
USE_PASS(conv_activation_mkldnn_fuse_pass);
paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass_tester.cc (deleted):
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "paddle/fluid/framework/ir/mkldnn/conv_elementwise_add_mkldnn_fuse_pass.h"
#include "paddle/fluid/framework/ir/pass_test_util.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/framework/op_version_registry.h"
namespace paddle {
namespace framework {
namespace ir {
constexpr int nodes_removed = 3;
constexpr int nodes_added = 1;
OpDesc* Create_Op_con2d(ProgramDesc* prog,
const std::string& op_type_name,
const std::vector<test::InOutVarNamePair>& inputs,
const std::vector<test::InOutVarNamePair>& outputs,
const bool use_mkldnn = true) {
auto* op = prog->MutableBlock(0)->AppendOp();
const std::vector<int> strides({1, 1});
const std::vector<int> paddings({0, 0});
const std::vector<int> dilations({1, 1});
op->SetType(op_type_name);
op->SetAttr("use_mkldnn", use_mkldnn);
op->SetAttr("strides", strides);
op->SetAttr("groups", 1);
op->SetAttr("paddings", paddings);
op->SetAttr("padding_algorithm", std::string("EXPLICIT"));
op->SetAttr("dilations", dilations);
op->SetAttr("data_format", std::string("NCHW"));
for (const auto& input : inputs) {
op->SetInput(input.first, {input.second});
}
for (const auto& output : outputs) {
op->SetOutput(output.first, {output.second});
}
op->SetAttr(OpProtoAndCheckerMaker::OpRoleAttrName(),
static_cast<int>(OpRole::kForward));
return op;
}
OpDesc* Create_Op_elemntwise_add(
ProgramDesc* prog,
const std::string& op_type_name,
const std::vector<test::InOutVarNamePair>& inputs,
const std::vector<test::InOutVarNamePair>& outputs,
bool use_mkldnn = true) {
auto* op = prog->MutableBlock(0)->AppendOp();
op->SetType(op_type_name);
op->SetAttr("use_mkldnn", use_mkldnn);
op->SetAttr("axis", -1);
for (const auto& input : inputs) {
op->SetInput(input.first, {input.second});
}
for (const auto& output : outputs) {
op->SetOutput(output.first, {output.second});
}
op->SetAttr(OpProtoAndCheckerMaker::OpRoleAttrName(),
static_cast<int>(OpRole::kForward));
return op;
}
TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionAsYWithElementwiseAddRelu) {
auto prog =
test::BuildProgramDesc({"a", "b", "c", "d", "e"}, {"bias", "weights"});
test::CreateOp(&prog, "sigmoid", {{"X", "a"}}, {{"Out", "b"}});
Create_Op_con2d(&prog,
"conv2d",
{{"Input", "b"}, {"Bias", "bias"}, {"Filter", "weights"}},
{{"Output", "c"}});
Create_Op_elemntwise_add(
&prog, "elementwise_add", {{"X", "a"}, {"Y", "c"}}, {{"Out", "d"}});
test::CreateOp(&prog, "relu", {{"X", "d"}}, {{"Out", "e"}});
Graph graph(prog);
EXPECT_TRUE(test::RunPassAndAssert(&graph,
"conv_elementwise_add_mkldnn_fuse_pass",
"a",
"relu",
nodes_removed,
nodes_added));
EXPECT_TRUE(test::AssertOpsCount(
graph, {{"fused_conv2d", 1}, {"elementwise_add", 0}}));
}
TEST(ConvElementwiseAddMKLDNNFusePass,
ConvolutionProjectionAsYWithElementwiseAddRelu) {
auto prog = test::BuildProgramDesc({"a", "b", "c", "d", "e", "f"},
{"bias", "weights", "bias2", "weights2"});
test::CreateOp(&prog, "sigmoid", {{"X", "a"}}, {{"Out", "b"}});
// right branch
Create_Op_con2d(&prog,
"conv2d",
{{"Input", "b"}, {"Bias", "bias"}, {"Filter", "weights"}},
{{"Output", "c"}});
// left branch
Create_Op_con2d(&prog,
"conv2d",
{{"Input", "a"}, {"Bias", "bias2"}, {"Filter", "weights2"}},
{{"Output", "f"}});
Create_Op_elemntwise_add(
&prog, "elementwise_add", {{"X", "f"}, {"Y", "c"}}, {{"Out", "d"}});
test::CreateOp(&prog, "relu", {{"X", "d"}}, {{"Out", "e"}});
Graph graph(prog);
EXPECT_TRUE(test::RunPassAndAssert(&graph,
"conv_elementwise_add_mkldnn_fuse_pass",
"a",
"relu",
nodes_removed,
nodes_added));
EXPECT_TRUE(test::AssertOpsCount(
graph, {{"conv2d", 1}, {"fused_conv2d", 1}, {"elementwise_add", 0}}));
}
TEST(ConvElementwiseAddMKLDNNFusePass,
ConvolutionAsYWithElementwiseAddReluNoBias) {
auto prog = test::BuildProgramDesc({"a", "b", "c", "d", "e"}, {"weights"});
test::CreateOp(&prog, "sigmoid", {{"X", "a"}}, {{"Out", "b"}});
Create_Op_con2d(&prog,
"conv2d",
{{"Input", "b"}, {"Filter", "weights"}},
{{"Output", "c"}});
Create_Op_elemntwise_add(
&prog, "elementwise_add", {{"X", "a"}, {"Y", "c"}}, {{"Out", "d"}});
test::CreateOp(&prog, "relu", {{"X", "d"}}, {{"Out", "e"}});
Graph graph(prog);
EXPECT_TRUE(test::RunPassAndAssert(&graph,
"conv_elementwise_add_mkldnn_fuse_pass",
"a",
"relu",
nodes_removed,
nodes_added));
EXPECT_TRUE(test::AssertOpsCount(
graph, {{"fused_conv2d", 1}, {"elementwise_add", 0}}));
}
TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionAsXWithElementwiseAddRelu) {
auto prog =
test::BuildProgramDesc({"a", "b", "c", "d", "e"}, {"bias", "weights"});
test::CreateOp(&prog, "sigmoid", {{"X", "a"}}, {{"Out", "b"}});
Create_Op_con2d(&prog,
"conv2d",
{{"Input", "b"}, {"Bias", "bias"}, {"Filter", "weights"}},
{{"Output", "c"}});
Create_Op_elemntwise_add(
&prog, "elementwise_add", {{"X", "c"}, {"Y", "a"}}, {{"Out", "d"}});
test::CreateOp(&prog, "relu", {{"X", "d"}}, {{"Out", "e"}});
Graph graph(prog);
EXPECT_TRUE(test::RunPassAndAssert(&graph,
"conv_elementwise_add_mkldnn_fuse_pass",
"a",
"relu",
nodes_removed,
nodes_added));
EXPECT_TRUE(test::AssertOpsCount(
graph, {{"fused_conv2d", 1}, {"elementwise_add", 0}}));
}
TEST(ConvElementwiseAddMKLDNNFusePass,
ConvolutionAsXWithElementwiseAddReluNoBias) {
auto prog = test::BuildProgramDesc({"a", "b", "c", "d", "e"}, {"weights"});
test::CreateOp(&prog, "sigmoid", {{"X", "a"}}, {{"Out", "b"}});
Create_Op_con2d(&prog,
"conv2d",
{{"Input", "b"}, {"Filter", "weights"}},
{{"Output", "c"}});
Create_Op_elemntwise_add(
&prog, "elementwise_add", {{"X", "c"}, {"Y", "a"}}, {{"Out", "d"}});
test::CreateOp(&prog, "relu", {{"X", "d"}}, {{"Out", "e"}});
Graph graph(prog);
EXPECT_TRUE(test::RunPassAndAssert(&graph,
"conv_elementwise_add_mkldnn_fuse_pass",
"a",
"relu",
nodes_removed,
nodes_added));
EXPECT_TRUE(test::AssertOpsCount(
graph, {{"fused_conv2d", 1}, {"elementwise_add", 0}}));
}
TEST(ConvElementwiseAddMKLDNNFusePass, NoFusion) {
auto prog =
test::BuildProgramDesc({"a", "b", "c", "d", "e", "f", "g"}, {"weights"});
test::CreateOp(&prog, "sigmoid", {{"X", "a"}}, {{"Out", "b"}});
Create_Op_con2d(&prog,
"conv2d",
{{"Input", "b"}, {"Filter", "weights"}},
{{"Output", "c"}});
Create_Op_con2d(&prog,
"conv2d",
{{"Input", "d"}, {"Filter", "weights"}},
{{"Output", "e"}});
Create_Op_elemntwise_add(
&prog, "elementwise_add", {{"X", "c"}, {"Y", "e"}}, {{"Out", "f"}});
test::CreateOp(&prog, "relu", {{"X", "f"}}, {{"Out", "g"}});
Graph graph(prog);
EXPECT_TRUE(test::RunPassAndAssert(
&graph, "conv_elementwise_add_mkldnn_fuse_pass", "a", "g", 0, 0));
EXPECT_TRUE(
test::AssertOpsCount(graph, {{"conv2d", 2}, {"elementwise_add", 1}}));
}
TEST(ConvElementwiseAddMKLDNNFusePass, pass_op_version_check) {
ASSERT_TRUE(
paddle::framework::compatible::PassVersionCheckerRegistrar::GetInstance()
.IsPassCompatible("conv_elementwise_add_mkldnn_fuse_pass"));
}
} // namespace ir
} // namespace framework
} // namespace paddle
USE_PASS(conv_elementwise_add_mkldnn_fuse_pass);
@@ -185,6 +185,9 @@ if(WITH_GPU AND TENSORRT_FOUND)
PROPERTIES TIMEOUT 120)
set_tests_properties(test_conv_elementwise_add_act_fuse_pass
PROPERTIES TIMEOUT 120)
+set_tests_properties(test_onednn_conv_bias_fuse_pass PROPERTIES TIMEOUT 300)
+set_tests_properties(test_onednn_conv_concat_activation_fuse_pass
+PROPERTIES TIMEOUT 300)
set_tests_properties(test_flatten2_matmul_fuse_pass PROPERTIES TIMEOUT 240)
set_tests_properties(test_squeeze2_matmul_fuse_pass PROPERTIES TIMEOUT 240)
set_tests_properties(test_reshape2_matmul_fuse_pass PROPERTIES TIMEOUT 240)
@@ -223,7 +226,7 @@ if(WITH_GPU AND TENSORRT_FOUND)
endif()
if(WITH_MKLDNN)
-set_tests_properties(test_mkldnn_conv_elementwise_add_fuse_pass
+set_tests_properties(test_onednn_conv_elementwise_add_fuse_pass
PROPERTIES TIMEOUT 120)
set_tests_properties(test_mkldnn_depthwise_conv_pass PROPERTIES TIMEOUT 120)
set_tests_properties(test_onednn_reshape_transpose_matmul_fuse_pass
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid as fluid
from paddle.fluid.core import PassVersionChecker
# padding SAME
class ConvBiasMkldnnFusePassSamePadTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[-1, 3, 100, 100], dtype="float32"
)
param_attr = fluid.ParamAttr(
initializer=paddle.nn.initializer.XavierNormal(),
learning_rate=0.001,
)
conv_out = paddle.static.nn.conv2d(
input=data,
num_filters=3,
filter_size=3,
padding="SAME",
bias_attr=param_attr,
)
self.feeds = {
"data": np.random.random((1, 3, 100, 100)).astype("float32")
}
self.fetch_list = [conv_out]
self.enable_mkldnn = True
def test_check_output(self):
use_gpu = False
self.check_output_with_option(use_gpu)
self.assertTrue(
PassVersionChecker.IsCompatible("conv_bias_mkldnn_fuse_pass")
)
# padding VALID
class ConvBiasMkldnnFusePassValidPadTest(ConvBiasMkldnnFusePassSamePadTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[-1, 3, 100, 100], dtype="float32"
)
param_attr = fluid.ParamAttr(
initializer=paddle.nn.initializer.XavierNormal(),
learning_rate=0.001,
)
conv_out = paddle.static.nn.conv2d(
input=data,
num_filters=3,
filter_size=3,
padding="VALID",
bias_attr=param_attr,
)
self.feeds = {
"data": np.random.random((1, 3, 100, 100)).astype("float32")
}
self.fetch_list = [conv_out]
self.enable_mkldnn = True
# padding EXPLICIT NUMBER
class ConvBiasMkldnnFusePassExplictPadTest(ConvBiasMkldnnFusePassSamePadTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[-1, 3, 100, 100], dtype="float32"
)
param_attr = fluid.ParamAttr(
initializer=paddle.nn.initializer.XavierNormal(),
learning_rate=0.001,
)
conv_out = paddle.static.nn.conv2d(
input=data,
num_filters=3,
filter_size=3,
padding=[2, 4, 6, 8],
bias_attr=param_attr,
)
self.feeds = {
"data": np.random.random((1, 3, 100, 100)).astype("float32")
}
self.fetch_list = [conv_out]
self.enable_mkldnn = True
class ConvBiasMkldnnFusePassGroupTest(ConvBiasMkldnnFusePassSamePadTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[-1, 3, 100, 100], dtype="float32"
)
param_attr = fluid.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform(),
learning_rate=0.001,
)
conv_out = paddle.static.nn.conv2d(
input=data,
num_filters=3,
filter_size=3,
padding="VALID",
groups=3,
bias_attr=param_attr,
use_cudnn=False,
act="softmax",
data_format="NCHW",
)
self.feeds = {
"data": np.random.random((1, 3, 100, 100)).astype("float32")
}
self.fetch_list = [conv_out]
self.enable_mkldnn = True
class ConvBiasMkldnnFusePassDialtionsGroupsTest(
ConvBiasMkldnnFusePassSamePadTest
):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[-1, 3, 100, 100], dtype="float32"
)
param_attr = fluid.ParamAttr(
initializer=paddle.nn.initializer.XavierNormal(),
learning_rate=0.001,
)
conv_out = paddle.static.nn.conv2d(
input=data,
num_filters=3,
filter_size=3,
padding="VALID",
dilation=2,
groups=3,
bias_attr=param_attr,
use_cudnn=False,
act="softmax",
data_format="NCHW",
)
self.feeds = {
"data": np.random.random((1, 3, 100, 100)).astype("float32")
}
self.fetch_list = [conv_out]
self.enable_mkldnn = True
class ConvTransposeMkldnnFusePassDialtionsGroupsTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(name="data", shape=[-1, 3, 5, 5], dtype="float32")
param_attr = fluid.ParamAttr(
initializer=paddle.nn.initializer.XavierNormal(),
learning_rate=0.001,
)
conv_out = paddle.static.nn.conv2d_transpose(
input=data,
num_filters=3,
filter_size=3,
padding="SAME",
dilation=1,
bias_attr=param_attr,
use_cudnn=False,
)
self.feeds = {"data": np.random.random((1, 3, 5, 5)).astype("float32")}
self.fetch_list = [conv_out]
self.enable_mkldnn = True
def test_check_output(self):
use_gpu = False
self.check_output_with_option(use_gpu)
self.assertTrue(
PassVersionChecker.IsCompatible(
"conv_transpose_bias_mkldnn_fuse_pass"
)
)
if __name__ == "__main__":
unittest.main()
test_onednn_conv_bias_fuse_pass.py:
@@ -19,67 +19,43 @@ from auto_scan_test import PassAutoScanTest
from program_config import OpConfig, ProgramConfig, TensorConfig
-class TestConvBiasMkldnnFusePass(PassAutoScanTest):
-r"""
-x_var f_var(persistable)
-\ /
-conv2d
-|
-conv2d_var bias_var(persistable)
-\ /
-elementwise_add
-|
-elementwise_add_var
-"""
+class TestConvBiasOneDNNFusePass(PassAutoScanTest):
def sample_predictor_configs(self, program_config):
# MKLDNN
config = self.create_inference_config(use_gpu=False)
config.enable_mkldnn()
-yield config, ["fused_conv2d"], (1e-4, 1e-5)
+yield config, ['fused_conv2d'], (1e-4, 1e-5)
def is_program_valid(self, prog_config):
-paddings = prog_config.ops[0].attrs["paddings"]
-strides = prog_config.ops[0].attrs["strides"]
-groups = prog_config.ops[0].attrs["groups"]
-padding_algorithm = prog_config.ops[0].attrs["padding_algorithm"]
-dilations = prog_config.ops[0].attrs["dilations"]
-data_format = prog_config.ops[0].attrs["data_format"]
-filter_shape = prog_config.weights["filter"].shape
-input_shape = prog_config.inputs["input_x"].shape
-if padding_algorithm == "VALID":
-if (
-(input_shape[2] - (dilations[0] * (filter_shape[2] - 1) + 1))
-/ strides[0]
-+ 1
-) <= 1 or (
-(input_shape[3] - (dilations[1] * (filter_shape[3] - 1) + 1))
-/ strides[1]
-+ 1
-) <= 1:
+paddings = prog_config.ops[0].attrs['paddings']
+groups = prog_config.ops[0].attrs['groups']
+padding_algorithm = prog_config.ops[0].attrs['padding_algorithm']
+dilations = prog_config.ops[0].attrs['dilations']
+data_format = prog_config.ops[0].attrs['data_format']
+filter_shape = prog_config.weights['filter'].shape
+input_shape = prog_config.inputs['input_x'].shape
+height = input_shape[data_format.index('H')]
+width = input_shape[data_format.index('W')]
+if padding_algorithm == 'VALID':
+if (height - (dilations[0] * (filter_shape[2] - 1) + 1) <= 0) or (
+width - (dilations[1] * (filter_shape[3] - 1) + 1) <= 0
+):
return False
-if padding_algorithm == "EXPLICIT":
+if padding_algorithm == 'EXPLICIT':
if (
(
-input_shape[2]
+height
+ paddings[0]
+ paddings[1]
- (dilations[0] * (filter_shape[2] - 1) + 1)
)
-/ strides[0]
-+ 1
-) <= 1 or (
-(
-input_shape[3]
+<= 0
+) or (
+width
+ paddings[2]
+ paddings[3]
- (dilations[1] * (filter_shape[3] - 1) + 1)
)
-/ strides[1]
-+ 1
-) <= 1:
+<= 0
+):
return False
-if data_format == "NCHW":
+if data_format == 'NCHW':
if input_shape[1] != filter_shape[1] * groups:
return False
if filter_shape[0] % groups != 0:
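Aside (not part of the patch): the validity checks above encode the standard convolution output-size formula, with the stride division simplified away, since (x / s) + 1 <= 1 is equivalent to x <= 0 for any positive stride s. A sketch of the underlying formula:

def conv_out_size(in_size, kernel, stride, dilation, pad_begin=0, pad_end=0):
    # Spatial output size of a convolution. is_program_valid rejects
    # configurations where the numerator below is <= 0, i.e. where the
    # dilated kernel no longer fits inside the (padded) input.
    effective_kernel = dilation * (kernel - 1) + 1
    return (in_size + pad_begin + pad_end - effective_kernel) // stride + 1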
@@ -101,7 +77,7 @@ class TestConvBiasMkldnnFusePass(PassAutoScanTest):
x_shape[1] = draw(st.integers(min_value=5, max_value=10))
# 2. Generate legal attr:data_format of conv2d
-data_format = draw(st.sampled_from(["NCHW", "NHWC"]))
+data_format = draw(st.sampled_from(['NCHW', 'NHWC']))
# 3. Generate legal shape of input:Y of conv2d
f_shape = draw(
@@ -109,7 +85,7 @@ class TestConvBiasMkldnnFusePass(PassAutoScanTest):
st.integers(min_value=1, max_value=4), min_size=4, max_size=4
)
)
-if data_format == "NCHW":
+if data_format == 'NCHW':
f_shape[1] = x_shape[1]
else:
f_shape[1] = x_shape[3]
@@ -122,7 +98,7 @@ class TestConvBiasMkldnnFusePass(PassAutoScanTest):
)
# 5. Generate legal attr:padding_algorithm of conv2d
-padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VALID"]))
+padding_algorithm = draw(st.sampled_from(['EXPLICIT', 'SAME', 'VALID']))
# 6. Generate legal attr:padding of conv2d
padding = draw(
@@ -146,7 +122,7 @@ class TestConvBiasMkldnnFusePass(PassAutoScanTest):
# 10. Generate legal shape of attr:axis of elementwise_add
axis = 1
-if data_format == "NCHW":
+if data_format == 'NCHW':
axis = 1
else:
axis = 3
@@ -156,36 +132,36 @@ class TestConvBiasMkldnnFusePass(PassAutoScanTest):
inputs = dict()
weights = dict()
use_mkldnn = None
-conv_type = "conv2d"
+conv_type = 'conv2d'
if draw(st.booleans()):
conv_bias_shape = [f_shape[0]]
-conv_type = "fused_conv2d"
+conv_type = 'fused_conv2d'
inputs = {
-"Input": ["input_x"],
-"Filter": ["filter"],
-"Bias": ["conv_bias"],
+'Input': ['input_x'],
+'Filter': ['filter'],
+'Bias': ['conv_bias'],
}
weights = {
-"filter": TensorConfig(shape=f_shape),
-"bias": TensorConfig(shape=bias_shape),
-"conv_bias": TensorConfig(shape=conv_bias_shape),
+'filter': TensorConfig(shape=f_shape),
+'bias': TensorConfig(shape=bias_shape),
+'conv_bias': TensorConfig(shape=conv_bias_shape),
}
use_mkldnn = True
else:
inputs = {
-"Input": ["input_x"],
-"Filter": ["filter"],
+'Input': ['input_x'],
+'Filter': ['filter'],
}
weights = {
-"filter": TensorConfig(shape=f_shape),
-"bias": TensorConfig(shape=bias_shape),
+'filter': TensorConfig(shape=f_shape),
+'bias': TensorConfig(shape=bias_shape),
}
use_mkldnn = False
conv2d_op = OpConfig(
conv_type,
inputs=inputs,
-outputs={"Output": ["conv2d_out"]},
+outputs={'Output': ['conv2d_out']},
strides=strides,
padding_algorithm=padding_algorithm,
paddings=padding,
@@ -196,9 +172,9 @@ class TestConvBiasMkldnnFusePass(PassAutoScanTest):
)
add_op = OpConfig(
-"elementwise_add",
-inputs={"X": ["conv2d_out"], "Y": ["bias"]},
-outputs={"Out": ["add_out"]},
+'elementwise_add',
+inputs={'X': ['conv2d_out'], 'Y': ['bias']},
+outputs={'Out': ['add_out']},
axis=axis,
)
@@ -207,16 +183,16 @@ class TestConvBiasMkldnnFusePass(PassAutoScanTest):
program_config = ProgramConfig(
ops=ops,
weights=weights,
-inputs={"input_x": TensorConfig(shape=x_shape)},
-outputs=ops[-1].outputs["Out"],
+inputs={'input_x': TensorConfig(shape=x_shape)},
+outputs=ops[-1].outputs['Out'],
)
return program_config
def test(self):
self.run_and_statis(
-quant=False, max_examples=350, passes=["conv_bias_mkldnn_fuse_pass"]
+quant=False, passes=['conv_bias_mkldnn_fuse_pass'], max_examples=130
)
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
test_onednn_conv_concat_activation_fuse_pass.py:
@@ -21,7 +21,7 @@ from auto_scan_test import PassAutoScanTest
from program_config import OpConfig, ProgramConfig, TensorConfig
-class TestConvConcatActivationMkldnnFusePass(PassAutoScanTest):
+class TestOneDNNConvConcatActivationFusePass(PassAutoScanTest):
def sample_program_config(self, draw):
data_format = draw(st.sampled_from(['NCHW', 'NHWC']))
dilations = draw(st.sampled_from([[2, 2]]))
@@ -162,7 +162,9 @@ class TestConvConcatActivationMkldnnFusePass(PassAutoScanTest):
def test(self):
self.run_and_statis(
-quant=False, passes=['conv_activation_mkldnn_fuse_pass']
+quant=False,
+passes=['conv_activation_mkldnn_fuse_pass'],
+max_examples=50,
)
test_onednn_conv_elementwise_add_fuse_pass.py:
@@ -21,22 +21,21 @@ from auto_scan_test import PassAutoScanTest
from program_config import OpConfig, ProgramConfig, TensorConfig
# the two inputs of elementwise_add are tensor
-class TestConvElementwiseAddMkldnnFusePass(PassAutoScanTest):
+class TestOneDNNConvElementwiseAddFusePass(PassAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
attrs = [
program_config.ops[i].attrs for i in range(len(program_config.ops))
]
-if attrs[1]['data_format'] == "NHWC" and attrs[3]['axis'] == 0:
+if attrs[1]['data_format'] == 'NHWC' and attrs[3]['axis'] == 0:
return False
-if attrs[1]['data_format'] == "NCHW" and attrs[3]['axis'] == -1:
+if attrs[1]['data_format'] == 'NCHW' and attrs[3]['axis'] == -1:
return False
return True
def sample_program_config(self, draw):
-data_format = draw(st.sampled_from(["NCHW", "NHWC"]))
+data_format = draw(st.sampled_from(['NCHW', 'NHWC']))
dilations = draw(st.sampled_from([[1, 1], [2, 2], [1, 2]]))
-padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VALID"]))
+padding_algorithm = draw(st.sampled_from(['EXPLICIT', 'SAME', 'VALID']))
groups = draw(st.sampled_from([1, 2, 4]))
paddings = draw(st.sampled_from([[0, 3], [1, 1], [1, 2, 3, 4]]))
strides = draw(st.sampled_from([[1, 1], [2, 2], [1, 2]]))
@@ -44,7 +43,7 @@ class TestConvElementwiseAddMkldnnFusePass(PassAutoScanTest):
batch_size = draw(st.integers(min_value=1, max_value=4))
def generate_input():
-if data_format == "NCHW":
+if data_format == 'NCHW':
return np.random.random([batch_size, 48, 64, 64]).astype(
np.float32
)
@@ -59,44 +58,44 @@ class TestConvElementwiseAddMkldnnFusePass(PassAutoScanTest):
)
relu_op = OpConfig(
-type="relu",
-inputs={"X": ["input_data"]},
-outputs={"Out": ["relu_out"]},
+type='relu',
+inputs={'X': ['input_data']},
+outputs={'Out': ['relu_out']},
attrs={},
)
conv2d_op1 = OpConfig(
-type="conv2d",
-inputs={"Input": ["relu_out"], "Filter": ["conv_weight1"]},
-outputs={"Output": ["conv_output1"]},
+type='conv2d',
+inputs={'Input': ['relu_out'], 'Filter': ['conv_weight1']},
+outputs={'Output': ['conv_output1']},
attrs={
-"data_format": data_format,
-"dilations": dilations,
-"padding_algorithm": padding_algorithm,
-"groups": groups,
-"paddings": paddings,
-"strides": strides,
+'data_format': data_format,
+'dilations': dilations,
+'padding_algorithm': padding_algorithm,
+'groups': groups,
+'paddings': paddings,
+'strides': strides,
},
)
conv2d_op2 = OpConfig(
-type="conv2d",
-inputs={"Input": ["input_data"], "Filter": ["conv_weight2"]},
-outputs={"Output": ["conv_output2"]},
+type='conv2d',
+inputs={'Input': ['input_data'], 'Filter': ['conv_weight2']},
+outputs={'Output': ['conv_output2']},
attrs={
-"data_format": data_format,
-"dilations": dilations,
-"padding_algorithm": padding_algorithm,
-"groups": groups,
-"paddings": paddings,
-"strides": strides,
+'data_format': data_format,
+'dilations': dilations,
+'padding_algorithm': padding_algorithm,
+'groups': groups,
+'paddings': paddings,
+'strides': strides,
},
)
elt_op = OpConfig(
-type="elementwise_add",
-inputs={"X": ["conv_output1"], "Y": ["conv_output2"]},
-outputs={"Out": ["elementwise_output"]},
+type='elementwise_add',
+inputs={'X': ['conv_output1'], 'Y': ['conv_output2']},
+outputs={'Out': ['elementwise_output']},
attrs={'axis': axis},
)
@@ -105,26 +104,26 @@ class TestConvElementwiseAddMkldnnFusePass(PassAutoScanTest):
program_config = ProgramConfig(
ops=model_net,
weights={
-"conv_weight1": TensorConfig(data_gen=partial(generate_weight)),
-"conv_weight2": TensorConfig(data_gen=partial(generate_weight)),
+'conv_weight1': TensorConfig(data_gen=partial(generate_weight)),
+'conv_weight2': TensorConfig(data_gen=partial(generate_weight)),
},
inputs={
-"input_data": TensorConfig(data_gen=partial(generate_input))
+'input_data': TensorConfig(data_gen=partial(generate_input))
},
-outputs=["elementwise_output"],
+outputs=['elementwise_output'],
)
return program_config
def sample_predictor_configs(self, program_config):
config = self.create_inference_config(use_mkldnn=True)
-yield config, ["relu", "conv2d", "fused_conv2d"], (1e-5, 1e-5)
+yield config, ['relu', 'conv2d', 'fused_conv2d'], (1e-5, 1e-5)
def test(self):
self.run_and_statis(
-quant=False, passes=["conv_elementwise_add_mkldnn_fuse_pass"]
+quant=False, passes=['conv_elementwise_add_mkldnn_fuse_pass']
)
-if __name__ == "__main__":
+if __name__ == '__main__':
unittest.main()
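The renamed testers are ordinary unittest files, so each can be run on its own; a small illustrative runner (file pattern taken from the new test target names above, directory layout assumed):

import unittest

if __name__ == '__main__':
    # Discover and run the rewritten oneDNN fuse-pass testers; equivalent
    # to running each test_onednn_conv_*_fuse_pass.py file individually.
    suite = unittest.defaultTestLoader.discover(
        start_dir='.', pattern='test_onednn_conv_*_fuse_pass.py'
    )
    unittest.TextTestRunner(verbosity=2).run(suite)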