diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt
index f65db53893038ce28bd25a03179173216068e323..4b429d5f72347c93287b98c35e35c1cf3d026764 100644
--- a/paddle/fluid/framework/ir/CMakeLists.txt
+++ b/paddle/fluid/framework/ir/CMakeLists.txt
@@ -151,19 +151,6 @@ endif()
 if(WITH_MKLDNN)
   pass_library(mkldnn_placement_pass base DEPS placement_pass_base DIR mkldnn)
-  pass_library(
-    mkldnn_inplace_pass
-    inference
-    DEPS
-    mkldnn_placement_pass
-    op_registry
-    elementwise_add_op
-    generated_op
-    activation_op
-    softmax_op
-    softmax
-    DIR
-    mkldnn)
   pass_library(depthwise_conv_mkldnn_pass base DIR mkldnn)
   pass_library(conv_affine_channel_mkldnn_fuse_pass inference DIR mkldnn)
   pass_library(conv_bias_mkldnn_fuse_pass inference DIR mkldnn)
@@ -450,10 +437,6 @@ if(WITH_MKLDNN)
     test_mkldnn_placement_pass
     SRCS mkldnn/mkldnn_placement_pass_tester.cc
     DEPS mkldnn_placement_pass)
-  cc_test(
-    test_mkldnn_inplace_pass
-    SRCS mkldnn/mkldnn_inplace_pass_tester.cc
-    DEPS mkldnn_inplace_pass)
   cc_test(
     test_compute_propagate_scales_mkldnn_pass
     SRCS mkldnn/compute_propagate_scales_mkldnn_pass_tester.cc
diff --git a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc b/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc
deleted file mode 100644
index 02a56e819c03f753edeb07c692ec8d3061d284e9..0000000000000000000000000000000000000000
--- a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -#include "paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.h" - -#include -#include -#include -#include -#include -#include - -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/framework/op_info.h" -#include "paddle/fluid/framework/op_version_registry.h" -#include "paddle/fluid/platform/enforce.h" - -namespace paddle { -namespace framework { -namespace ir { - -void MKLDNNInPlacePass::ApplyImpl(ir::Graph* graph) const { - PADDLE_ENFORCE_NOT_NULL(graph, - platform::errors::InvalidArgument( - "Pointer to graph argument should not be NULL.")); - std::unordered_map original_output_names; - std::unordered_set inplaced_vars; - GraphPatternDetector gpd; - patterns::MKLDNNInPlace mkldnn_inplace{gpd.mutable_pattern(), - "mkldnn_inplace"}; - mkldnn_inplace(); - - int found_inplace_count = 0; - auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, - Graph* g) { - VLOG(3) << "Start to handle MKL-DNN In-Place pass"; - - GET_IR_NODE_FROM_SUBGRAPH(current_op, inplace_to_be_op, mkldnn_inplace); - GET_IR_NODE_FROM_SUBGRAPH( - current_op_in, inplace_to_be_op_in, mkldnn_inplace); - GET_IR_NODE_FROM_SUBGRAPH( - current_op_out, inplace_to_be_op_out, mkldnn_inplace); - GET_IR_NODE_FROM_SUBGRAPH(next_op, next_op, mkldnn_inplace); - GET_IR_NODE_FROM_SUBGRAPH(next_op_out, next_op_out, mkldnn_inplace); - - if ((current_op->Op()->HasAttr("use_mkldnn") == false) || - (PADDLE_GET_CONST(bool, current_op->Op()->GetAttr("use_mkldnn")) == - false)) { - VLOG(3) << "do not perform mkl-dnn inplace: use_mkldnn missing or set to " - "false"; - return; - } - - auto& infer_inplace = - OpInfoMap::Instance().Get(current_op->Op()->Type()).infer_inplace_; - if (!infer_inplace) { - VLOG(3) << "do not perform mkl-dnn inplace: missing InplaceInferer"; - return; - } - - VLOG(3) << "oneDNN Inplace op(" << current_op->id() << ") " - << "Curr Node In: " << current_op_in->Name() - << " Curr Node out: " << current_op_out->Name(); - - VLOG(3) << "oneDNN Inplace next op(" << next_op->id() << ") " - << " next Node out: " << next_op_out->Name(); - - auto inputs = current_op->Op()->Inputs(); - auto outputs = current_op->Op()->Outputs(); - auto in_to_outs = infer_inplace(false); // strictly no CUDA for MKL-DNN - VLOG(3) << "oneDNN InplaceInferer op(" << current_op->id() << ") " - << in_to_outs.begin()->first << ": " - << inputs[in_to_outs.begin()->first][0] << " " - << in_to_outs.begin()->second << ": " - << outputs[in_to_outs.begin()->second][0]; - // If InferInplace pattern does not contain input node then skip - auto inplace_input_vec = inputs[in_to_outs.begin()->first]; - if (std::find(inplace_input_vec.begin(), - inplace_input_vec.end(), - current_op_in->Name()) == inplace_input_vec.end()) { - VLOG(3) << "oneDNN in-place pass SKIP pattern "; - return; - } - - // Checking if this particular node (to be inplaced, overwritten) - // is used anywhere else apart from inplaced op - auto input_consumers = current_op_in->outputs; - if (input_consumers.size() > 1) { - VLOG(3) << "oneDNN in-place pass FAIL: in-place var cannot " - "be an input to multiple operators"; - return; - } else { - // We will prevent in-place when - // input is used in other part of graph, unless it was a result of - // inplacing - // Allow to next op out reuse inpuit var, as this is the same chaing - if (inplaced_vars.find(current_op_in->Name()) == inplaced_vars.end()) { - for (const Node* n : graph->Nodes()) { - if ((n->id() != current_op_in->id()) && - (n->id() != next_op_out->id()) && - 
(n->Name() == current_op_in->Name())) { - VLOG(3) << "oneDNN in-place pass FAIL var used in diffrent part of " - "graph "; - return; - } - } - } - } - - // If this op was alrady inplaced in previous pass placements - // then we need to update input of next op - // but original name to be changed is gone, so we need to remember it - // on first time given op is to be inplaced - if (current_op_in->Name() != current_op_out->Name()) { - original_output_names[current_op->Name() + current_op_in->Name()] = - current_op_out->Name(); - } else { - VLOG(3) << "oneDNN Inplace: Current op already inplaced! "; - } - - // It may be that next op is reusing some of vars, we need to - // make sure that unwanted inplace is not created - for (auto& n : current_op_out->outputs) { - auto& n_op_infer_inplace = - OpInfoMap::Instance().Get(n->Op()->Type()).infer_inplace_; - if ((n_op_infer_inplace == nullptr)) { - for (auto& m : n->outputs) { - if (m->Name() == current_op_in->Name()) { - VLOG(3) << "oneDNN in-place pass FAIL: in-place var cannot " - "be an output to non-inplaced next op"; - return; - } - } - } - } - - auto original_name = - original_output_names[current_op->Name() + current_op_in->Name()]; - current_op_out->RenameVar(current_op_in->Name()); - - // Get mapping of input to output - auto out_name = in_to_outs.begin()->second; - current_op->Op()->SetOutput( - out_name, std::vector({current_op_out->Name()})); - // Record var name - inplaced_vars.insert(current_op_out->Name()); - - // If next op in a line is doing inplace - // then we need to update its output as well - - // Get inferer of next op - // If no inferer then we are done - auto& next_op_infer_inplace = - OpInfoMap::Instance().Get(next_op->Op()->Type()).infer_inplace_; - if (next_op_infer_inplace) { - auto in_to_outs = next_op_infer_inplace(false); - auto out_name = in_to_outs.begin()->second; - auto* op = next_op->Op(); - auto inputs = op->Inputs(); - auto outputs = op->Outputs(); - // Check if in-place happened - // for variable we changed (original name) - // TODO(jczaja): make recursive propagation of inplace - auto next_op_inplace_inputs = inputs[in_to_outs.begin()->first]; - if ((next_op_inplace_inputs == outputs[in_to_outs.begin()->second]) && - (std::find(next_op_inplace_inputs.begin(), - next_op_inplace_inputs.end(), - original_name) != next_op_inplace_inputs.end())) { - VLOG(3) << "oneDNN InPlace: Next Op is in-placed , updating its " - "input " - "and output var!"; - next_op->Op()->SetOutput( - out_name, std::vector({current_op_out->Name()})); - next_op_out->RenameVar(current_op_in->Name()); - // Get ops that next_op_out is linked to and update their input - auto next_op_out_consumers = next_op_out->outputs; // Has to be ops - for (auto& c : next_op_out_consumers) { - c->Op()->RenameInput(original_name, current_op_out->Name()); - } - } - } - - next_op->Op()->RenameInput(original_name, current_op_out->Name()); - - found_inplace_count++; - VLOG(3) << "oneDNN InPlace applied!"; - }; - - // TODO(jczaja): inplace pass does not influece ops inside block ops - auto should_inplace = [&](Graph* g) { - std::unordered_set unwanted_ops( - {"conditional_block", "While", "while_loop"}); - for (auto& node : g->Nodes()) { - if (node->IsOp() && - unwanted_ops.find(node->Name()) != unwanted_ops.end()) { - VLOG(3) << "oneDNN InPlace FAILED: unsupported op: " << node->Name(); - return false; - } - } - return true; - }; - - if (should_inplace(graph)) gpd(graph, handler); -} - -} // namespace ir -} // namespace framework -} // namespace paddle - 
-REGISTER_PASS(mkldnn_inplace_pass, paddle::framework::ir::MKLDNNInPlacePass);
-REGISTER_PASS_CAPABILITY(mkldnn_inplace_pass)
-    .AddCombination(
-        paddle::framework::compatible::OpVersionComparatorCombination()
-            .EQ("softmax", 0)
-            .LE("elementwise_add", 1)
-            .EQ("tanh", 0));
diff --git a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.h b/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.h
deleted file mode 100644
index 880630055e91637a2baf67ee206f7dcbc5fc5a2e..0000000000000000000000000000000000000000
--- a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#pragma once
-#include <string>
-
-#include "paddle/fluid/framework/ir/graph.h"
-#include "paddle/fluid/framework/ir/graph_pattern_detector.h"
-#include "paddle/fluid/framework/ir/pass.h"
-
-namespace paddle {
-namespace framework {
-namespace ir {
-
-/*
- * Transpose weights of FC to comply with MKL-DNN interface
- */
-class MKLDNNInPlacePass : public Pass {
- public:
-  virtual ~MKLDNNInPlacePass() {}
-
- protected:
-  void ApplyImpl(ir::Graph* graph) const;
-
- private:
-#if PADDLE_WITH_TESTING
-  friend class MKLDNNInPlacePassTest;
-#endif
-};
-
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
diff --git a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass_tester.cc
deleted file mode 100644
index e8116fc47c8a6d1de3a8b19de93a88adaefec2df..0000000000000000000000000000000000000000
--- a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass_tester.cc
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <gtest/gtest.h>
-
-#include <unordered_set>
-#include "paddle/utils/tribool.h"
-
-#include "paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.h"
-#include "paddle/fluid/framework/ir/pass_tester_helper.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/phi/core/kernel_registry.h"
-
-USE_OP_ITSELF(softmax);
-PD_DECLARE_KERNEL(softmax, OneDNN, ONEDNN);
-USE_OP_ITSELF(elementwise_add);
-PD_DECLARE_KERNEL(add_raw, OneDNN, ONEDNN);
-USE_OP_ITSELF(leaky_relu);
-PD_DECLARE_KERNEL(leaky_relu, OneDNN, ONEDNN);
-USE_OP_ITSELF(gelu);
-USE_OP_ITSELF(relu);
-USE_OP_ITSELF(tanh);
-PD_DECLARE_KERNEL(tanh, OneDNN, ONEDNN);
-PD_DECLARE_ARG_MAPPING_FN(gelu);
-
-namespace paddle {
-namespace framework {
-namespace ir {
-
-class MKLDNNInplacePassTest {
- private:
-  void SetOp(ProgramDesc* prog,
-             const std::string& type,
-             const std::string& name,
-             const std::vector<std::string>& inputs,
-             const std::vector<std::string>& outputs,
-             paddle::tribool use_mkldnn) {
-    auto* op = prog->MutableBlock(0)->AppendOp();
-
-    op->SetType(type);
-
-    if (!paddle::indeterminate(use_mkldnn))
-      op->SetAttr("use_mkldnn", use_mkldnn);
-
-    if (type == "conv2d") {
-      op->SetAttr("name", name);
-      op->SetInput("Input", {inputs[0]});
-      op->SetInput("Filter", {inputs[1]});
-      op->SetInput("Bias", {inputs[2]});
-    } else if (std::unordered_set<std::string>{
-                   "gelu", "leaky_relu", "relu", "tanh"}
-                   .count(type)) {
-      op->SetInput("X", inputs);
-    } else if (type == "softmax") {
-      op->SetAttr("axis", -1);
-      op->SetInput("X", inputs);
-    } else if (type == "elementwise_add") {
-      op->SetInput("X", {inputs[0]});
-      op->SetInput("Y", {inputs[1]});
-    } else {
-      FAIL() << "Unexpected operator type.";
-    }
-    op->SetOutput("Out", {outputs[0]});
-  }
-
-  ProgramDesc BuildProgramDesc(const std::string& mkldnn_enabled_op,
-                               bool branched) {
-    ProgramDesc prog;
-
-    for (auto& v : std::vector<std::string>({"a",
-                                             "weights",
-                                             "bias",
-                                             "f",
-                                             "g",
-                                             "h",
-                                             "i",
-                                             "j",
-                                             "k",
-                                             "l",
-                                             "m",
-                                             "n",
-                                             "z"})) {
-      auto* var = prog.MutableBlock(0)->Var(v);
-      var->SetType(proto::VarType::SELECTED_ROWS);
-      if (v == "weights" || v == "bias") {
-        var->SetPersistable(true);
-      }
-    }
-
-    SetOp(&prog,
-          "conv2d",
-          "conv1",
-          std::vector<std::string>({"a", "weights", "bias"}),
-          std::vector<std::string>({"f"}),
-          paddle::indeterminate);
-    SetOp(&prog,
-          "relu",
-          "relu1",
-          std::vector<std::string>({"f"}),
-          std::vector<std::string>({"g"}),
-          mkldnn_enabled_op.compare("relu") == 0);
-    SetOp(&prog,
-          "softmax",
-          "softmax1",
-          std::vector<std::string>({"g"}),
-          std::vector<std::string>({"h"}),
-          mkldnn_enabled_op.compare("softmax") == 0);
-    SetOp(&prog,
-          "elementwise_add",
-          "elementwise_add1",
-          std::vector<std::string>({"h", "i"}),
-          std::vector<std::string>({"j"}),
-          mkldnn_enabled_op.compare("elementwise_add") == 0);
-    SetOp(&prog,
-          "relu",
-          "relu2",
-          std::vector<std::string>({"j"}),
-          std::vector<std::string>({"k"}),
-          mkldnn_enabled_op.compare("relu") == 0);
-    SetOp(&prog,
-          "tanh",
-          "tanh1",
-          std::vector<std::string>({"k"}),
-          std::vector<std::string>({"l"}),
-          mkldnn_enabled_op.compare("tanh") == 0);
-    SetOp(&prog,
-          "relu",
-          "relu3",
-          std::vector<std::string>({"l"}),
-          std::vector<std::string>({"m"}),
-          mkldnn_enabled_op.compare("relu") == 0);
-    SetOp(&prog,
-          "leaky_relu",
-          "leaky_relu1",
-          std::vector<std::string>({"m"}),
-          std::vector<std::string>({"n"}),
-          mkldnn_enabled_op.compare("leaky_relu") == 0);
-    SetOp(&prog,
-          "gelu",
-          "gelu1",
-          std::vector<std::string>({"n"}),
-          std::vector<std::string>({"m"}),
-          mkldnn_enabled_op.compare("gelu") == 0);
-    if (branched == true) {
-      SetOp(&prog,
-            "softmax",
-            "softmax2",
-            std::vector<std::string>({"g"}),
-            std::vector<std::string>({"z"}),
-            mkldnn_enabled_op.compare("softmax") == 0);
-    }
-
-    return prog;
-  }
-
- public:
-  void MainTest(const std::string& mkldnn_enabled_op,
-                bool branched,
-                unsigned expected_use_mkldnn_true_count) {
-    auto prog = BuildProgramDesc(mkldnn_enabled_op, branched);
-
-    std::unique_ptr<ir::Graph> graph(new ir::Graph(prog));
-    auto pass = PassRegistry::Instance().Get("mkldnn_inplace_pass");
-
-    graph.reset(pass->Apply(graph.release()));
-
-    unsigned use_mkldnn_true_count = 0;
-    std::unordered_map<std::string, std::string> input_names;
-    std::unordered_map<std::string, std::string> output_names;
-
-    VLOG(3) << DebugString(graph);
-
-    for (auto* node : graph->Nodes()) {
-      if (node->IsOp()) {
-        auto* op = node->Op();
-        if (op->Type() == mkldnn_enabled_op) {
-          auto ins = op->Inputs();
-          auto outs = op->Outputs();
-          // Input and output are the same var
-          // All inplace ops are inplacing input named: X
-          // and output : Out
-          if (ins["X"] == outs["Out"]) {
-            ++use_mkldnn_true_count;
-          }
-        }
-      }
-    }
-
-    EXPECT_EQ(use_mkldnn_true_count, expected_use_mkldnn_true_count);
-  }
-};
-
-TEST(MKLDNNInplacePass, inplace_softmax) {
-  // softmax to be mkl-dnn enabled and made in-place
-  MKLDNNInplacePassTest().MainTest("softmax", false, 1);
-}
-
-TEST(MKLDNNInplacePass, inplace_softmax_branched) {
-  // softmax's input is shared by two branches. so no in-place
-  MKLDNNInplacePassTest().MainTest("softmax", true, 0);
-}
-
-TEST(MKLDNNInplacePass, inplace_elementwise_add) {
-  // Two elementwise_add mkl-dnn enabled op instances to be made inplace
-  MKLDNNInplacePassTest().MainTest("elementwise_add", false, 0);
-}
-TEST(MKLDNNInplacePass, inplace_tanh) {
-  MKLDNNInplacePassTest().MainTest("tanh", false, 1);
-}
-
-TEST(MKLDNNInplacePass, inplace_leaky_relu) {
-  // Input of leaky_relu is used as output of subsequent gelu, so no inplace
-  // cannot be done
-  MKLDNNInplacePassTest().MainTest("leaky_relu", false, 0);
-}
-}  // namespace ir
-}  // namespace framework
-}  // namespace paddle
-
-USE_PASS(mkldnn_inplace_pass);
diff --git a/paddle/fluid/inference/api/paddle_pass_builder.cc b/paddle/fluid/inference/api/paddle_pass_builder.cc
index ea81ab1635f080433d193c6dc69463aefa0f68b8..7d325498f7a6008961cfeafe26681da788a7cf2e 100644
--- a/paddle/fluid/inference/api/paddle_pass_builder.cc
+++ b/paddle/fluid/inference/api/paddle_pass_builder.cc
@@ -355,11 +355,6 @@ void CpuPassStrategy::EnableMKLDNN() {
              "operator_scale_onednn_fuse_pass",       //
              "operator_unsqueeze2_onednn_fuse_pass",  //
              "operator_reshape2_onednn_fuse_pass",    //
-             // TODO(intel): Please fix the bug on windows.
-             // https://github.com/PaddlePaddle/Paddle/issues/29710
-             // "mkldnn_inplace_pass",  // This pass should be activated after
-             // fuses. Disabled by default due to
-             // little gain and lots of problems
          })) {
       passes_.push_back(pass);
     }
@@ -459,7 +454,6 @@ void CpuPassStrategy::EnableMkldnnInt8() {
     passes_.push_back("cpu_quantize_squash_pass");
     passes_.push_back("int8_scale_calculation_mkldnn_pass");
     passes_.push_back("params_quantization_mkldnn_pass");
-    passes_.push_back("mkldnn_inplace_pass");
   }
   use_mkldnn_int8_ = true;
 #else
diff --git a/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py b/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py
index a617bac359b6f10b1d642a72283c722ce95b3bb8..89540eeedbd3aedd336745e7b8fbd2cd77c1fd9b 100644
--- a/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py
+++ b/python/paddle/fluid/contrib/slim/quantization/quant2_int8_mkldnn_pass.py
@@ -111,7 +111,6 @@ class Quant2Int8MkldnnPass:
         #  graph = self._update_relu_output_scales(graph)
         graph = self._propagate_scales(graph)
         graph = self._quantize_fp32_graph(graph)
-        graph = self._final_optimizations(graph)
         graph = self._cleanup(graph)
         return graph
 
@@ -122,7 +121,6 @@ class Quant2Int8MkldnnPass:
         self._reset_pass_idx_and_group('fp32')
 
         graph = self._optimize_fp32_graph(graph)
-        graph = self._final_optimizations(graph)
         graph = self._cleanup(graph)
         return graph
 
@@ -521,11 +519,6 @@ class Quant2Int8MkldnnPass:
             self._pass_idx += 1
         return graph
 
-    def _final_optimizations(self, graph):
-        # make some MKL-DNN ops working inplace
-        graph = self._apply_pass(graph, 'mkldnn_inplace_pass')
-        return graph
-
     def _cleanup(self, graph):
         graph = self._remove_unused_var_nodes(graph)
         graph = self._set_op_role_forward(graph)
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_inplace_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_inplace_fuse_pass.py
deleted file mode 100644
index 875025f8fb668d790bf7c69b95ec1a7671cb698a..0000000000000000000000000000000000000000
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_inplace_fuse_pass.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-import numpy as np
-from inference_pass_test import InferencePassTest
-
-import paddle
-import paddle.fluid as fluid
-import paddle.nn.functional as F
-from paddle.fluid.core import PassVersionChecker
-
-
-class MkldnnInplacePassTest(InferencePassTest):
-    def setUp(self):
-        with fluid.program_guard(self.main_program, self.startup_program):
-            paddle.enable_static()
-            data = fluid.data(
-                name="data", shape=[-1, 3, 100, 100], dtype="float32"
-            )
-            conv_out_1 = fluid.layers.conv2d(
-                data, num_filters=3, filter_size=3, bias_attr=False
-            )
-            softmax_out = paddle.nn.functional.softmax(conv_out_1)
-            relu_out = F.relu(conv_out_1)
-            eltwise_out = paddle.add(softmax_out, relu_out)
-
-        self.pass_name = 'mkldnn_inplace_pass'
-        self.feeds = {
-            "data": np.random.random((1, 3, 100, 100)).astype("float32")
-        }
-        self.fetch_list = [softmax_out, relu_out, eltwise_out]
-        self.enable_mkldnn = True
-
-    def test_check_output(self):
-        use_gpu = False
-        self.check_output_with_option(use_gpu)
-
-    def test_pass_compatible(self):
-        self.assertTrue(PassVersionChecker.IsCompatible(self.pass_name))
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/tools/parallel_UT_rule.py b/tools/parallel_UT_rule.py
index 853191400b877f64d75c5f9912bada239b1ea42b..2225ae33021d846faf9b857ed0c11951dd5889bb 100755
--- a/tools/parallel_UT_rule.py
+++ b/tools/parallel_UT_rule.py
@@ -112,7 +112,6 @@ HIGH_PARALLEL_JOB_NEW = [
     'test_fc_lstm_fuse_pass_cc',
     'test_version',
     'gather_test',
-    'test_mkldnn_inplace_fuse_pass',
     'test_reshape_bf16_op',
     'test_compat',
     'test_data_feeder',
@@ -237,7 +236,6 @@ HIGH_PARALLEL_JOB_NEW = [
     'test_selected_rows',
     'test_fleet_sharding_meta_optimizer',
     'test_inference_api',
-    'test_mkldnn_inplace_pass',
     'test_data_generator',
     'test_deprecated_memory_optimize_interfaces',
     'test_ir_skip_layernorm_pass',
@@ -1819,8 +1817,6 @@ CPU_PARALLEL_JOB = [
     'test_mkldnn_op_inplace',
     'test_mkldnn_matmul_transpose_reshape_fuse_pass',
    'test_mkldnn_matmul_op_output_fuse_pass',
-    'test_mkldnn_inplace_pass',
-    'test_mkldnn_inplace_fuse_pass',
     'test_mkldnn_cpu_bfloat16_pass',
     'test_mkldnn_conv_concat_relu_mkldnn_fuse_pass',
     'test_mkldnn_conv_bias_fuse_pass',
diff --git a/tools/static_mode_white_list.py b/tools/static_mode_white_list.py
index 94865fb538bdd6ce4a97da6192fbbafa0ba71589..81660ab7f9402e7223256bce893e00734cd1243e 100755
--- a/tools/static_mode_white_list.py
+++ b/tools/static_mode_white_list.py
@@ -655,7 +655,6 @@ STATIC_MODE_TESTING_LIST = [
     'test_mkldnn_matmul_op_output_fuse_pass',
     'test_mkldnn_matmul_transpose_reshape_fuse_pass',
     'test_mkldnn_scale_matmul_fuse_pass',
-    'test_mkldnn_inplace_fuse_pass',
     'test_mkldnn_conv_affine_channel_fuse_pass',
     'test_batch_fc_op',
     'test_c_comm_init_all_op',