diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt index a2f3b8dc7911a3ac60c5db90f9d9e1516187cc84..a3b49476d820f2871d8d9edb6b3d665ebb05e52e 100755 --- a/paddle/fluid/framework/ir/CMakeLists.txt +++ b/paddle/fluid/framework/ir/CMakeLists.txt @@ -159,7 +159,6 @@ if(WITH_IPU) pass_library(infer_shape_pass base DIR ipu) pass_library(delete_scale_op_pass base DIR ipu) pass_library(avg_shard_pass base DIR ipu) - pass_library(transfer_cast_op_pass base DIR ipu) endif() cc_library(fuse_bn_act_pass SRCS fuse_bn_act_pass.cc DEPS pass graph_pattern_detector ) diff --git a/paddle/fluid/framework/ir/ipu/inference_process_pass.cc b/paddle/fluid/framework/ir/ipu/inference_process_pass.cc index 02f000acc2a39f489663f6aed68200427e1f4182..a6b82089dc4dfe8f8d55b1100602f8645efac4c0 100644 --- a/paddle/fluid/framework/ir/ipu/inference_process_pass.cc +++ b/paddle/fluid/framework/ir/ipu/inference_process_pass.cc @@ -121,9 +121,9 @@ void InferenceProcessPass::ApplyImpl(ir::Graph* graph) const { } // Run passes - std::vector<std::string> graph_pass = { - "forward_graph_extract_pass", "infer_shape_pass", "avg_shard_pass", - "popart_canonicalization_pass", "transfer_cast_op_pass"}; + std::vector<std::string> graph_pass = {"forward_graph_extract_pass", + "infer_shape_pass", "avg_shard_pass", + "popart_canonicalization_pass"}; std::vector<std::string> compile_pass = { "ipu_inplace_pass", "ipu_graph_builder_pass", "ipu_runtime_replacer_pass", "inference_postprocess_pass"}; diff --git a/paddle/fluid/framework/ir/ipu/transfer_cast_op_pass.cc b/paddle/fluid/framework/ir/ipu/transfer_cast_op_pass.cc deleted file mode 100644 index 5cd8358dc083eb3f76eddcba79dae2d9352c11e2..0000000000000000000000000000000000000000 --- a/paddle/fluid/framework/ir/ipu/transfer_cast_op_pass.cc +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/framework/ir/ipu/transfer_cast_op_pass.h" - -#include "paddle/fluid/framework/ir/pass_tester_helper.h" -#include "paddle/fluid/platform/device/ipu/ipu_backend.h" - -namespace paddle { -namespace framework { -namespace ir { - -// Transfer the target dtype of Cast Op to FP16 if the original target is FP32 -// and enable FP16 mode. -void TransferCastOpPass::ApplyImpl(ir::Graph* graph) const { - VLOG(10) << "enter TransferCastOpPass::ApplyImpl"; - VLOG(10) << "Raw Graph: "; - VLOG(10) << DebugString(graph); - - auto ipu_backend = platform::ipu::IpuBackend::GetInstance(); - auto enable_fp16 = ipu_backend->GetIpuStrategy()->enable_fp16; - auto transfer_cast_op = ipu_backend->GetIpuStrategy()->transfer_cast_op; - if (enable_fp16 && transfer_cast_op) { - for (auto* node : graph->Nodes()) { - if (node->IsOp() && node->Op()->Type() == "popart_cast") { - if (BOOST_GET_CONST(std::string, node->Op()->GetAttr("to")) == - "FLOAT") { - node->Op()->SetAttr("to", std::string("FLOAT16")); - } - } - } - } - - VLOG(10) << "Post Graph: "; - VLOG(10) << DebugString(graph); - VLOG(10) << "leave TransferCastOpPass::ApplyImpl"; -} - -} // namespace ir -} // namespace framework -} // namespace paddle - -REGISTER_PASS(transfer_cast_op_pass, paddle::framework::ir::TransferCastOpPass); diff --git a/paddle/fluid/framework/ir/ipu/transfer_cast_op_pass.h b/paddle/fluid/framework/ir/ipu/transfer_cast_op_pass.h deleted 
file mode 100644 index 580fec10f2ac67934bb85ed6deb762e9f5e760e5..0000000000000000000000000000000000000000 --- a/paddle/fluid/framework/ir/ipu/transfer_cast_op_pass.h +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include "paddle/fluid/framework/ir/pass.h" - -namespace paddle { -namespace framework { -namespace ir { - -class TransferCastOpPass : public Pass { - protected: - void ApplyImpl(ir::Graph* graph) const override; -}; - -} // namespace ir -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/platform/device/ipu/ipu_info.cc b/paddle/fluid/platform/device/ipu/ipu_info.cc index 9e6951c37139db2bbca6a1eab7f521e850dba6db..749628ffac452a87d6594e5b8ae95b661dc51e7a 100644 --- a/paddle/fluid/platform/device/ipu/ipu_info.cc +++ b/paddle/fluid/platform/device/ipu/ipu_info.cc @@ -10,6 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/platform/device/ipu/ipu_info.h" + #include "paddle/fluid/platform/device/ipu/ipu_device.h" namespace paddle { diff --git a/paddle/fluid/platform/device/ipu/ipu_strategy.cc b/paddle/fluid/platform/device/ipu/ipu_strategy.cc index e35464e30c7a8b483f5172449e66e88d1b6d8a41..aff5498243000799a43b28463a20dcc36ea59eef 100644 --- a/paddle/fluid/platform/device/ipu/ipu_strategy.cc +++ b/paddle/fluid/platform/device/ipu/ipu_strategy.cc @@ -64,7 +64,6 @@ IpuStrategy::IpuStrategy() { ADD_BOOL_OPTION(is_training); ADD_BOOL_OPTION(need_avg_shard); ADD_BOOL_OPTION(enable_fp16); - ADD_BOOL_OPTION(transfer_cast_op); ADD_BOOL_OPTION(use_no_bias_optimizer); ADD_BOOL_OPTION(enable_distribution); ADD_BOOL_OPTION(scaled_optimizer_state); diff --git a/paddle/fluid/platform/device/ipu/ipu_strategy.h b/paddle/fluid/platform/device/ipu/ipu_strategy.h index 26566bc18fed034265e472676772893314768a30..fa57dcd676d81293f3b38eb3f219a57b5d8a08c3 100644 --- a/paddle/fluid/platform/device/ipu/ipu_strategy.h +++ b/paddle/fluid/platform/device/ipu/ipu_strategy.h @@ -43,9 +43,6 @@ class IpuStrategy { // Flag for fp16, true for pure fp16 bool enable_fp16 = false; - // Enable transfer cast Op target from fp32 to fp16 in fp16 mode - bool transfer_cast_op = true; - // The mode of Adam/Lamb optimizer // false: The standard Adam/Lamb optimizer // true: The Adam_No_Bias/Lamb_No_Bias optimizer from PopART diff --git a/paddle/fluid/platform/device/ipu/popart_canonicalization/op_builder.h b/paddle/fluid/platform/device/ipu/popart_canonicalization/op_builder.h index de3788e437a4243b9fe1dbe36833c8ab503a7e4a..f096beb9c4d7748953951cd16f0355fe266a02d7 100644 --- a/paddle/fluid/platform/device/ipu/popart_canonicalization/op_builder.h +++ b/paddle/fluid/platform/device/ipu/popart_canonicalization/op_builder.h @@ -17,8 +17,8 @@ #include "paddle/fluid/platform/device/ipu/ipu_names.h" #include "paddle/fluid/platform/device/ipu/popart_canonicalization/canonicalization_utils.h" -using 
paddle::framework::AttributeMap; -using paddle::framework::Attribute; +using AttributeMap = paddle::framework::AttributeMap; +using Attribute = paddle::framework::Attribute; namespace paddle { namespace platform { diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py index d21b7e4740a6e1b0194c71f039f7706f32fae742..47c64ff8bd60575045e2c4c6c2643bcca0cfcfe9 100644 --- a/python/paddle/fluid/compiler.py +++ b/python/paddle/fluid/compiler.py @@ -1001,9 +1001,6 @@ class IpuCompiledProgram(object): a_pass.set('custom_ops', self._custom_op_names) a_pass.apply(self._graph) - a_pass = core.get_pass("transfer_cast_op_pass") - a_pass.apply(self._graph) - passes = [ 'ipu_inplace_pass', 'ipu_graph_builder_pass',