Unverified commit 09a13294, authored by Allen Guo, committed by GitHub

[IPU] remove transfer cast pass (#42520)

* rm transfer_cast_op_pass

* rm header
Parent 001dab0b
@@ -159,7 +159,6 @@ if(WITH_IPU)
pass_library(infer_shape_pass base DIR ipu)
pass_library(delete_scale_op_pass base DIR ipu)
pass_library(avg_shard_pass base DIR ipu)
pass_library(transfer_cast_op_pass base DIR ipu)
endif()
cc_library(fuse_bn_act_pass SRCS fuse_bn_act_pass.cc DEPS pass graph_pattern_detector )
......
@@ -121,9 +121,9 @@ void InferenceProcessPass::ApplyImpl(ir::Graph* graph) const {
}
// Run passes
std::vector<std::string> graph_pass = {
"forward_graph_extract_pass", "infer_shape_pass", "avg_shard_pass",
"popart_canonicalization_pass", "transfer_cast_op_pass"};
std::vector<std::string> graph_pass = {"forward_graph_extract_pass",
"infer_shape_pass", "avg_shard_pass",
"popart_canonicalization_pass"};
std::vector<std::string> compile_pass = {
"ipu_inplace_pass", "ipu_graph_builder_pass", "ipu_runtime_replacer_pass",
"inference_postprocess_pass"};
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/ir/ipu/transfer_cast_op_pass.h"
#include "paddle/fluid/framework/ir/pass_tester_helper.h"
#include "paddle/fluid/platform/device/ipu/ipu_backend.h"
namespace paddle {
namespace framework {
namespace ir {
// Transfer the target dtype of Cast Op to FP16 if the original target is FP32
// and FP16 mode is enabled.
void TransferCastOpPass::ApplyImpl(ir::Graph* graph) const {
VLOG(10) << "enter TransferCastOpPass::ApplyImpl";
VLOG(10) << "Raw Graph: ";
VLOG(10) << DebugString(graph);
auto ipu_backend = platform::ipu::IpuBackend::GetInstance();
auto enable_fp16 = ipu_backend->GetIpuStrategy()->enable_fp16;
auto transfer_cast_op = ipu_backend->GetIpuStrategy()->transfer_cast_op;
if (enable_fp16 && transfer_cast_op) {
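// Retarget every popart_cast whose destination dtype is FLOAT (FP32) to FLOAT16.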
for (auto* node : graph->Nodes()) {
if (node->IsOp() && node->Op()->Type() == "popart_cast") {
if (BOOST_GET_CONST(std::string, node->Op()->GetAttr("to")) ==
"FLOAT") {
node->Op()->SetAttr("to", std::string("FLOAT16"));
}
}
}
}
VLOG(10) << "Post Graph: ";
VLOG(10) << DebugString(graph);
VLOG(10) << "leave TransferCastOpPass::ApplyImpl";
}
} // namespace ir
} // namespace framework
} // namespace paddle
REGISTER_PASS(transfer_cast_op_pass, paddle::framework::ir::TransferCastOpPass);
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/framework/ir/pass.h"
namespace paddle {
namespace framework {
namespace ir {
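// Rewrites the target dtype of Cast Ops from FP32 to FP16 when FP16 mode is enabled.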
class TransferCastOpPass : public Pass {
protected:
void ApplyImpl(ir::Graph* graph) const override;
};
} // namespace ir
} // namespace framework
} // namespace paddle
@@ -10,6 +10,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/platform/device/ipu/ipu_info.h"
#include "paddle/fluid/platform/device/ipu/ipu_device.h"
namespace paddle {
......
@@ -64,7 +64,6 @@ IpuStrategy::IpuStrategy() {
ADD_BOOL_OPTION(is_training);
ADD_BOOL_OPTION(need_avg_shard);
ADD_BOOL_OPTION(enable_fp16);
ADD_BOOL_OPTION(transfer_cast_op);
ADD_BOOL_OPTION(use_no_bias_optimizer);
ADD_BOOL_OPTION(enable_distribution);
ADD_BOOL_OPTION(scaled_optimizer_state);
......
@@ -43,9 +43,6 @@ class IpuStrategy {
// Flag for fp16, true for pure fp16
bool enable_fp16 = false;
// Whether to transfer the Cast Op target dtype from fp32 to fp16 in fp16 mode
bool transfer_cast_op = true;
// The mode of Adam/Lamb optimizer
// false: The standard Adam/Lamb optimizer
// true: The Adam_No_Bias/Lamb_No_Bias optimizer from PopART
......
@@ -17,8 +17,8 @@
#include "paddle/fluid/platform/device/ipu/ipu_names.h"
#include "paddle/fluid/platform/device/ipu/popart_canonicalization/canonicalization_utils.h"
using paddle::framework::AttributeMap;
using paddle::framework::Attribute;
using AttributeMap = paddle::framework::AttributeMap;
using Attribute = paddle::framework::Attribute;
namespace paddle {
namespace platform {
......
@@ -1001,9 +1001,6 @@ class IpuCompiledProgram(object):
a_pass.set('custom_ops', self._custom_op_names)
a_pass.apply(self._graph)
a_pass = core.get_pass("transfer_cast_op_pass")
a_pass.apply(self._graph)
passes = [
'ipu_inplace_pass',
'ipu_graph_builder_pass',
......
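For context, the Python hunk above shows how IpuCompiledProgram looks up registered IR passes through paddle.fluid.core and runs them on the program graph. Below is a minimal sketch of that pattern, based only on the calls visible in this diff (core.get_pass, Pass.set, Pass.apply); the helper name apply_ir_pass and its signature are hypothetical.

from paddle.fluid import core

def apply_ir_pass(graph, pass_name, attrs=None):
    # Look up a pass registered on the C++ side via REGISTER_PASS.
    a_pass = core.get_pass(pass_name)
    # Optionally forward pass attributes, e.g. 'custom_ops' as in the hunk above.
    for key, value in (attrs or {}).items():
        a_pass.set(key, value)
    # Run the pass on the IR graph in place.
    a_pass.apply(graph)
    return graph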