未验证 提交 cbbd940e 编写于 作者: A Asthestarsfalll 提交者: GitHub

[NewIR]Remove compatible logic of ProgramTranslator (#55453)

上级 147fbfe0
......@@ -126,9 +126,6 @@ def OpNameNormalizerInitialization(
backward_op, op_compat_item["scalar"]
)
# special op mappings
op_name_mappings["fetch_v2"] = "fetch"
op_name_normailzer_template = env.get_template("op_compat_info.cc.j2")
with open(output_source_file, 'wt') as f:
op_compat_definition = op_name_normailzer_template.render(
......
......@@ -19,8 +19,6 @@
#include "glog/logging.h"
#include "paddle/fluid/ir_adaptor/translator/utils.h"
#pragma once
namespace paddle {
......@@ -106,11 +104,11 @@ class OpNameNormalizer {
return legacy_name;
}
if (op_arg_name_mappings.find(op_type) == op_arg_name_mappings.end()) {
return UnderscoreToCamelCase(arg_name);
return arg_name;
}
auto& arg_mappings = op_arg_name_mappings[op_type];
if (arg_mappings.find(arg_name) == arg_mappings.end()) {
return UnderscoreToCamelCase(arg_name);
return arg_name;
}
return arg_mappings.at(arg_name);
}
......
......@@ -307,7 +307,7 @@ ir::OpInfo OpTranscriber::LoopkUpOpInfo(ir::IrContext* ctx,
const OpDesc& op_desc) {
std::string target_op_name =
kTargetDialectPrefix + OpNameCompatibleMapping(op_desc.Type());
if (IsInplace(op_desc)) {
if (IsInplace(op_desc) && *target_op_name.rbegin() != '_') {
target_op_name += "_";
}
VLOG(6) << "[op name normalizing]: " << op_desc.Type() << " to "
......
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <string_view>
namespace paddle {
namespace translator {
// Converts a snake_case identifier to CamelCase, e.g. "bias_attr" -> "BiasAttr".
// Underscores are dropped and the character following each underscore (as well
// as the first character) is upper-cased; consecutive/leading/trailing
// underscores are tolerated ("__x_" -> "X").
//
// Takes a std::string_view so callers can pass std::string or string literals
// without an extra copy (backward compatible with the previous by-value
// std::string parameter).
static std::string UnderscoreToCamelCase(std::string_view str) {
  std::string camel_case;
  camel_case.reserve(str.size());  // output is never longer than the input
  bool next_upper = true;          // first non-underscore char is upper-cased
  for (char c : str) {
    if (c == '_') {
      next_upper = true;
      continue;
    }
    if (next_upper) {
      // Cast through unsigned char: passing a negative char to toupper is UB.
      camel_case += static_cast<char>(
          std::toupper(static_cast<unsigned char>(c)));
      next_upper = false;
    } else {
      camel_case += c;
    }
  }
  return camel_case;
}
} // namespace translator
} // namespace paddle
......@@ -354,6 +354,7 @@
attrs : [bool use_mkldnn = false]
- op : bilinear (bilinear_tensor_product)
backward: bilinear_grad (bilinear_tensor_product_grad)
inputs :
{x : X, y : Y,weight: Weight, bias: Bias}
outputs :
......@@ -1838,7 +1839,7 @@
data_type : float
support_tensor : true
- op : merged_momentum_
- op : merged_momentum_ (merged_momentum)
inputs :
{param : Param, grad : Grad, velocity : Velocity, learning_rate : LearningRate, master_param : MasterParam}
outputs :
......@@ -3038,11 +3039,27 @@
yolo_loss : GetYoloLossExpectedKernelType
yolo_loss_grad : GetYoloLossExpectedKernelType
- op: fetch
- op: channel_shuffle
inputs:
{x: X}
outputs:
{out: Out}
- op: fetch (fetch_v2)
inputs: {x: X}
outputs: {out: Out}
- op: full_batch_size_like (fill_constant_batch_size_like)
inputs:
{input: Input}
outputs:
{out: Out}
- op: logspace
inputs:
{start: Start, stop: Stop, num: Num, base: Base}
outputs:
{out: Out}
- op: lu
backward: lu_grad
......@@ -3059,6 +3076,12 @@
outputs :
{reindex_src : Reindex_Src, reindex_dst : Reindex_Dst, out_nodes : Out_Nodes}
- op: rrelu
inputs:
{x: X}
outputs:
{out: Out, noise: Noise}
- op: sigmoid_cross_entropy_with_logits
backward: sigmoid_cross_entropy_with_logits_grad
inputs :
......
......@@ -194,5 +194,21 @@ class TestReduceOpTranscriber(unittest.TestCase):
np.testing.assert_array_equal(out[0], np.all(arr, axis=0))
class TestIndexPutOpTranscriber(unittest.TestCase):
    """Checks that a static program containing index_put translates to new IR."""

    def test_op(self):
        # Run on CPU so the test does not depend on device availability.
        cpu_place = core.Place()
        cpu_place.set_place(paddle.CPUPlace())

        scope = paddle.static.Scope()
        program = paddle.static.Program()
        with paddle.static.scope_guard(scope), paddle.static.program_guard(
            program
        ):
            x = paddle.randn([2, 3])
            indices = [
                paddle.randint(0, 2, [2]),
                paddle.randint(0, 1, [2]),
            ]
            value = paddle.randn([2])
            _ = paddle.index_put(x, indices, value, False)

        # Translation itself is the assertion: it must not raise.
        _ = ir.translate_to_new_ir(program.desc)
if __name__ == "__main__":
unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册