Unverified commit 5c6d0e26, authored by chen2016013, committed by GitHub

[IR] Register cinn_dialect & Generate AddOp (#56357)

* Register cinn_dialect & Generate AddOp

* codestyle fix

* codestyle fix02

* Merge cinn_ops.parsed.yaml and cinn_backward_ops.parsed.yaml;
register the add op into CinnDialect

* Register ops via macro definitions

* bug-fix

* bug-fix

* bug-fix

* bug-fix

* skip conflict include

* resolve conflict
Parent ee7877e4
paddle/cinn/hlir/dialect/CMakeLists.txt:

# TODO(Aurelius84): new_ir_compiler depends on pd_dialect and cannot
# be found under CINN_ONLY mode
if(NOT CINN_ONLY)
set(CINN_DIALECT_SOURCE_DIR "${PADDLE_SOURCE_DIR}/paddle/cinn/hlir/dialect")
set(CINN_DIALECT_BINARY_DIR "${PADDLE_BINARY_DIR}/paddle/cinn/hlir/dialect")
# Generate the cinn dialect op-definition files with op_gen.py
set(cinn_op_gen_file
${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/op_generator/op_gen.py)
set(cinn_op_compat_yaml_file
${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/op_compat.yaml)
set(cinn_op_forward_yaml_file1
${PADDLE_SOURCE_DIR}/paddle/cinn/hlir/dialect/cinn_ops.parsed.yaml)
set(cinn_op_yaml_files ${cinn_op_forward_yaml_file1})
set(cinn_op_namespace cinn,dialect)
set(cinn_dialect_name cinn)
set(cinn_op_header_file ${CINN_DIALECT_BINARY_DIR}/cinn_op.h)
set(cinn_op_source_file ${CINN_DIALECT_BINARY_DIR}/cinn_op.cc)
set(cinn_op_header_file_tmp ${cinn_op_header_file}.tmp)
set(cinn_op_source_file_tmp ${cinn_op_source_file}.tmp)
add_custom_command(
OUTPUT ${cinn_op_header_file} ${cinn_op_source_file}
COMMAND
${PYTHON_EXECUTABLE} ${cinn_op_gen_file} --op_yaml_files
${cinn_op_yaml_files} --op_compat_yaml_file ${cinn_op_compat_yaml_file}
--namespaces ${cinn_op_namespace} --dialect_name ${cinn_dialect_name}
--op_def_h_file ${cinn_op_header_file_tmp} --op_def_cc_file
${cinn_op_source_file_tmp}
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${cinn_op_header_file_tmp}
${cinn_op_header_file}
COMMAND ${CMAKE_COMMAND} -E copy_if_different ${cinn_op_source_file_tmp}
${cinn_op_source_file}
DEPENDS ${cinn_op_gen_file} ${cinn_op_forward_yaml_file1}
${cinn_op_compat_yaml_file}
VERBATIM)
cinn_cc_library(
cinn_dialect
SRCS
runtime_dialect.cc
jit_kernel_op.cc
cinn_dialect.cc
cinn_op.cc
DEPS
pd_dialect)
target_include_directories(cinn_dialect PRIVATE ${CINN_DIALECT_BINARY_DIR})
endif()
paddle/cinn/hlir/dialect/cinn_dialect.cc:

// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/cinn/hlir/dialect/cinn_dialect.h"
// NOTE(chenxi67): File cinn_op.h is generated by op_gen.py, see details in
// paddle/cinn/hlir/dialect/CMakeLists.txt.
#include "paddle/cinn/hlir/dialect/cinn_op.h"
namespace cinn {
namespace dialect {
CinnDialect::CinnDialect(::ir::IrContext* context)
: ::ir::Dialect(
name(), context, ::ir::TypeId::get<cinn::dialect::CinnDialect>()) {
this->initialize();
}
void CinnDialect::initialize() {
// NOTE(chenxi67): GET_OP_LIST is defined in cinn_op.h which is
// generated by op_gen.py, see details in
// paddle/cinn/hlir/dialect/CMakeLists.txt.
RegisterOps<
#define GET_OP_LIST
#include "paddle/cinn/hlir/dialect/cinn_op.h" // NOLINT
>();
}
} // namespace dialect
} // namespace cinn
IR_DEFINE_EXPLICIT_TYPE_ID(cinn::dialect::CinnDialect)
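For reference, cinn_op.h is generated at build time rather than checked in. It plausibly follows the same GET_OP_LIST convention as the pd_dialect generator output; a minimal sketch, assuming the generated op class is named cinn::dialect::AddOp (an assumption based on the `add` entry in cinn_ops.parsed.yaml):

// Hypothetical sketch of the GET_OP_LIST section of the generated cinn_op.h.
// When GET_OP_LIST is defined, the header expands to a comma-separated op
// list that RegisterOps<...>() above splices in as template arguments.
#ifdef GET_OP_LIST
#undef GET_OP_LIST
cinn::dialect::AddOp
#else
// ... full op class declarations ...
#endif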
paddle/cinn/hlir/dialect/cinn_dialect.h:

// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/ir/core/dialect.h"
namespace cinn {
namespace dialect {
class CinnDialect : public ::ir::Dialect {
public:
explicit CinnDialect(::ir::IrContext* context);
static const char* name() { return "cinn"; }
private:
void initialize();
};
} // namespace dialect
} // namespace cinn
IR_DECLARE_EXPLICIT_TYPE_ID(cinn::dialect::CinnDialect)
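A minimal usage sketch: registering the dialect in the global IR context. GetOrRegisterDialect is the registration entry point used elsewhere in paddle/ir; treat the exact calls here as assumptions rather than part of this PR:

// Registering CinnDialect triggers CinnDialect::initialize(), which in turn
// registers every op listed in the generated GET_OP_LIST section.
#include "paddle/cinn/hlir/dialect/cinn_dialect.h"
#include "paddle/ir/core/ir_context.h"

void RegisterCinnDialect() {
  ::ir::IrContext* ctx = ::ir::IrContext::Instance();
  ctx->GetOrRegisterDialect<cinn::dialect::CinnDialect>();
}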
paddle/cinn/hlir/dialect/cinn_ops.parsed.yaml:

- name: add
inputs:
- typename: Tensor
name: x
optional: false
no_need_buffer: false
data_transform: {}
- typename: Tensor
name: y
optional: false
no_need_buffer: false
data_transform: {}
attrs: []
outputs:
- {typename: Tensor, name: out, optional: false, intermediate: false}
no_need_buffer: null
data_transform: null
infer_meta:
func: ElementwiseInferMeta
param: [x, y]
kernel:
func: [add]
param: [x, y]
backend: null
layout: null
data_type: null
dispatch: {add: null}
force_backend: null
inplace: {out: x}
view: null
backward: add_grad
- name: add_grad
inputs:
- typename: Tensor
name: x
optional: false
no_need_buffer: true
data_transform: {}
- typename: Tensor
name: y
optional: false
no_need_buffer: true
data_transform: {}
- typename: Tensor
name: out_grad
optional: false
no_need_buffer: false
data_transform: {}
attrs:
- {typename: int, name: axis, default_value: '-1'}
outputs:
- {typename: Tensor, name: x_grad, optional: false, intermediate: false}
- {typename: Tensor, name: y_grad, optional: false, intermediate: false}
no_need_buffer: [x, y]
data_transform: null
infer_meta:
func: GeneralBinaryGradInferMeta
param: [x, y]
kernel:
func: [add_grad]
param: [x, y, out_grad, axis]
backend: null
layout: null
data_type: null
dispatch: {add_grad: null}
force_backend: null
inplace: {x_grad: out_grad}
view: null
composite: {func_name: add_grad, func_args: 'x, y, out_grad, axis, x_grad, y_grad'}
backward: add_double_grad
forward:
name: add
inputs:
- {name: x, typename: Tensor}
- {name: y, typename: Tensor}
attrs: []
outputs:
- {name: out, typename: Tensor}
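From the parsed `add` entry above, op_gen.py emits a C++ op class into cinn_op.h/cinn_op.cc. A rough sketch of the expected shape, mirroring the pd_dialect generator's output; every member shown is an assumption about the generated code, not a quote of it:

// Hypothetical shape of the generated AddOp declaration.
class AddOp : public ir::Op<AddOp,
                            paddle::dialect::OpYamlInfoInterface,
                            paddle::dialect::InferMetaInterface> {
 public:
  using Op::Op;
  static const char *name() { return "cinn.add"; }           // dialect name + op name
  static constexpr uint32_t attributes_num = 0;              // attrs: [] in the yaml
  static OpInfoTuple GetOpInfo();                            // from the infer_meta/kernel entries
  static void InferMeta(phi::InferMetaContext *infer_meta);  // calls ElementwiseInferMeta
  void Verify();
};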
@@ -191,7 +191,7 @@ def GenBuildAttributes(
):
INTARRAY_STR_TEMPLATE = """ ir::Attribute attr_{attr_name} = {op_attribute_type}::get(ir::IrContext::Instance(), phi::IntArray({attr}));
"""
- SCALAR_STR_TEMPLATE = """ ir::Attribute attr_{attr_name} = TransToIrAttribute({attr}, ir::IrContext::Instance());
+ SCALAR_STR_TEMPLATE = """ ir::Attribute attr_{attr_name} = paddle::dialect::TransToIrAttribute({attr}, ir::IrContext::Instance());
"""
STR_TEMPLATE = """ ir::Attribute attr_{attr_name} = {op_attribute_type}::get(ir::IrContext::Instance(), {attr});
"""
@@ -286,7 +286,7 @@ def GenBuildOutputs(
CREATE_INPUT_METATENSOR_TEMPLATE = """
VLOG(4) << "Builder construction dense_{name}";
phi::DenseTensor dense_{name}(std::make_unique<paddle::experimental::DefaultAllocator>(paddle::platform::CPUPlace()).get(),
- phi::DenseTensorMeta(TransToPhiDataType({name}.dtype()),
+ phi::DenseTensorMeta(paddle::dialect::TransToPhiDataType({name}.dtype()),
{name}.dims(),
{name}.data_layout(),
{name}.lod(),
@@ -297,7 +297,7 @@ def GenBuildOutputs(
CREATE_INPUT_VEC_METATENSOR_TEMPLATE = """ std::vector<phi::DenseTensor> vec_dense_{name};
for (size_t i=0; i < static_cast<size_t>({name}.size()); i++) {{
vec_dense_{name}.push_back(phi::DenseTensor(std::make_unique<paddle::experimental::DefaultAllocator>(paddle::platform::CPUPlace()).get(),
- phi::DenseTensorMeta(TransToPhiDataType({name}[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
+ phi::DenseTensorMeta(paddle::dialect::TransToPhiDataType({name}[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
{name}[i].dyn_cast<paddle::dialect::DenseTensorType>().dims(),
{name}[i].dyn_cast<paddle::dialect::DenseTensorType>().data_layout(),
{name}[i].dyn_cast<paddle::dialect::DenseTensorType>().lod(),
@@ -430,13 +430,13 @@ def GenBuildOutputs(
build_output_str += "\n std::vector<ir::Type> argument_outputs;"
CREATE_OUTPUT_DENSE_TENSOR_TEMPLATE = """
- ir::Type {name}_dense_tensor_type = paddle::dialect::DenseTensorType::get(ir::IrContext::Instance(), TransToIrDataType(dense_{name}.dtype()), dense_{name}.dims(), dense_{name}.layout(), dense_{name}.lod(), dense_{name}.offset());
+ ir::Type {name}_dense_tensor_type = paddle::dialect::DenseTensorType::get(ir::IrContext::Instance(), paddle::dialect::TransToIrDataType(dense_{name}.dtype()), dense_{name}.dims(), dense_{name}.layout(), dense_{name}.lod(), dense_{name}.offset());
argument_outputs.push_back({name}_dense_tensor_type);
"""
CREATE_OUTPUT_VEC_DENSE_TENSOR_TEMPLATE = """
std::vector<ir::Type> {name}_types;
for (size_t i=0; i < static_cast<size_t>({output_size}); i++) {{
- {name}_types.push_back(paddle::dialect::DenseTensorType::get(ir::IrContext::Instance(), TransToIrDataType(vec_dense_{name}[i].dtype()), vec_dense_{name}[i].dims(), vec_dense_{name}[i].layout(), vec_dense_{name}[i].lod(), vec_dense_{name}[i].offset()));
+ {name}_types.push_back(paddle::dialect::DenseTensorType::get(ir::IrContext::Instance(), paddle::dialect::TransToIrDataType(vec_dense_{name}[i].dtype()), vec_dense_{name}[i].dims(), vec_dense_{name}[i].layout(), vec_dense_{name}[i].lod(), vec_dense_{name}[i].offset()));
}}
ir::Type {name}_vector_type = ir::VectorType::get(ir::IrContext::Instance(), {name}_types);
argument_outputs.push_back({name}_vector_type);
@@ -97,8 +97,7 @@ op_n_attribute_declare_str = (
# String Template for cc file code gen
# =====================================
CC_FILE_TEMPLATE = """// This file is generated by "paddle/fluid/ir/dialect/op_generator/op_gen.py"
#include "paddle/fluid/ir/dialect/pd_op.h"
#include "{h_file}"
#include "paddle/fluid/ir/dialect/pd_type.h"
#include "paddle/fluid/ir/dialect/pd_attribute.h"
#include "paddle/ir/core/builtin_attribute.h"
@@ -115,7 +114,7 @@ CC_FILE_TEMPLATE = """// This file is generated by "paddle/fluid/ir/dialect/op_g
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/fluid/primitive/rule/vjp/vjp.h"
#include "paddle/fluid/primitive/type/lazy_tensor.h"
{def_primitive}
#include "paddle/ir/core/op_base.h"
{input}
@@ -133,18 +132,14 @@ OpInfoTuple {op_name}::GetOpInfo() {{
std::vector<paddle::dialect::OpInputInfo> inputs = {{ {inputs} }};
std::vector<paddle::dialect::OpAttributeInfo> attributes = {{ {attributes} }};
std::vector<paddle::dialect::OpOutputInfo> outputs = {{ {outputs} }};
- paddle::dialect::OpRunTimeInfo run_time_info = OpRunTimeInfo("{infer_meta_func}", {{"{infer_meta_param}"}}, {{"{kernel_func}"}}, {{"{kernel_param}"}}, {{"{kernel_key_dtype}"}}, {{{inplace}}}, {{{view}}});
+ paddle::dialect::OpRunTimeInfo run_time_info = paddle::dialect::OpRunTimeInfo("{infer_meta_func}", {{"{infer_meta_param}"}}, {{"{kernel_func}"}}, {{"{kernel_param}"}}, {{"{kernel_key_dtype}"}}, {{{inplace}}}, {{{view}}});
return std::make_tuple(inputs, attributes, outputs, run_time_info);
}}
"""
- CONSTRUCT_INPUT_INFO_TEMPLATE = """OpInputInfo("{name}", "{typename}", {optional}, {no_need_buffer}, {is_mutable_attribute})"""
- CONSTRUCT_OUTPUT_INFO_TEMPLATE = (
-     """OpOutputInfo("{name}", "{typename}", {optional}, {intermediate})"""
- )
- CONSTRUCT_ATTRIBUTE_INFO_TEMPLATE = (
-     """OpAttributeInfo("{name}", "{typename}", "{data_type}")"""
- )
+ CONSTRUCT_INPUT_INFO_TEMPLATE = """paddle::dialect::OpInputInfo("{name}", "{typename}", {optional}, {no_need_buffer}, {is_mutable_attribute})"""
+ CONSTRUCT_OUTPUT_INFO_TEMPLATE = """paddle::dialect::OpOutputInfo("{name}", "{typename}", {optional}, {intermediate})"""
+ CONSTRUCT_ATTRIBUTE_INFO_TEMPLATE = """paddle::dialect::OpAttributeInfo("{name}", "{typename}", "{data_type}")"""
DEFINE_OP_TYPE_ID = """
@@ -751,17 +746,17 @@ def OpGenerator(
op_kernel_map = op_info.kernel_map
op_inplace_map = op_info.inplace_map
op_view_map = op_info.view_map
op_interfaces = ["OpYamlInfoInterface"]
op_interfaces = ["paddle::dialect::OpYamlInfoInterface"]
op_traits = []
if op_info.infer_meta_func:
op_interfaces += ["InferMetaInterface"]
op_interfaces += ["paddle::dialect::InferMetaInterface"]
if (
op_info.backward_name
and op_info.op_phi_name[0] in vjp_interface_declare_gen_op_list
):
op_interfaces += ["VjpInterface"]
op_interfaces += ["paddle::dialect::VjpInterface"]
exclusive_interface_str = gen_exclusive_interface_str(op_info)
# If op has inplace info, we will generate inplace op and non-inplace op.
@@ -779,7 +774,7 @@ def OpGenerator(
op_interfaces_str = "," + ",".join(op_interfaces)
if op_name[-1] == "_":
op_traits += ["InplaceTrait"]
op_traits += ["paddle::dialect::InplaceTrait"]
op_traits_str = ""
if len(op_traits) > 0:
@@ -1079,6 +1074,12 @@ def OpGenerator(
ops_defined_list.append(build_func_with_muta_attr_is_input)
ops_defined_list.append(op_verify_str)
ops_defined_list.append(op_infer_meta_str)
- ops_defined_list.append(op_vjp_str)
+ # NOTE(chenxi67): skip Vjp generation if dialect_name == cinn
+ if dialect_name == "cinn":
+     import logging
+     logging.warning("cinn currently does not support the Vjp function")
+ else:
+     ops_defined_list.append(op_vjp_str)
# (4) Generate head file str
@@ -1119,8 +1120,15 @@ def OpGenerator(
for op in ops_name_with_namespace_list:
define_type_id_str += DEFINE_OP_TYPE_ID.format(op_name=op)
+ # NOTE(chenxi67): Skip including this header file if dialect_name == cinn;
+ # otherwise we may get a compile error when compiling with "ncclDataType_t"
+ def_primitive_str = "#include \"paddle/fluid/primitive/type/lazy_tensor.h\""
+ if dialect_name == "cinn":
+     def_primitive_str = ""
source_file_str = CC_FILE_TEMPLATE.format(
+ h_file=op_def_h_file[:-4],
+ def_primitive=def_primitive_str,
input=source_file_str,
define_type_id=define_type_id_str,
) # Add head
@@ -14,7 +14,7 @@
#pragma once
#include "paddle/fluid/framework/convert_utils.h"
// #include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/ir/dialect/pd_attribute.h"
#include "paddle/fluid/ir/dialect/pd_type_storage.h"
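Most of the op_gen.py template edits above share one motive: the generated source is compiled for a dialect other than paddle::dialect, so helpers and types such as TransToIrAttribute, TransToPhiDataType, and OpInputInfo must be spelled with full qualification instead of relying on the enclosing namespace. A minimal self-contained illustration of the failure mode (the function below is a stand-in, not the real helper):

namespace paddle {
namespace dialect {
inline int TransToIrAttribute(int v) { return v; }  // stand-in for the real helper
}  // namespace dialect
}  // namespace paddle

namespace cinn {
namespace dialect {
inline int Use(int v) {
  // return TransToIrAttribute(v);               // error: name not visible here
  return paddle::dialect::TransToIrAttribute(v);  // OK: fully qualified
}
}  // namespace dialect
}  // namespace cinn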