diff --git a/paddle/cinn/hlir/dialect/CMakeLists.txt b/paddle/cinn/hlir/dialect/CMakeLists.txt
index 897e9ab8cfb445ec86939fd694c62b862f0988b1..d7c6d787a7fb0437c5727757c2f1a150851d7ea3 100755
--- a/paddle/cinn/hlir/dialect/CMakeLists.txt
+++ b/paddle/cinn/hlir/dialect/CMakeLists.txt
@@ -1,6 +1,53 @@
 # TODO(Aurelius84): new_ir_compiler depends on pd_dialect and could
 # not found under CINN_ONLY mode
 if(NOT CINN_ONLY)
-  cinn_cc_library(cinn_dialect SRCS runtime_dialect.cc jit_kernel_op.cc DEPS
-                  pd_dialect)
+  set(CINN_DIALECT_SOURCE_DIR "${PADDLE_SOURCE_DIR}/paddle/cinn/hlir/dialect")
+  set(CINN_DIALECT_BINARY_DIR "${PADDLE_BINARY_DIR}/paddle/cinn/hlir/dialect")
+
+  # Generate the op definition files for the cinn dialect with op_gen.py
+  set(cinn_op_gen_file
+      ${PADDLE_SOURCE_DIR}/paddle/fluid/ir/dialect/op_generator/op_gen.py)
+
+  set(cinn_op_compat_yaml_file
+      ${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/op_compat.yaml)
+
+  set(cinn_op_forward_yaml_file1
+      ${PADDLE_SOURCE_DIR}/paddle/cinn/hlir/dialect/cinn_ops.parsed.yaml)
+
+  set(cinn_op_yaml_files ${cinn_op_forward_yaml_file1})
+
+  set(cinn_op_namespace cinn,dialect)
+  set(cinn_dialect_name cinn)
+  set(cinn_op_header_file ${CINN_DIALECT_BINARY_DIR}/cinn_op.h)
+  set(cinn_op_source_file ${CINN_DIALECT_BINARY_DIR}/cinn_op.cc)
+  set(cinn_op_header_file_tmp ${cinn_op_header_file}.tmp)
+  set(cinn_op_source_file_tmp ${cinn_op_source_file}.tmp)
+
+  add_custom_command(
+    OUTPUT ${cinn_op_header_file} ${cinn_op_source_file}
+    COMMAND
+      ${PYTHON_EXECUTABLE} ${cinn_op_gen_file} --op_yaml_files
+      ${cinn_op_yaml_files} --op_compat_yaml_file ${cinn_op_compat_yaml_file}
+      --namespaces ${cinn_op_namespace} --dialect_name ${cinn_dialect_name}
+      --op_def_h_file ${cinn_op_header_file_tmp} --op_def_cc_file
+      ${cinn_op_source_file_tmp}
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${cinn_op_header_file_tmp}
+            ${cinn_op_header_file}
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${cinn_op_source_file_tmp}
+            ${cinn_op_source_file}
+    DEPENDS ${cinn_op_gen_file} ${cinn_op_forward_yaml_file1}
+            ${cinn_op_compat_yaml_file}
+    VERBATIM)
+
+  cinn_cc_library(
+    cinn_dialect
+    SRCS
+    runtime_dialect.cc
+    jit_kernel_op.cc
+    cinn_dialect.cc
+    cinn_op.cc
+    DEPS
+    pd_dialect)
+
+  target_include_directories(cinn_dialect PRIVATE ${CINN_DIALECT_BINARY_DIR})
 endif()
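For orientation, the header produced by this custom command plausibly follows the same GET_OP_LIST layout as the existing generated pd_op.h. The sketch below is illustrative only; the op class names (AddOp, AddGradOp) are assumptions derived from the cinn_ops.parsed.yaml added below, not actual generator output:

    // Hypothetical shape of the generated cinn_op.h (illustrative sketch).
    #ifdef GET_OP_LIST
    #undef GET_OP_LIST
    // Comma-separated op list, consumed by ir::Dialect::RegisterOps<...>().
    cinn::dialect::AddOp, cinn::dialect::AddGradOp
    #else
    // Otherwise: normal class declarations, one per op parsed from the YAML,
    // e.g. class AddOp : public ir::Op<AddOp, ...> { ... };
    #endif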
+#include "paddle/cinn/hlir/dialect/cinn_op.h" + +namespace cinn { +namespace dialect { + +CinnDialect::CinnDialect(::ir::IrContext* context) + : ::ir::Dialect( + name(), context, ::ir::TypeId::get()) { + this->initialize(); +} + +void CinnDialect::initialize() { + // NOTE(chenxi67): GET_OP_LIST is defined in cinn_op.h which is + // generated by op_gen.py, see details in + // paddle/cinn/hlir/dialect/CMakeLists.txt. + RegisterOps< +#define GET_OP_LIST +#include "paddle/cinn/hlir/dialect/cinn_op.h" // NOLINT + >(); +} + +} // namespace dialect +} // namespace cinn + +IR_DEFINE_EXPLICIT_TYPE_ID(cinn::dialect::CinnDialect) diff --git a/paddle/cinn/hlir/dialect/cinn_dialect.h b/paddle/cinn/hlir/dialect/cinn_dialect.h new file mode 100644 index 0000000000000000000000000000000000000000..77fb96863ad3714d43b22cebb8068f1df75bf3cb --- /dev/null +++ b/paddle/cinn/hlir/dialect/cinn_dialect.h @@ -0,0 +1,35 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/ir/core/dialect.h" + +namespace cinn { +namespace dialect { + +class CinnDialect : public ::ir::Dialect { + public: + explicit CinnDialect(::ir::IrContext* context); + + static const char* name() { return "cinn"; } + + private: + void initialize(); +}; + +} // namespace dialect +} // namespace cinn + +IR_DECLARE_EXPLICIT_TYPE_ID(cinn::dialect::CinnDialect) diff --git a/paddle/cinn/hlir/dialect/cinn_ops.parsed.yaml b/paddle/cinn/hlir/dialect/cinn_ops.parsed.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6297be83e23986b871b29217738b90c258a7ac50 --- /dev/null +++ b/paddle/cinn/hlir/dialect/cinn_ops.parsed.yaml @@ -0,0 +1,78 @@ +- name: add + inputs: + - typename: Tensor + name: x + optional: false + no_need_buffer: false + data_transform: {} + - typename: Tensor + name: y + optional: false + no_need_buffer: false + data_transform: {} + attrs: [] + outputs: + - {typename: Tensor, name: out, optional: false, intermediate: false} + no_need_buffer: null + data_transform: null + infer_meta: + func: ElementwiseInferMeta + param: [x, y] + kernel: + func: [add] + param: [x, y] + backend: null + layout: null + data_type: null + dispatch: {add: null} + force_backend: null + inplace: {out: x} + view: null + backward: add_grad +- name: add_grad + inputs: + - typename: Tensor + name: x + optional: false + no_need_buffer: true + data_transform: {} + - typename: Tensor + name: y + optional: false + no_need_buffer: true + data_transform: {} + - typename: Tensor + name: out_grad + optional: false + no_need_buffer: false + data_transform: {} + attrs: + - {typename: int, name: axis, default_value: '-1'} + outputs: + - {typename: Tensor, name: x_grad, optional: false, intermediate: false} + - {typename: Tensor, name: y_grad, optional: false, intermediate: false} + no_need_buffer: [x, y] + data_transform: null + infer_meta: + func: GeneralBinaryGradInferMeta + param: [x, y] + kernel: + func: [add_grad] + param: [x, y, out_grad, axis] + 
diff --git a/paddle/cinn/hlir/dialect/cinn_ops.parsed.yaml b/paddle/cinn/hlir/dialect/cinn_ops.parsed.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6297be83e23986b871b29217738b90c258a7ac50
--- /dev/null
+++ b/paddle/cinn/hlir/dialect/cinn_ops.parsed.yaml
@@ -0,0 +1,78 @@
+- name: add
+  inputs:
+  - typename: Tensor
+    name: x
+    optional: false
+    no_need_buffer: false
+    data_transform: {}
+  - typename: Tensor
+    name: y
+    optional: false
+    no_need_buffer: false
+    data_transform: {}
+  attrs: []
+  outputs:
+  - {typename: Tensor, name: out, optional: false, intermediate: false}
+  no_need_buffer: null
+  data_transform: null
+  infer_meta:
+    func: ElementwiseInferMeta
+    param: [x, y]
+  kernel:
+    func: [add]
+    param: [x, y]
+    backend: null
+    layout: null
+    data_type: null
+    dispatch: {add: null}
+    force_backend: null
+  inplace: {out: x}
+  view: null
+  backward: add_grad
+- name: add_grad
+  inputs:
+  - typename: Tensor
+    name: x
+    optional: false
+    no_need_buffer: true
+    data_transform: {}
+  - typename: Tensor
+    name: y
+    optional: false
+    no_need_buffer: true
+    data_transform: {}
+  - typename: Tensor
+    name: out_grad
+    optional: false
+    no_need_buffer: false
+    data_transform: {}
+  attrs:
+  - {typename: int, name: axis, default_value: '-1'}
+  outputs:
+  - {typename: Tensor, name: x_grad, optional: false, intermediate: false}
+  - {typename: Tensor, name: y_grad, optional: false, intermediate: false}
+  no_need_buffer: [x, y]
+  data_transform: null
+  infer_meta:
+    func: GeneralBinaryGradInferMeta
+    param: [x, y]
+  kernel:
+    func: [add_grad]
+    param: [x, y, out_grad, axis]
+    backend: null
+    layout: null
+    data_type: null
+    dispatch: {add_grad: null}
+    force_backend: null
+  inplace: {x_grad: out_grad}
+  view: null
+  composite: {func_name: add_grad, func_args: 'x, y, out_grad, axis, x_grad, y_grad'}
+  backward: add_double_grad
+  forward:
+    name: add
+    inputs:
+    - {name: x, typename: Tensor}
+    - {name: y, typename: Tensor}
+    attrs: []
+    outputs:
+    - {name: out, typename: Tensor}
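To connect the YAML to the generated code: hand-expanding the `add` entry above through the CONSTRUCT_*_INFO_TEMPLATE strings patched later in this diff should yield a GetOpInfo body roughly like the following. This is a sketch only; the exact typename strings and OpRunTimeInfo argument order are assumptions, and op_gen.py remains the source of truth:

    OpInfoTuple AddOp::GetOpInfo() {
      std::vector<paddle::dialect::OpInputInfo> inputs = {
          paddle::dialect::OpInputInfo("x", "paddle::dialect::DenseTensorType",
                                       false, false, false),
          paddle::dialect::OpInputInfo("y", "paddle::dialect::DenseTensorType",
                                       false, false, false)};
      std::vector<paddle::dialect::OpAttributeInfo> attributes = {};
      std::vector<paddle::dialect::OpOutputInfo> outputs = {
          paddle::dialect::OpOutputInfo("out", "paddle::dialect::DenseTensorType",
                                        false, false)};
      paddle::dialect::OpRunTimeInfo run_time_info = paddle::dialect::OpRunTimeInfo(
          "ElementwiseInferMeta", {"x", "y"},  // infer_meta func and params
          {"add"}, {"x", "y"},                 // kernel func and params
          {}, {{"out", "x"}}, {});             // dtype keys, inplace, view
      return std::make_tuple(inputs, attributes, outputs, run_time_info);
    }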
diff --git a/paddle/fluid/ir/dialect/op_generator/op_build_gen.py b/paddle/fluid/ir/dialect/op_generator/op_build_gen.py
index f4d91d5c06821f29cd0788fce1e6751337748815..a59948bcb3c6d3677a72a24eadcf3b6b75bc7eda 100644
--- a/paddle/fluid/ir/dialect/op_generator/op_build_gen.py
+++ b/paddle/fluid/ir/dialect/op_generator/op_build_gen.py
@@ -191,7 +191,7 @@ def GenBuildAttributes(
 ):
     INTARRAY_STR_TEMPLATE = """  ir::Attribute attr_{attr_name} = {op_attribute_type}::get(ir::IrContext::Instance(), phi::IntArray({attr}));
 """
-    SCALAR_STR_TEMPLATE = """  ir::Attribute attr_{attr_name} = TransToIrAttribute({attr}, ir::IrContext::Instance());
+    SCALAR_STR_TEMPLATE = """  ir::Attribute attr_{attr_name} = paddle::dialect::TransToIrAttribute({attr}, ir::IrContext::Instance());
 """
     STR_TEMPLATE = """  ir::Attribute attr_{attr_name} = {op_attribute_type}::get(ir::IrContext::Instance(), {attr});
 """
@@ -286,7 +286,7 @@ def GenBuildOutputs(
     CREATE_INPUT_METATENSOR_TEMPLATE = """  VLOG(4) << "Builder construction dense_{name}";
  phi::DenseTensor dense_{name}(std::make_unique<paddle::experimental::DefaultAllocator>(paddle::platform::CPUPlace()).get(),
-                               phi::DenseTensorMeta(TransToPhiDataType({name}.dtype()),
+                               phi::DenseTensorMeta(paddle::dialect::TransToPhiDataType({name}.dtype()),
                                                     {name}.dims(),
                                                     {name}.data_layout(),
                                                     {name}.lod(),
@@ -297,7 +297,7 @@ def GenBuildOutputs(
     CREATE_INPUT_VEC_METATENSOR_TEMPLATE = """  std::vector<phi::DenseTensor> vec_dense_{name};
  for (size_t i=0; i < static_cast<size_t>({name}.size()); i++) {{
    vec_dense_{name}.push_back(phi::DenseTensor(std::make_unique<paddle::experimental::DefaultAllocator>(paddle::platform::CPUPlace()).get(),
-                              phi::DenseTensorMeta(TransToPhiDataType({name}[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
+                              phi::DenseTensorMeta(paddle::dialect::TransToPhiDataType({name}[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
                                                    {name}[i].dyn_cast<paddle::dialect::DenseTensorType>().dims(),
                                                    {name}[i].dyn_cast<paddle::dialect::DenseTensorType>().data_layout(),
                                                    {name}[i].dyn_cast<paddle::dialect::DenseTensorType>().lod(),
@@ -430,13 +430,13 @@ def GenBuildOutputs(
     build_output_str += "\n  std::vector<ir::Type> argument_outputs;"

     CREATE_OUTPUT_DENSE_TENSOR_TEMPLATE = """
-  ir::Type {name}_dense_tensor_type = paddle::dialect::DenseTensorType::get(ir::IrContext::Instance(), TransToIrDataType(dense_{name}.dtype()), dense_{name}.dims(), dense_{name}.layout(), dense_{name}.lod(), dense_{name}.offset());
+  ir::Type {name}_dense_tensor_type = paddle::dialect::DenseTensorType::get(ir::IrContext::Instance(), paddle::dialect::TransToIrDataType(dense_{name}.dtype()), dense_{name}.dims(), dense_{name}.layout(), dense_{name}.lod(), dense_{name}.offset());
   argument_outputs.push_back({name}_dense_tensor_type);
 """
     CREATE_OUTPUT_VEC_DENSE_TENSOR_TEMPLATE = """
   std::vector<ir::Type> {name}_types;
   for (size_t i=0; i < static_cast<size_t>({output_size}); i++) {{
-    {name}_types.push_back(paddle::dialect::DenseTensorType::get(ir::IrContext::Instance(), TransToIrDataType(vec_dense_{name}[i].dtype()), vec_dense_{name}[i].dims(), vec_dense_{name}[i].layout(), vec_dense_{name}[i].lod(), vec_dense_{name}[i].offset()));
+    {name}_types.push_back(paddle::dialect::DenseTensorType::get(ir::IrContext::Instance(), paddle::dialect::TransToIrDataType(vec_dense_{name}[i].dtype()), vec_dense_{name}[i].dims(), vec_dense_{name}[i].layout(), vec_dense_{name}[i].lod(), vec_dense_{name}[i].offset()));
   }}
   ir::Type {name}_vector_type = ir::VectorType::get(ir::IrContext::Instance(), {name}_types);
   argument_outputs.push_back({name}_vector_type);
diff --git a/paddle/fluid/ir/dialect/op_generator/op_gen.py b/paddle/fluid/ir/dialect/op_generator/op_gen.py
index da1d7cbdde0900f4dd36225bf8acee6366408c86..423b47ae44ed0c81413c540b36b79bc6ac54ac71 100644
--- a/paddle/fluid/ir/dialect/op_generator/op_gen.py
+++ b/paddle/fluid/ir/dialect/op_generator/op_gen.py
@@ -97,8 +97,7 @@ op_n_attribute_declare_str = (
 # String Template for cc file code gen
 # =====================================
 CC_FILE_TEMPLATE = """// This file is generated by "paddle/fluid/ir/dialect/op_generator/op_gen.py"
-
-#include "paddle/fluid/ir/dialect/pd_op.h"
+#include "{h_file}"
 #include "paddle/fluid/ir/dialect/pd_type.h"
 #include "paddle/fluid/ir/dialect/pd_attribute.h"
 #include "paddle/ir/core/builtin_attribute.h"
@@ -115,7 +114,7 @@ CC_FILE_TEMPLATE = """// This file is generated by "paddle/fluid/ir/dialect/op_g
 #include "paddle/phi/infermeta/backward.h"
 #include "paddle/phi/api/lib/utils/allocator.h"
 #include "paddle/fluid/primitive/rule/vjp/vjp.h"
-#include "paddle/fluid/primitive/type/lazy_tensor.h"
+{def_primitive}
 #include "paddle/ir/core/op_base.h"

 {input}
@@ -133,18 +132,14 @@ OpInfoTuple {op_name}::GetOpInfo() {{
   std::vector<paddle::dialect::OpInputInfo> inputs = {{ {inputs} }};
   std::vector<paddle::dialect::OpAttributeInfo> attributes = {{ {attributes} }};
   std::vector<paddle::dialect::OpOutputInfo> outputs = {{ {outputs} }};
-  paddle::dialect::OpRunTimeInfo run_time_info = OpRunTimeInfo("{infer_meta_func}", {{"{infer_meta_param}"}}, {{"{kernel_func}"}}, {{"{kernel_param}"}}, {{"{kernel_key_dtype}"}}, {{{inplace}}}, {{{view}}});
+  paddle::dialect::OpRunTimeInfo run_time_info = paddle::dialect::OpRunTimeInfo("{infer_meta_func}", {{"{infer_meta_param}"}}, {{"{kernel_func}"}}, {{"{kernel_param}"}}, {{"{kernel_key_dtype}"}}, {{{inplace}}}, {{{view}}});
   return std::make_tuple(inputs, attributes, outputs, run_time_info);
 }}
 """

-CONSTRUCT_INPUT_INFO_TEMPLATE = """OpInputInfo("{name}", "{typename}", {optional}, {no_need_buffer}, {is_mutable_attribute})"""
-CONSTRUCT_OUTPUT_INFO_TEMPLATE = (
-    """OpOutputInfo("{name}", "{typename}", {optional}, {intermediate})"""
-)
-CONSTRUCT_ATTRIBUTE_INFO_TEMPLATE = (
-    """OpAttributeInfo("{name}", "{typename}", "{data_type}")"""
-)
+CONSTRUCT_INPUT_INFO_TEMPLATE = """paddle::dialect::OpInputInfo("{name}", "{typename}", {optional}, {no_need_buffer}, {is_mutable_attribute})"""
+CONSTRUCT_OUTPUT_INFO_TEMPLATE = """paddle::dialect::OpOutputInfo("{name}", "{typename}", {optional}, {intermediate})"""
+CONSTRUCT_ATTRIBUTE_INFO_TEMPLATE = """paddle::dialect::OpAttributeInfo("{name}", "{typename}", "{data_type}")"""


 DEFINE_OP_TYPE_ID = """
@@ -751,17 +746,17 @@ def OpGenerator(
         op_kernel_map = op_info.kernel_map
         op_inplace_map = op_info.inplace_map
         op_view_map = op_info.view_map
-        op_interfaces = ["OpYamlInfoInterface"]
+        op_interfaces = ["paddle::dialect::OpYamlInfoInterface"]
         op_traits = []

         if op_info.infer_meta_func:
-            op_interfaces += ["InferMetaInterface"]
+            op_interfaces += ["paddle::dialect::InferMetaInterface"]

         if (
             op_info.backward_name
             and op_info.op_phi_name[0] in vjp_interface_declare_gen_op_list
         ):
-            op_interfaces += ["VjpInterface"]
+            op_interfaces += ["paddle::dialect::VjpInterface"]
         exclusive_interface_str = gen_exclusive_interface_str(op_info)

         # If op has inplace info, we will generate inplace op and non-inplace op.
@@ -779,7 +774,7 @@ def OpGenerator(
             op_interfaces_str = "," + ",".join(op_interfaces)

             if op_name[-1] == "_":
-                op_traits += ["InplaceTrait"]
+                op_traits += ["paddle::dialect::InplaceTrait"]

             op_traits_str = ""
             if len(op_traits) > 0:
@@ -1079,7 +1074,13 @@ def OpGenerator(
             ops_defined_list.append(build_func_with_muta_attr_is_input)
             ops_defined_list.append(op_verify_str)
             ops_defined_list.append(op_infer_meta_str)
-            ops_defined_list.append(op_vjp_str)
+            # NOTE(chenxi67): skip Vjp generation if dialect_name == "cinn"
+            if dialect_name == "cinn":
+                import logging
+
+                logging.warning("cinn dialect does not support the Vjp function yet")
+            else:
+                ops_defined_list.append(op_vjp_str)

     # (4) Generate head file str
     op_namespaces_prev = ""
@@ -1119,8 +1120,15 @@ def OpGenerator(
     for op in ops_name_with_namespace_list:
         define_type_id_str += DEFINE_OP_TYPE_ID.format(op_name=op)

+    # NOTE(chenxi67): Skip including this header file if dialect_name == "cinn";
+    # otherwise we may get a compile error on "ncclDataType_t".
+    def_primitive_str = "#include \"paddle/fluid/primitive/type/lazy_tensor.h\""
+    if dialect_name == "cinn":
+        def_primitive_str = ""
+
     source_file_str = CC_FILE_TEMPLATE.format(
         h_file=op_def_h_file[:-4],
+        def_primitive=def_primitive_str,
         input=source_file_str,
         define_type_id=define_type_id_str,
     )  # Add head
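The recurring theme in the two generator patches above is fully qualifying paddle::dialect names, since the emitted definitions may now live in cinn::dialect, where unqualified lookup fails. A standalone toy illustration of that lookup rule (hypothetical names, not Paddle code):

    #include <iostream>

    namespace paddle {
    namespace dialect {
    int TransToIrAttribute(int v) { return v + 1; }  // stand-in helper
    }  // namespace dialect
    }  // namespace paddle

    namespace cinn {
    namespace dialect {
    int UseHelper(int v) {
      // return TransToIrAttribute(v);   // would not compile: name not visible here
      return paddle::dialect::TransToIrAttribute(v);  // qualified call resolves
    }
    }  // namespace dialect
    }  // namespace cinn

    int main() { std::cout << cinn::dialect::UseHelper(41) << std::endl; }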
diff --git a/paddle/fluid/ir/dialect/utils.h b/paddle/fluid/ir/dialect/utils.h
index 2cc1c653fc0effc0e1a40b2d43997a40cbbdcef6..9b5a79c7a4433f4d24c5a19798a8fef7395439be 100644
--- a/paddle/fluid/ir/dialect/utils.h
+++ b/paddle/fluid/ir/dialect/utils.h
@@ -14,7 +14,7 @@

 #pragma once

-#include "paddle/fluid/framework/convert_utils.h"
+// #include "paddle/fluid/framework/convert_utils.h"
 #include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/ir/dialect/pd_attribute.h"
 #include "paddle/fluid/ir/dialect/pd_type_storage.h"
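Net effect of the CC_FILE_TEMPLATE changes for the cinn dialect: with h_file pointing at the generated header and {def_primitive} expanding to the empty string, the generated cinn_op.cc plausibly begins as below (an illustrative reconstruction from the template, not actual generator output):

    // This file is generated by "paddle/fluid/ir/dialect/op_generator/op_gen.py"
    #include "paddle/cinn/hlir/dialect/cinn_op.h"
    #include "paddle/fluid/ir/dialect/pd_type.h"
    #include "paddle/fluid/ir/dialect/pd_attribute.h"
    // ... remaining common includes from CC_FILE_TEMPLATE ...
    #include "paddle/fluid/primitive/rule/vjp/vjp.h"
    // (no lazy_tensor.h here: {def_primitive} is empty when dialect_name == "cinn")
    #include "paddle/ir/core/op_base.h"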