Unverified commit 4c01763c authored by 王明冬, committed by GitHub

[infrt] move pd_ops.td to pd folder. test=develop (#40613)

Parent add304ed
......@@ -52,12 +52,12 @@ tools/__pycache__
# This file is automatically generated.
# TODO(zhiqiang) Move this file to build directory.
paddle/infrt/dialect/pd_ops.td
paddle/infrt/dialect/pd/ir/pd_ops.td
paddle/infrt/dialect/phi/ir/phi_cpu_kernels.td
paddle/infrt/dialect/phi/ir/phi_gpu_kernels.td
tools/infrt/kernels.json
tools/infrt/kernel_signature.json
paddle/infrt/dialect/pd_ops_info.h
paddle/infrt/dialect/pd/common/pd_ops_info.h
.lit_test_times.txt
paddle/infrt/tests/dialect/Output
paddle/infrt/tests/lit.cfg.py
......
......@@ -102,7 +102,6 @@ set(infrt_mlir_incs
test_kernels_inc
tensor_shape_inc
dense_tensor_inc
pd_ops_inc
pd_extra_ops_inc
trt_ops_inc
)
......
......@@ -11,11 +11,6 @@ gather_srcs(infrt_src SRCS
mlir_tablegen_on(tensor_shape DIALECT ts)
mlir_tablegen_on(dense_tensor DIALECT dt)
mlir_tablegen_on(pd_op_base DIALECT pd)
mlir_tablegen_on(pd_ops)
mlir_tablegen_on(pd_extra_ops)
mlir_add_rewriter(rewrite)
# TODO(Superjomn) add a cmake function cc_executable to encapsulate the following code
add_executable(infrtopt opt.cc)
......@@ -23,7 +18,6 @@ target_link_libraries(infrtopt infrt)
add_executable(print-ir print_ir.cc)
target_link_libraries(print-ir infrt ${mlir_libs})
add_dependencies(print-ir pd_ops_inc)
cc_test_tiny(test_infrt_mlir_loader SRCS mlir_loader_test.cc DEPS infrt ${MLIR_IR_LIBS})
add_subdirectory(infrt)
......
......@@ -3,7 +3,7 @@
include "mlir/Interfaces/SideEffectInterfaces.td"
include "paddle/infrt/dialect/infrt/ir/infrt_ops.td"
include "paddle/infrt/dialect/pd_ops.td"
include "paddle/infrt/dialect/pd/ir/pd_ops.td"
def FuseTensorCastPattern : Pat<
(Infrt_TensorCastOp (Infrt_TensorCastOp $arg)),
......
......@@ -3,3 +3,5 @@ core_gather_headers()
gather_srcs(infrt_src SRCS
pd_ops.cc
)
add_mlir_dialect(pd_ops pd)
mlir_tablegen_on(pd_extra_ops)
......@@ -4,7 +4,7 @@
include "mlir/Interfaces/InferTypeOpInterface.td"
include "mlir/Interfaces/LoopLikeInterface.td"
include "mlir/IR/OpBase.td"
include "paddle/infrt/dialect/pd_op_base.td"
include "paddle/infrt/dialect/pd/ir/pd_op_base.td"
def PD_FusedFC : PD_Op<"FC", [NoSideEffect]> {
let summary = "Computes the Fully Connected result of two tensors";
......
......@@ -8,7 +8,7 @@ include "mlir/IR/OpBase.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
def PD_Dialect : Dialect {
def Paddle_Dialect : Dialect {
let name = "pd";
let description = [{
......@@ -16,12 +16,12 @@ def PD_Dialect : Dialect {
This dialect contains the PaddlePaddle operators.
}];
let hasConstantMaterializer = 1;
let cppNamespace = "mlir::pd";
}
class PD_Op<string mnemonic, list<OpTrait> traits = []> :
Op<PD_Dialect, mnemonic, traits>;
Op<Paddle_Dialect, mnemonic, traits>;
class PD_PaddleAttr <string name, string description> :
......
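Note on the PD_Dialect -> Paddle_Dialect rename: with add_mlir_dialect(pd_ops pd) above, the dialect C++ class is now emitted by mlir-tablegen, which derives the class name from the TableGen def name with underscores stripped, so Paddle_Dialect yields PaddleDialect in namespace mlir::pd, matching the hand-written code in pd_ops.cc below. A rough sketch of the declaration the generator is expected to place in pd_opsDialect.h.inc (assumed shape, not copied from build output):

// Rough sketch (assumed): what -gen-dialect-decls is expected to emit into
// pd_opsDialect.h.inc for def Paddle_Dialect with cppNamespace "mlir::pd".
namespace mlir {
namespace pd {

class PaddleDialect : public ::mlir::Dialect {
  explicit PaddleDialect(::mlir::MLIRContext *context);
  void initialize();  // hand-written in pd_ops.cc; registers the ops
  friend class ::mlir::MLIRContext;

 public:
  static constexpr ::llvm::StringLiteral getDialectNamespace() {
    return ::llvm::StringLiteral("pd");
  }
  // Emitted because of: let hasConstantMaterializer = 1;
  ::mlir::Operation *materializeConstant(::mlir::OpBuilder &builder,
                                         ::mlir::Attribute value,
                                         ::mlir::Type type,
                                         ::mlir::Location loc) override;
};

}  // namespace pd
}  // namespace mlir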
......@@ -17,24 +17,22 @@
#include <mlir/IR/Matchers.h>
#include <mlir/IR/PatternMatch.h>
#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/pd/ir/pd_opsDialect.cpp.inc"
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/pd_ops.cpp.inc" // NOLINT
#include "paddle/infrt/dialect/pd/ir/pd_ops.cpp.inc" // NOLINT
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/pd_extra_ops.cpp.inc" // NOLINT
#include "paddle/infrt/dialect/pd/ir/pd_extra_ops.cpp.inc" // NOLINT
namespace mlir {
namespace pd {
#include "paddle/infrt/dialect/rewrite.cpp.inc" // NOLINT
PaddleDialect::PaddleDialect(MLIRContext *context)
: Dialect("pd", context, TypeID::get<PaddleDialect>()) {
void PaddleDialect::initialize() {
addOperations<
#define GET_OP_LIST
#include "paddle/infrt/dialect/pd_ops.cpp.inc" // NOLINT
#include "paddle/infrt/dialect/pd/ir/pd_ops.cpp.inc" // NOLINT
,
#define GET_OP_LIST
#include "paddle/infrt/dialect/pd_extra_ops.cpp.inc" // NOLINT
#include "paddle/infrt/dialect/pd/ir/pd_extra_ops.cpp.inc" // NOLINT
>();
}
......@@ -73,106 +71,5 @@ mlir::OpFoldResult ConstantOp::fold(
::llvm::ArrayRef<mlir::Attribute> operands) {
return value();
}
/*
LogicalResult ElementwiseAdd::inferReturnTypes(
MLIRContext *context,
Optional<Location> location,
ValueRange operands,
DictionaryAttr attributes,
RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
inferredReturnTypes.push_back(operands[0].getType());
return success();
}
*/
void Elementwise_addOp::getCanonicalizationPatterns(
mlir::OwningRewritePatternList &results, mlir::MLIRContext *context) {
results.insert<FuseMulAdd>(context);
}
/*
mlir::OpFoldResult ElementwiseAdd::fold(
llvm::ArrayRef<mlir::Attribute> operands) {
if (getElementTypeOrSelf(getType()).isa<FloatType>()) {
if (!operands[0] || !operands[1]) return {};
DenseElementsAttr lhs = operands[0].dyn_cast<DenseElementsAttr>();
DenseElementsAttr rhs = operands[1].dyn_cast<DenseElementsAttr>();
if (!lhs || !rhs) return {};
ShapedType type = getType().template cast<ShapedType>();
if (!type.hasStaticShape()) return {};
Type etype = type.getElementType();
if (!etype.isa<FloatType>()) return {};
SmallVector<APFloat, 6> values;
values.reserve(lhs.getNumElements());
for (const auto zip :
llvm::zip(lhs.getValues<APFloat>(), rhs.getValues<APFloat>())) {
values.push_back(
std::plus<APFloat>()(std::get<0>(zip), std::get<1>(zip)));
}
return DenseElementsAttr::get(type, values);
}
return {};
}
LogicalResult ElementwiseDiv::inferReturnTypes(
MLIRContext *context,
Optional<Location> location,
ValueRange operands,
DictionaryAttr attributes,
RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
inferredReturnTypes.push_back(operands[0].getType());
return success();
}
LogicalResult ElementwiseMul::inferReturnTypes(
MLIRContext *context,
Optional<Location> location,
ValueRange operands,
DictionaryAttr attributes,
RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
inferredReturnTypes.push_back(operands[0].getType());
return success();
}
LogicalResult ElementwiseSub::inferReturnTypes(
MLIRContext *context,
Optional<Location> location,
ValueRange operands,
DictionaryAttr attributes,
RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
inferredReturnTypes.push_back(operands[0].getType());
return success();
}
LogicalResult MulOp::inferReturnTypes(
MLIRContext *context,
Optional<Location> location,
ValueRange operands,
DictionaryAttr attributes,
RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
inferredReturnTypes.push_back(operands[0].getType());
return success();
}
void ReluOp::getCanonicalizationPatterns(
mlir::OwningRewritePatternList &results, mlir::MLIRContext *context) {
results.insert<FuseFCRelu>(context);
}
void FusedRepeatedFCRelu::getCanonicalizationPatterns(
mlir::OwningRewritePatternList &results, mlir::MLIRContext *context) {
results.insert<FuseRepeatedFCRelu2>(context);
}
void BatchNormOp::getCanonicalizationPatterns(
mlir::OwningRewritePatternList &results, mlir::MLIRContext *context) {
results.insert<FuseBatchNormWithConvPattern>(context);
}*/
} // namespace pd
} // namespace mlir
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
......@@ -14,49 +14,20 @@
#pragma once
#include <mlir/Dialect/Traits.h>
#include <mlir/IR/Attributes.h>
#include <mlir/IR/Builders.h>
#include <mlir/IR/BuiltinOps.h>
//===----------------------------------------------------------------------===//
// Dialect
//===----------------------------------------------------------------------===//
#include <llvm/ADT/StringMap.h>
#include <mlir/IR/BuiltinTypes.h>
#include <mlir/IR/Dialect.h>
#include <mlir/IR/Matchers.h>
#include <mlir/IR/OpDefinition.h>
#include <mlir/IR/OpImplementation.h>
#include <mlir/IR/TypeUtilities.h>
#include <mlir/Interfaces/CallInterfaces.h>
#include <mlir/Interfaces/DerivedAttributeOpInterface.h>
#include <mlir/Interfaces/InferTypeOpInterface.h>
#include <mlir/Interfaces/LoopLikeInterface.h>
#include <mlir/Interfaces/SideEffectInterfaces.h>
#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
namespace mlir {
namespace pd {
class PaddleDialect : public Dialect {
public:
explicit PaddleDialect(MLIRContext* context);
static StringRef getDialectNamespace() { return "pd"; }
/// A hook used to materialize constant values with the given type.
Operation* materializeConstant(OpBuilder& builder,
Attribute value,
Type type,
Location loc) override;
Type parseType(DialectAsmParser& parser) const override {
return Dialect::parseType(parser);
}
void printType(Type type, DialectAsmPrinter& printer) const override {
Dialect::printType(type, printer);
}
};
} // namespace pd
} // namespace mlir
#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/pd/ir/pd_opsDialect.h.inc"
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/pd_ops.hpp.inc"
#include "paddle/infrt/dialect/pd/ir/pd_ops.h.inc"
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/pd_extra_ops.hpp.inc"
#include "paddle/infrt/dialect/pd/ir/pd_extra_ops.hpp.inc"
core_gather_headers()
gather_srcs(infrt_src SRCS
pd_op_fuse_pass.cc
)
mlir_add_rewriter(pd_op_fuse)
......@@ -3,8 +3,8 @@
include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "paddle/infrt/dialect/pd_ops.td"
include "paddle/infrt/dialect/pd_extra_ops.td"
include "paddle/infrt/dialect/pd/ir/pd_ops.td"
include "paddle/infrt/dialect/pd/ir/pd_extra_ops.td"
//===----------------------------------------------------------------------===//
// This is to fuse the composition: 'Matmul o ElementwiseAdd' into 'PD_FusedFC'.
......
......@@ -11,12 +11,13 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/infrt/dialect/pd/pass/pd_op_fuse_pass.h" // NOLINT
#include <mlir/Pass/Pass.h>
#include <mlir/Transforms/GreedyPatternRewriteDriver.h>
#include "paddle/infrt/dialect/pd/ir/pd_ops.h"
namespace {
#include "paddle/infrt/dialect/rewrite.cpp.inc" // NOLINT
#include "paddle/infrt/dialect/pd/pass/pd_op_fuse.cpp.inc" // NOLINT
/*
* PdOpFusePass.
......
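The generated pd_op_fuse.cpp.inc supplies the rewrite patterns (FuseMulAdd, FuseFCRelu, and so on, moved here from the old rewrite.td) that the pass hands to the greedy driver. A generic sketch of how such a function pass is commonly structured, assuming the MLIR headers already included above (not necessarily the commit's exact pass body):

// Generic sketch (assumed), not the exact implementation in this commit.
struct PdOpFusePassSketch
    : public mlir::PassWrapper<PdOpFusePassSketch, mlir::FunctionPass> {
  llvm::StringRef getArgument() const override { return "pd-op-fuse"; }

  void runOnFunction() override {
    mlir::OwningRewritePatternList patterns(&getContext());
    // Adds the patterns generated from pd_op_fuse.td.
    populateWithGenerated(patterns);
    (void)mlir::applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
  }
};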
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <mlir/Pass/Pass.h>
namespace infrt {
/*
* PdOpFusePass.
*/
std::unique_ptr<mlir::Pass> CreatePdOpFusePass();
} // namespace infrt
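The header only exposes the factory function; for the new infrtopt --pd-op-fuse flag used in the lit test below to resolve, the pass also has to be registered with MLIR's pass registry. One common way to wire that up (hypothetical sketch; the commit may register it differently, for example inside the pass itself):

// Hypothetical registration sketch; the actual hookup may differ.
#include <mlir/Pass/PassRegistry.h>

#include "paddle/infrt/dialect/pd/pass/pd_op_fuse_pass.h"

static mlir::PassPipelineRegistration<> pd_op_fuse_registration(
    "pd-op-fuse", "Fuse patterns in the pd dialect",
    [](mlir::OpPassManager &pm) { pm.addPass(infrt::CreatePdOpFusePass()); });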
......@@ -3,7 +3,7 @@
include "mlir/Interfaces/SideEffectInterfaces.td"
include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
include "paddle/infrt/dialect/pd_ops.td"
include "paddle/infrt/dialect/pd/ir/pd_ops.td"
include "paddle/infrt/dialect/tensorrt/trt_ops.td"
def PD2TRT_Matmul_Lower : Pat<
......
......@@ -14,20 +14,20 @@
#ifndef PADDLE_INFRT_HOST_CONTEXT_PADDLE_MLIR_H_
#define PADDLE_INFRT_HOST_CONTEXT_PADDLE_MLIR_H_
#include <llvm/Support/CommandLine.h>
#include <mlir/Dialect/StandardOps/IR/Ops.h>
#include <mlir/IR/AsmState.h>
#include <mlir/IR/Builders.h>
#include <mlir/IR/BuiltinOps.h>
#include <mlir/IR/MLIRContext.h>
#include <fstream>
#include <iostream>
#include <string>
#include "llvm/Support/CommandLine.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "paddle/infrt/common/global.h"
#include "paddle/infrt/common/string.h"
#include "paddle/infrt/dialect/dense_tensor.h"
#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
#include "paddle/infrt/dialect/init_dialects.h"
#include "paddle/infrt/dialect/pd/ir/pd_ops.h"
#include "paddle/infrt/dialect/tensor_shape.h"
......
// RUN: infrtopt --canonicalize %s | FileCheck %s
// RUN: infrtopt --pd-op-fuse %s | FileCheck %s
// CHECK-LABEL: @main
func @main() -> tensor<?xf32> {
%a = "pd.feed"() {name="input0"} : () -> tensor<?xf32>
......
......@@ -23,16 +23,6 @@ def PD_FetchOp : PD_Op<"fetch", [Terminator]> {
let arguments = (ins PD_Tensor :$inputs, StrAttr:$name);
}
def PD_ReturnOp : PD_Op<"return", [Terminator]> {
let summary = "return Op";
let description = [{
Fetch tensor from the graph.
}];
let arguments = (ins Variadic<PD_Tensor>:$inputs);
}
def PD_GraphOp : PD_Op<"graph", [SingleBlockImplicitTerminator<"::infrt::ReturnOp">]> {
let summary = "paddle graph Op";
let description = [{
......
......@@ -16,8 +16,6 @@ import paddle.fluid.framework as framework
from paddle.fluid import core
from paddle import compat as cpt
ops_having_canonicalization = {"elementwise_add", }
# collect original ops: ops which have both inference and grad definitions
def get_original_ops():
......@@ -195,7 +193,7 @@ def generate_all_ops_inputs_outputs_map(op_descs):
# function to generate paddle op dialect file
def convert_op_proto_into_mlir(op_descs):
dst_dialect_file = "../../paddle/infrt/dialect/pd_ops.td"
dst_dialect_file = "../../paddle/infrt/dialect/pd/ir/pd_ops.td"
custom_dialect_file = "custom_pdop.td"
# 1. Head files
......@@ -214,7 +212,7 @@ def convert_op_proto_into_mlir(op_descs):
"include \"mlir/Interfaces/InferTypeOpInterface.td\"",
"include \"mlir/Interfaces/LoopLikeInterface.td\"",
"include \"mlir/IR/OpBase.td\"",
"include \"paddle/infrt/dialect/pd_op_base.td\"",
"include \"paddle/infrt/dialect/pd/ir/pd_op_base.td\"",
"",
]
......@@ -245,7 +243,6 @@ def convert_op_proto_into_mlir(op_descs):
op_type=op_type,
left_brace="{")
SUMMARY = ' let summary = "{} op";\n'.format(op_type)
CANONICALIZATION = "let hasCanonicalizer = 1;" if op_type in ops_having_canonicalization else ""
# 2.2 Description
contents = ""
......@@ -348,7 +345,6 @@ def convert_op_proto_into_mlir(op_descs):
ops_mlir_file.write(DESCRIPTION)
ops_mlir_file.write(ARGUMENTS)
ops_mlir_file.write(RESULTS)
ops_mlir_file.write(CANONICALIZATION)
ops_mlir_file.write("}\n")
print("Skipped ops num: " + str(len(skipped_op_list)))
......