Unverified Commit 14425c06 authored by zhangbo9674 and committed by GitHub

[IR] Refine OP auto code gen (#54186)

* refine auto gen

* refine code

* refine code

* fix bug

* fix bug

* fix bug

* fix bug

* fix bug

* fix bug
Parent 4bcb5cc4
@@ -8,16 +8,17 @@ set(op_forward_yaml_file1
${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parsed_ops/ops.parsed.yaml
)
set(op_forward_yaml_file2
${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parsed_ops/static_ops.parsed.yaml
${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parsed_ops/legacy_ops.parsed.yaml
)
set(op_backward_yaml_file1
${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parsed_ops/backward_ops.parsed.yaml
)
set(op_backward_yaml_file2
${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parsed_ops/static_backward.parsed.yaml
${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parsed_ops/legacy_backward_ops.parsed.yaml
)
set(op_yaml_file3 ${PADDLE_SOURCE_DIR}/paddle/fluid/dialect/pd_op.yaml)
set(op_yaml_files
${op_forward_yaml_file1},${op_forward_yaml_file2},${op_backward_yaml_file1},${op_backward_yaml_file2}
${op_forward_yaml_file1},${op_forward_yaml_file2},${op_backward_yaml_file1},${op_backward_yaml_file2},${op_yaml_file3}
)
set(op_namespace paddle,dialect)
set(dialect_name pd)
......
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/ir/core/op_base.h"
namespace paddle {
namespace dialect {
#define OPNAME(op_name) "pd." #op_name
#define REGISTER_EMPTY_OP(op_name, className)                    \
  class className : public ir::Op<className> {                   \
   public:                                                       \
    static const char *name() { return OPNAME(op_name); }        \
    static constexpr const char **attributes_name = nullptr;     \
    static constexpr uint32_t attributes_num = 0;                \
    static void verify(const std::vector<ir::OpResult> &inputs,  \
                       const std::vector<ir::Type> &outputs,     \
                       const ir::AttributeMap &attributes) {     \
      LOG(WARNING) << "This is a fake verify";                   \
    }                                                            \
  };
// TODO(zhangbo): These are temporary placeholder definitions; remove each one
// as the corresponding operator gains a real, generated definition.
REGISTER_EMPTY_OP(conv2d, Conv2DOp);          // To be customized: conv2d
REGISTER_EMPTY_OP(feed, FeedOp);              // To be customized: feed
REGISTER_EMPTY_OP(batch_norm, BatchNormOp);   // To be customized: batch_norm
REGISTER_EMPTY_OP(batch_norm_, BatchNormOp_); // To be customized: batch_norm_
REGISTER_EMPTY_OP(elementwise_add,
                  ElementwiseAddOp); // To be customized: add (elementwise_add)
REGISTER_EMPTY_OP(pool2d, Pool2DOp); // To be customized: pool2d
REGISTER_EMPTY_OP(
    flatten_contiguous_range,
    FlattenContiguousRangeOp); // flatten (flatten_contiguous_range)
REGISTER_EMPTY_OP(matmul_v2,
                  MatmulV2Op); // To be customized: matmul (matmul_v2)
REGISTER_EMPTY_OP(reshape2, Reshape2Op); // To be customized: reshape
REGISTER_EMPTY_OP(softmax_with_cross_entropy,
                  SoftmaxWithCrossEntropyOp); // cross_entropy_with_softmax
                                              // (softmax_with_cross_entropy)
REGISTER_EMPTY_OP(reduce_mean,
                  ReduceMeanOp); // To be customized: mean (reduce_mean)
REGISTER_EMPTY_OP(top_k_v2, TopKV2Op); // topk (top_k_v2)
REGISTER_EMPTY_OP(fill_constant,
                  FillConstantOp); // To be customized: full (fill_constant)
REGISTER_EMPTY_OP(reduce_mean_grad,
                  ReduceMeanGradOp); // To be customized: reduce_mean_grad
REGISTER_EMPTY_OP(
    softmax_with_cross_entropy_grad,
    SoftmaxWithCrossEntropyGradOp); // cross_entropy_with_softmax_grad
                                    // (softmax_with_cross_entropy_grad)
REGISTER_EMPTY_OP(
    elementwise_add_grad,
    ElementwiseAddGradOp); // To be customized: add_grad (elementwise_add_grad)
REGISTER_EMPTY_OP(
    matmul_v2_grad,
    MatmulV2GradOp); // To be customized: matmul_grad (matmul_v2_grad)
REGISTER_EMPTY_OP(
    flatten_contiguous_range_grad,
    FlattenContiguousRangeGradOp); // flatten_grad
                                   // (flatten_contiguous_range_grad)
REGISTER_EMPTY_OP(pool2d_grad, Pool2DGradOp); // To be customized: pool2d_grad
REGISTER_EMPTY_OP(batch_norm_grad,
                  BatchNormGradOp); // To be customized: batch_norm_grad
REGISTER_EMPTY_OP(conv2d_grad, Conv2DGradOp); // To be customized: conv2d_grad
REGISTER_EMPTY_OP(sum, SumOp);                // To be customized: sum (reduce_sum)
REGISTER_EMPTY_OP(fetch_v2, FetchV2Op);       // To be customized: fetch_v2
REGISTER_EMPTY_OP(add, AddOp);
REGISTER_EMPTY_OP(add_grad, AddGradOp);
REGISTER_EMPTY_OP(matmul, MatMulOp);
REGISTER_EMPTY_OP(matmul_grad, MatMulGradOp);
REGISTER_EMPTY_OP(reshape, ReshapeOp);
REGISTER_EMPTY_OP(reshape_grad, ReshapeGradOp);
REGISTER_EMPTY_OP(mean, MeanOp);
REGISTER_EMPTY_OP(cross_entropy_with_softmax, CrossEntropyOp);
REGISTER_EMPTY_OP(cross_entropy_with_softmax_grad, CrossEntropyGradOp);
REGISTER_EMPTY_OP(topk, TopKOp);
REGISTER_EMPTY_OP(topk_grad, TopKGradOp);
REGISTER_EMPTY_OP(full, FullOp);
REGISTER_EMPTY_OP(add_n, AddNOp);
} // namespace dialect
} // namespace paddle
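For reference, each REGISTER_EMPTY_OP invocation above expands to a minimal placeholder class; REGISTER_EMPTY_OP(add, AddOp), for instance, expands (spelled out directly from the macro definition) to:

class AddOp : public ir::Op<AddOp> {
 public:
  static const char *name() { return "pd.add"; }  // OPNAME(add)
  static constexpr const char **attributes_name = nullptr;
  static constexpr uint32_t attributes_num = 0;
  // Placeholder verifier: performs no real checks, only logs a warning.
  static void verify(const std::vector<ir::OpResult> &inputs,
                     const std::vector<ir::Type> &outputs,
                     const ir::AttributeMap &attributes) {
    LOG(WARNING) << "This is a fake verify";
  }
};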
This diff is collapsed.
@@ -16,7 +16,6 @@
#include "paddle/fluid/dialect/pd_attribute.h"
// NOTE(zhangbo9674): File pd_op.h is generated by op_gen.py, see details in
// paddle/fluid/dialect/CMakeLists.txt.
#include "paddle/fluid/dialect/legacy_pd_op.h"
#include "paddle/fluid/dialect/pd_op.h"
#include "paddle/fluid/dialect/pd_type.h"
#include "paddle/fluid/dialect/pd_type_storage.h"
@@ -111,42 +110,6 @@ void PaddleDialect::initialize() {
>();
RegisterInterfaces<ParameterConvertInterface>();
RegisterOps<Conv2DOp,
FeedOp,
BatchNormOp,
BatchNormOp_,
ElementwiseAddOp,
Pool2DOp,
FlattenContiguousRangeOp,
MatmulV2Op,
Reshape2Op,
SoftmaxWithCrossEntropyOp,
ReduceMeanOp,
TopKV2Op,
FillConstantOp,
ReduceMeanGradOp,
SoftmaxWithCrossEntropyGradOp,
ElementwiseAddGradOp,
MatmulV2GradOp,
FlattenContiguousRangeGradOp,
Pool2DGradOp,
BatchNormGradOp,
Conv2DGradOp,
SumOp,
FetchV2Op,
AddOp,
MatMulOp,
ReshapeOp,
CrossEntropyOp,
TopKOp,
FullOp,
MeanOp,
AddNOp,
AddGradOp,
MatMulGradOp,
ReshapeGradOp,
CrossEntropyGradOp,
TopKGradOp>();
}
void PaddleDialect::PrintType(ir::Type type, std::ostream &os) {
......
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/dialect/utils.h"
#include "paddle/ir/core/op_base.h"
using OpInfoTuple = std::tuple<std::vector<paddle::dialect::OpInputInfo>,
std::vector<paddle::dialect::OpAttributeInfo>,
std::vector<paddle::dialect::OpOutputInfo>>;
namespace paddle {
namespace dialect {
class GetOpInfoInterface : public ir::OpInterfaceBase<GetOpInfoInterface> {
public:
struct Concept {
explicit Concept(OpInfoTuple (*get_op_info)(ir::Operation *))
: get_op_info_(get_op_info) {}
OpInfoTuple (*get_op_info_)(ir::Operation *);
};
template <class ConcreteOp>
struct Model : public Concept {
static OpInfoTuple GetOpInfo(ir::Operation *op) {
      ConcreteOp concrete_op = op->dyn_cast<ConcreteOp>();
      if (concrete_op == nullptr) throw("concrete_op is nullptr");
      return concrete_op.GetOpInfo();
}
Model() : Concept(GetOpInfo) {}
};
GetOpInfoInterface(ir::Operation *op, Concept *impl)
: ir::OpInterfaceBase<GetOpInfoInterface>(op), impl_(impl) {}
OpInfoTuple GetOpInfo() { return impl_->get_op_info_(operation()); }
private:
Concept *impl_;
};
} // namespace dialect
} // namespace paddle
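As a usage sketch, mirroring the test further down in this diff (here `some_op` is a hypothetical ir::Operation* whose concrete op class defines a static OpInfoTuple GetOpInfo()):

// Hypothetical usage: some_op is an ir::Operation* created elsewhere.
paddle::dialect::GetOpInfoInterface interface =
    some_op->dyn_cast<paddle::dialect::GetOpInfoInterface>();
// Dispatches through Concept/Model to the concrete op's GetOpInfo().
OpInfoTuple op_info = interface.GetOpInfo();
// Input metadata is the first element of the tuple.
const paddle::dialect::OpInputInfo &first_input = std::get<0>(op_info)[0];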
- name: feed
inputs:
- typename: Tensor[]
name: x
optional: false
no_need_buffer: false
data_transform: {}
attrs:
- {typename: int, name: col}
outputs:
- {typename: Tensor, name: out, optional: false, intermediate: false}
no_need_buffer: null
data_transform: null
infer_meta:
func: null
param: null
kernel:
func: null
param: null
backend: null
layout: null
data_type: null
dispatch: null
force_backend: null
inplace: null
backward: null
- name: fetch
inputs:
- typename: Tensor
name: x
optional: false
no_need_buffer: false
data_transform: {}
attrs:
- {typename: int, name: col}
outputs:
- {typename: 'Tensor[]', name: out, optional: false, intermediate: false}
no_need_buffer: null
data_transform: null
infer_meta:
func: null
param: null
kernel:
func: null
param: null
backend: null
layout: null
data_type: null
dispatch: null
force_backend: null
inplace: null
backward: null
@@ -132,5 +132,45 @@ inline DenseTensorTypeStorage::DataLayout TransToIrDataLayout(
}
}
struct OpInputInfo {
std::string name;
std::string type_name;
bool optional = false;
bool no_need_buffer = false;
OpInputInfo(std::string name,
std::string type_name,
bool optional,
bool no_need_buffer)
: name(name),
type_name(type_name),
optional(optional),
no_need_buffer(no_need_buffer) {}
};
struct OpOutputInfo {
std::string name;
std::string type_name;
bool optional = false;
bool intermediate = false;
OpOutputInfo(std::string name,
std::string type_name,
bool optional,
bool intermediate)
: name(name),
type_name(type_name),
optional(optional),
intermediate(intermediate) {}
};
struct OpAttributeInfo {
std::string name;
std::string type_name;
std::string data_type;
OpAttributeInfo(std::string name,
std::string type_name,
std::string data_type)
: name(name), type_name(type_name), data_type(data_type) {}
};
} // namespace dialect
} // namespace paddle
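For illustration, a hand-written GetOpInfo() assembled from these structs might look like the sketch below. The shape of the op (one input "x", no attributes, one output "out") matches the pd.abs case exercised in the test later in this diff, while the type_name strings are hypothetical stand-ins for what op_gen.py emits from the parsed YAML:

// Hypothetical equivalent of a generated GetOpInfo() for a unary op.
OpInfoTuple GetOpInfo() {
  std::vector<paddle::dialect::OpInputInfo> inputs = {
      paddle::dialect::OpInputInfo("x",
                                   "paddle::dialect::DenseTensorType",
                                   /*optional=*/false,
                                   /*no_need_buffer=*/false)};
  std::vector<paddle::dialect::OpAttributeInfo> attributes;  // abs has none
  std::vector<paddle::dialect::OpOutputInfo> outputs = {
      paddle::dialect::OpOutputInfo("out",
                                    "paddle::dialect::DenseTensorType",
                                    /*optional=*/false,
                                    /*intermediate=*/false)};
  return std::make_tuple(inputs, attributes, outputs);
}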
@@ -296,7 +296,7 @@
- op : einsum
args : (Tensor[] x, str equation)
output : Tensor, Tensor[]{x.size()}, Tensor[]{x.size()}
output : Tensor(out), Tensor[](inner_cache){x.size()}, Tensor[](xshape){x.size()}
infer_meta :
func : EinsumRawInferMeta
param : [x, equation]
......
@@ -50,10 +50,7 @@ class InferShapeInterface : public ir::OpInterfaceBase<InferShapeInterface> {
concrete_op.InferShape();
}
Model() : Concept(InferShape) {
static_assert(sizeof(Model) == sizeof(Concept),
"sizeof(Model) != sizeof(Concept)");
}
Model() : Concept(InferShape) {}
};
InferShapeInterface(ir::Operation *op, Concept *impl)
......
@@ -15,6 +15,7 @@
#include <gtest/gtest.h>
#include "paddle/fluid/dialect/pd_dialect.h"
#include "paddle/fluid/dialect/pd_interface.h"
#include "paddle/fluid/dialect/pd_type.h"
#include "paddle/fluid/dialect/utils.h"
#include "paddle/ir/core/builtin_attribute.h"
@@ -177,7 +178,21 @@ TEST(program_test, program) {
EXPECT_EQ(*(dst_tensor->data<float>() + i), data_a[i] + data_b[i]);
}
// (7) Def SetParameterOp(c, "c")
// (7) Def AbsOp(b)
ir::OpInfo abs_info = ctx->GetRegisteredOpInfo("pd.abs");
std::vector<ir::OpResult> operands = {op1->GetResultByIndex(0)};
std::unordered_map<std::string, ir::Attribute> abs_op_attribute;
std::vector<ir::Type> output_types = {dense_tensor_dtype};
ir::OperationArgument abs_argument(abs_info);
abs_argument.addOperands(operands.begin(), operands.end());
abs_argument.addAttributes(abs_op_attribute.begin(), abs_op_attribute.end());
abs_argument.addTypes(output_types.begin(), output_types.end());
ir::Operation *abs_op = ir::Operation::create(std::move(abs_argument));
paddle::dialect::GetOpInfoInterface interface =
abs_op->dyn_cast<paddle::dialect::GetOpInfoInterface>();
EXPECT_EQ(std::get<0>(interface.GetOpInfo())[0].name, "x");
// (8) Def SetParameterOp(c, "c")
std::string op4_name =
builtin_dialect->name() + "." + std::string(ir::SetParameterOp::name());
ir::OpInfo op4_info = ctx->GetRegisteredOpInfo(op4_name);
......