Unverified commit 852d7a12, authored by hong, committed by GitHub

[NewIR] Fix new ir unsqueeze op bug (#55212)

* support optional input in new_ir

* polish code

* add coverage test

* update

* update

* add unit test

* remove duplicated code

* update

* fix assign error

* revert test arg min max

* update

* fix bug

* polish code

* update

* fix unique and close op bug

* update

* update

* revert test code

* revert unique test

* polish code

* remove useless code

---------
Co-authored-by: zhangbo9674 <zhangbo54@baidu.com>
Parent f4bdfa60
@@ -178,6 +178,8 @@ void HandleForSpecialOp(ir::Operation* op,
     auto var = CreateVar(out_value, name, scope, local_scope);
     auto tensor_array = var->GetMutable<paddle::framework::TensorRefArray>();
+    // clear tensor array
+    tensor_array->clear();
     for (size_t i = 0; i < input_num; ++i) {
       auto value = op->operand(i);
@@ -203,9 +205,11 @@ void HandleForSpecialOp(ir::Operation* op,
     // change operand name to param_name
     auto orig_name = name_map->at(in_ptr);
-    (*name_map)[in_ptr] = param_name;
-    scope->Rename(orig_name, param_name);
+    if (scope->FindVar(param_name) == nullptr) {
+      scope->Rename(orig_name, param_name);
+    }
+    (*name_map)[in_ptr] = param_name;
   }
   if (op_name == "builtin.get_parameter") {
     VLOG(6) << "Handle for builtin.get_parameter:";
...
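The guard in the hunk above is the heart of the "fix assign error" bullet: the operand's variable is renamed to param_name only when the scope does not already hold a variable of that name, while the name map is repointed unconditionally. A minimal, self-contained C++ sketch of that behaviour with a toy scope (ToyScope and its members are illustrative stand-ins, not Paddle's Scope API):

    #include <cassert>
    #include <map>
    #include <string>

    struct ToyScope {
      std::map<std::string, int> vars;
      bool Has(const std::string& name) const { return vars.count(name) > 0; }
      void Rename(const std::string& from, const std::string& to) {
        vars[to] = vars.at(from);  // move the value under the new name
        vars.erase(from);
      }
    };

    int main() {
      ToyScope scope;
      scope.vars["inner_var_3"] = 7;

      std::string name = "inner_var_3";           // current operand name
      const std::string param_name = "fc_0.w_0";  // target parameter name

      // Patched logic: rename at most once, always repoint the name entry.
      if (!scope.Has(param_name)) {
        scope.Rename(name, param_name);
      }
      name = param_name;

      assert(scope.Has("fc_0.w_0") && !scope.Has("inner_var_3"));
      return 0;
    }

Without the guard, a second operand mapping to the same parameter name would attempt to rename over an existing variable; with it, the rename happens at most once per parameter name.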
@@ -33,6 +33,7 @@
 #include "paddle/phi/core/kernel_context.h"
 #include "paddle/fluid/ir/dialect/kernel_attribute.h"
+#include "paddle/fluid/ir/dialect/kernel_type.h"
 #include "paddle/fluid/ir/dialect/pd_attribute.h"
 #include "paddle/fluid/ir/interface/op_yaml_info_parser.h"
 #include "paddle/phi/core/infermeta_utils.h"
@@ -109,6 +110,7 @@ void BuildPhiContext(
       ctx->EmplaceBackInput(in_ptr);
       continue;
     }
+
     auto in_var_name = name_map.at(ptr);
     VLOG(6) << "ctx->EmplaceBackInput: " << t << "\t" << in_var_name;
@@ -154,9 +156,27 @@ void BuildPhiContext(
     auto& tensor_attr_type = op_yaml_info.TensorAttrTypeName(t);
     VLOG(6) << "ctx->EmplaceBack mutable attr: " << t << "\t" << in_var_name;
     if (tensor_attr_type == "paddle::dialect::IntArrayAttribute") {
+      if (ptr.type().isa<paddle::dialect::AllocatedDenseTensorType>()) {
       phi::Attribute r1 = phi::TensorRef(
           &(inner_scope->FindVar(in_var_name)->Get<phi::DenseTensor>()));
       ctx->EmplaceBackAttr(r1);
+      } else if (ptr.type().isa<ir::VectorType>()) {
+        auto& tensor_array = inner_scope->FindVar(in_var_name)
+                                 ->Get<paddle::framework::TensorRefArray>();
+        if (tensor_array.size() == 1) {
+          ctx->EmplaceBackAttr(phi::TensorRef(tensor_array[0]));
+        } else {
+          std::vector<phi::TensorRef> vec_ref;
+          for (size_t i = 0; i < tensor_array.size(); ++i) {
+            vec_ref.emplace_back(phi::TensorRef(tensor_array[i]));
+          }
+          ctx->EmplaceBackAttr(vec_ref);
+        }
+      } else {
+        PADDLE_THROW(phi::errors::Unimplemented(
+            " [%s] only support dense tensor and vector type ",
+            tensor_attr_type));
+      }
     } else if (tensor_attr_type == "paddle::dialect::ScalarAttribute") {
       phi::Attribute r1 = phi::TensorRef(
           &(inner_scope->FindVar(in_var_name)->Get<phi::DenseTensor>()));
...
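The IntArrayAttribute branch above now dispatches on the operand's IR type: an allocated dense tensor is emplaced as a single phi::TensorRef, a vector-typed operand is read back as a TensorRefArray and packed either as one TensorRef (size 1) or as a std::vector<phi::TensorRef>, and any other type raises Unimplemented. A self-contained sketch of that packing decision using toy types in place of phi's (here a "tensor" is an int and a ref is a pointer; the names are illustrative):

    #include <iostream>
    #include <stdexcept>
    #include <variant>
    #include <vector>

    // An attribute slot holds either a single ref or a list of refs.
    using TensorRef = const int*;
    using Attribute = std::variant<TensorRef, std::vector<TensorRef>>;

    // Mirrors the packing decision in the patched branch: size-1 arrays
    // collapse to a single ref; larger arrays become a vector of refs.
    Attribute PackIntArrayAttr(const std::vector<TensorRef>& tensor_array) {
      if (tensor_array.empty()) {
        throw std::runtime_error("only dense tensor and vector supported");
      }
      if (tensor_array.size() == 1) {
        return Attribute{tensor_array[0]};
      }
      return Attribute{tensor_array};
    }

    int main() {
      int a = 2, b = 3;
      std::cout << PackIntArrayAttr({&a}).index() << "\n";      // 0: one ref
      std::cout << PackIntArrayAttr({&a, &b}).index() << "\n";  // 1: list
      return 0;
    }

Collapsing size-1 arrays to a single ref keeps the existing single-tensor IntArray path working unchanged; only genuinely multi-tensor axes take the new vector route.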
@@ -81,6 +81,7 @@ class OpNameNormalizer {
       const std::string& arg_name) {
     bool is_grad_op = (op_type.find(kPhiGradSuffix) != std::string::npos);
     bool is_grad_arg = (arg_name.find(kPhiGradSuffix) != std::string::npos);
+
     if (is_grad_op && is_grad_arg) {
       std::string target = kPhiGradSuffix;
       std::string data = kFluidVarGradSuffix;
...
@@ -622,6 +622,7 @@ void OpTranscriber::RecordOpResultMapping(TranslationContext* param_map,
         generated_by_vector = false;
       }
     }
+
     (*param_map)[arg_name] = VariableDefiningInfo(
         value, generated_by_vector, generated_by_vector ? idx_in_vector : -1);
     idx_in_vector++;
...
@@ -2530,6 +2530,7 @@
   int_array:
     sections :
       data_type : int
+      tensor_name : AxesTensor

 - op : sqrt
   backward : sqrt_grad, sqrt_double_grad (sqrt_grad_grad)
...
@@ -43,6 +43,56 @@ IntArrayBase<phi::DenseTensor>::IntArrayBase(
   }
 }

+template <>
+IntArrayBase<phi::DenseTensor>::IntArrayBase(
+    const std::vector<phi::TensorRef>& tensor_ref_list) {
+  is_from_tensor_ = true;
+  for (size_t i = 0; i < tensor_ref_list.size(); ++i) {
+    DataType data_type = tensor_ref_list[i].Get()->dtype();
+    switch (data_type) {
+      case DataType::INT32:
+        if (tensor_ref_list[i].Get()->place().GetType() ==
+            AllocationType::CPU) {
+          array_.push_back(*tensor_ref_list[i].Get()->template data<int32_t>());
+        } else {
+          phi::DenseTensor tensor_tmp;
+          phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance();
+          auto dev_ctx = pool.Get(tensor_ref_list[i].Get()->place());
+          phi::Copy(*dev_ctx,
+                    *(tensor_ref_list[i].Get()),
+                    CPUPlace(),
+                    true,
+                    &tensor_tmp);
+          array_.push_back(*tensor_tmp.template data<int32_t>());
+        }
+        break;
+      case DataType::INT64:
+        if (tensor_ref_list[i].Get()->place().GetType() ==
+            AllocationType::CPU) {
+          array_.push_back(*tensor_ref_list[i].Get()->template data<int64_t>());
+        } else {
+          phi::DenseTensor tensor_tmp;
+          phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance();
+          auto dev_ctx = pool.Get(tensor_ref_list[i].Get()->place());
+          phi::Copy(*dev_ctx,
+                    *(tensor_ref_list[i].Get()),
+                    CPUPlace(),
+                    true,
+                    &tensor_tmp);
+          array_.push_back(*tensor_tmp.template data<int64_t>());
+        }
+        break;
+      default:
+        PD_THROW(
+            "Data type error. Currently, The data type of IntArrayBase "
+            "only supports Tensor with int32 and int64, "
+            "but now received `",
+            data_type,
+            "`.");
+    }
+  }
+}
+
 template <>
 IntArrayBase<phi::DenseTensor>::IntArrayBase(
     const std::vector<phi::DenseTensor>& tensor_list) {
...
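The constructor added above reads exactly one integer from each referenced tensor, staging a copy back to CPU first when the tensor lives on another device. A CPU-only, self-contained analogue of the gather loop (MiniTensorRef and GatherIntArray are hypothetical names, and the phi::Copy device round-trip is omitted):

    #include <cassert>
    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // A "tensor" here is just a typed pointer to one element, which is all
    // the real constructor reads per entry.
    enum class DataType { INT32, INT64 };
    struct MiniTensorRef {
      DataType dtype;
      const void* data;
    };

    std::vector<int64_t> GatherIntArray(
        const std::vector<MiniTensorRef>& refs) {
      std::vector<int64_t> array;
      array.reserve(refs.size());
      for (const auto& ref : refs) {
        switch (ref.dtype) {
          case DataType::INT32:
            array.push_back(*static_cast<const int32_t*>(ref.data));
            break;
          case DataType::INT64:
            array.push_back(*static_cast<const int64_t*>(ref.data));
            break;
          default:
            throw std::invalid_argument("IntArray supports int32/int64 only");
        }
      }
      return array;
    }

    int main() {
      int32_t axis0 = 1;
      int64_t axis1 = 3;
      auto axes = GatherIntArray({{DataType::INT32, &axis0},
                                  {DataType::INT64, &axis1}});
      assert(axes.size() == 2 && axes[0] == 1 && axes[1] == 3);
      return 0;
    }

This is exactly the shape of data unsqueeze's axes arrive in when each axis is a separate one-element tensor, which is why the constructor accepts a list of refs rather than one tensor.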
@@ -18,6 +18,7 @@ limitations under the License. */
 #include "paddle/phi/api/ext/exception.h"
 #include "paddle/phi/common/data_type.h"
+#include "paddle/phi/common/tensor_ref.h"

 namespace phi {
 class DDim;
@@ -62,6 +63,8 @@ class IntArrayBase {
   // The Tensor in vec must have only one element
   IntArrayBase(const std::vector<T>& tensor_list);  // NOLINT

+  explicit IntArrayBase(const std::vector<phi::TensorRef>& tensor_ref_list);
+
   template <typename OtherT>
   IntArrayBase(const IntArrayBase<OtherT>& other) : array_(other.GetData()) {}
@@ -87,6 +90,7 @@ class IntArrayBase {
   void AssignDataFromTensor(const T& tensor) {
     size_t n = tensor.numel();
     array_.reserve(n);
+
     switch (tensor.dtype()) {
       case DataType::INT32:
...
@@ -21,10 +21,10 @@
 #include "paddle/phi/api/ext/exception.h"
 #include "paddle/phi/common/data_type.h"
+#include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/enforce.h"

 namespace phi {
-class TensorBase;

 // In static model pre analysis, we can't get the data from tensor
 class TensorRef {
...
@@ -48,7 +48,8 @@ using Attribute = paddle::variant<bool,
                                   DataType,
                                   DataLayout,
                                   Place,
-                                  TensorRef>;
+                                  TensorRef,
+                                  std::vector<TensorRef>>;

 using AttributeMap = paddle::flat_hash_map<std::string, Attribute>;
...
@@ -159,6 +159,8 @@ template const DataType& InferMetaContext::AttrAt(size_t idx) const;
 template const DataLayout& InferMetaContext::AttrAt(size_t idx) const;
 template const Place& InferMetaContext::AttrAt(size_t idx) const;
 template const TensorRef& InferMetaContext::AttrAt(size_t idx) const;
+template const std::vector<TensorRef>& InferMetaContext::AttrAt(
+    size_t idx) const;

 MetaFnFactory& MetaFnFactory::Instance() {
   static MetaFnFactory g_meta_fn_map;
...
@@ -118,8 +118,7 @@ class InferMetaContext {
   } \
   }

-#define PD_SPECIALIZE_InferMetaFnCallHelper_FOR_TENSOR_SCALAR_INTARRAY( \
-    attr_type) \
+#define PD_SPECIALIZE_InferMetaFnCallHelper_FOR_TENSOR_SCALAR(attr_type) \
   template <typename... Tail> \
   struct InferMetaFnCallHelper<const attr_type&, Tail...> { \
     template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs> \
@@ -141,6 +140,32 @@ class InferMetaContext {
   } \
   }

+#define PD_SPECIALIZE_InferMetaFnCallHelper_FOR_TENSOR_INTARRAY(attr_type) \
+  template <typename... Tail> \
+  struct InferMetaFnCallHelper<const attr_type&, Tail...> { \
+    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs> \
+    static void Call(InferMetaContext* ctx, PreviousArgs&... pargs) { \
+      static_assert(out_idx == 0, \
+                    "InferMeta's Attributes should appear before Outputs."); \
+      const Attribute& t = ctx->AttrAt(attr_idx); \
+      static Attribute cmp_t = phi::TensorRef(nullptr); \
+      static Attribute vec_ref = \
+          std::vector<phi::TensorRef>({phi::TensorRef(nullptr)}); \
+      attr_type attr1; \
+      if (cmp_t.index() == t.index()) { \
+        attr1 = attr_type((*paddle::get<phi::TensorRef>(t).Get())); \
+      } else if (vec_ref.index() == t.index()) { \
+        attr1 = attr_type(paddle::get<std::vector<phi::TensorRef>>(t)); \
+      } else { \
+        attr1 = paddle::get<attr_type>(t); \
+      } \
+      InferMetaFnCallHelper< \
+          Tail...>::template Call<in_idx, attr_idx + 1, out_idx>(ctx, \
+                                                                 pargs..., \
+                                                                 attr1); \
+    } \
+  }
+
 template <typename T>
 struct InferMetaTypeTag {};
@@ -222,8 +247,8 @@ struct InferMetaFnImpl<Return (*)(Args...), infer_meta_fn> {
   PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(Backend);
   PD_SPECIALIZE_InferMetaFnCallHelper_FOR_ATTRIBUTE(DataLayout);
   PD_SPECIALIZE_InferMetaFnCallHelper_FOR_CONST_ATTRIBUTE_REF(std::string);
-  PD_SPECIALIZE_InferMetaFnCallHelper_FOR_TENSOR_SCALAR_INTARRAY(Scalar);
-  PD_SPECIALIZE_InferMetaFnCallHelper_FOR_TENSOR_SCALAR_INTARRAY(IntArray);
+  PD_SPECIALIZE_InferMetaFnCallHelper_FOR_TENSOR_SCALAR(Scalar);
+  PD_SPECIALIZE_InferMetaFnCallHelper_FOR_TENSOR_INTARRAY(IntArray);
   PD_SPECIALIZE_InferMetaFnCallHelper_FOR_CONST_ATTRIBUTE_REF(
       std::vector<bool>);
   PD_SPECIALIZE_InferMetaFnCallHelper_FOR_CONST_ATTRIBUTE_REF(std::vector<int>);
...
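Both new macros (the InferMeta helper above and the KernelCallHelper variant below) identify which alternative the Attribute variant currently holds by comparing paddle::variant::index() against statically constructed sentinel attributes, then extract with paddle::get. The same trick, self-contained with std::variant (TensorRefLike is a stand-in type):

    #include <cassert>
    #include <variant>
    #include <vector>

    struct TensorRefLike {
      const int* ptr = nullptr;
    };
    using Attribute =
        std::variant<int, TensorRefLike, std::vector<TensorRefLike>>;

    int main() {
      // Sentinels holding the alternatives to detect, as in the macro.
      static Attribute cmp_t = TensorRefLike{nullptr};
      static Attribute vec_ref =
          std::vector<TensorRefLike>({TensorRefLike{nullptr}});

      // A runtime attribute that happens to hold the vector alternative.
      Attribute t =
          std::vector<TensorRefLike>{TensorRefLike{}, TensorRefLike{}};

      assert(t.index() != cmp_t.index());
      assert(t.index() == vec_ref.index());
      // Only now is std::get on the vector alternative safe, matching the
      // macro's paddle::get<std::vector<phi::TensorRef>>(t).
      const auto& refs = std::get<std::vector<TensorRefLike>>(t);
      assert(refs.size() == 2);
      return 0;
    }

The sentinels are static locals, so the alternative indices are computed once per specialization; any index that matches neither sentinel falls through to the plain paddle::get<attr_type>(t) path, preserving the old behaviour for directly stored attributes.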
@@ -147,5 +147,6 @@ template const DataType& KernelContext::AttrAt(size_t idx) const;
 template const DataLayout& KernelContext::AttrAt(size_t idx) const;
 template const Place& KernelContext::AttrAt(size_t idx) const;
 template const TensorRef& KernelContext::AttrAt(size_t idx) const;
+template const std::vector<TensorRef>& KernelContext::AttrAt(size_t idx) const;

 }  // namespace phi
@@ -221,7 +221,7 @@ namespace phi {
   } \
   }

-#define PD_SPECIALIZE_KernelCallHelper_FOR_TENSOR_SCALAR_INTARRAY(attr_type) \
+#define PD_SPECIALIZE_KernelCallHelper_FOR_TENSOR_SCALAR(attr_type) \
   template <typename... Tail> \
   struct KernelCallHelper<const attr_type&, Tail...> { \
     template <int dev_ctx_idx, \
@@ -246,6 +246,34 @@ namespace phi {
   } \
   }

+#define PD_SPECIALIZE_KernelCallHelper_FOR_TENSOR_INTARRAY(attr_type) \
+  template <typename... Tail> \
+  struct KernelCallHelper<const attr_type&, Tail...> { \
+    template <int dev_ctx_idx, \
+              int in_idx, \
+              int attr_idx, \
+              int out_idx, \
+              typename... PreviousArgs> \
+    static void Compute(KernelContext* ctx, PreviousArgs&... pargs) { \
+      static_assert(out_idx == 0, \
+                    "Kernel's Attributes should appear before Outputs."); \
+      const Attribute& t = ctx->AttrAt(attr_idx); \
+      static Attribute cmp_t = phi::TensorRef(nullptr); \
+      static Attribute vec_ref = \
+          std::vector<phi::TensorRef>({phi::TensorRef(nullptr)}); \
+      attr_type attr1; \
+      if (cmp_t.index() == t.index()) { \
+        attr1 = attr_type(*paddle::get<phi::TensorRef>(t).Get()); \
+      } else if (vec_ref.index() == t.index()) { \
+        attr1 = attr_type(paddle::get<std::vector<phi::TensorRef>>(t)); \
+      } else { \
+        attr1 = paddle::get<attr_type>(t); \
+      } \
+      KernelCallHelper<Tail...>:: \
+          template Compute<dev_ctx_idx, in_idx, attr_idx + 1, out_idx>( \
+              ctx, pargs..., attr1); \
+    } \
+  }
+
 template <typename T> \
 struct TypeTag {};
@@ -325,8 +353,8 @@ struct KernelImpl<Return (*)(DevCtx, Args...), kernel_fn> {
   PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(DataLayout);
   PD_SPECIALIZE_KernelCallHelper_FOR_ATTRIBUTE(Place);
   PD_SPECIALIZE_KernelCallHelper_FOR_CONST_ATTRIBUTE_REF(std::string);
-  PD_SPECIALIZE_KernelCallHelper_FOR_TENSOR_SCALAR_INTARRAY(Scalar);
-  PD_SPECIALIZE_KernelCallHelper_FOR_TENSOR_SCALAR_INTARRAY(IntArray);
+  PD_SPECIALIZE_KernelCallHelper_FOR_TENSOR_SCALAR(Scalar);
+  PD_SPECIALIZE_KernelCallHelper_FOR_TENSOR_INTARRAY(IntArray);
   PD_SPECIALIZE_KernelCallHelper_FOR_CONST_ATTRIBUTE_REF(std::vector<bool>);
   PD_SPECIALIZE_KernelCallHelper_FOR_CONST_ATTRIBUTE_REF(std::vector<int>);
   PD_SPECIALIZE_KernelCallHelper_FOR_CONST_ATTRIBUTE_REF(std::vector<int64_t>);
...