未验证 提交 75c4a24c 编写于 作者: Z zhangbo9674 提交者: GitHub

[IR] Add IrMetaTensor (#56973)

* add meta tensor

* refine code

* fix bug

* fix bug
上级 1b7c1c56
...@@ -16,6 +16,7 @@ limitations under the License. */ ...@@ -16,6 +16,7 @@ limitations under the License. */
#include "paddle/fluid/framework/feed_fetch_type.h" #include "paddle/fluid/framework/feed_fetch_type.h"
#include "paddle/fluid/framework/raw_tensor.h" #include "paddle/fluid/framework/raw_tensor.h"
#include "paddle/fluid/framework/string_array.h" #include "paddle/fluid/framework/string_array.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.h"
#include "paddle/fluid/prim/utils/static/desc_tensor.h" #include "paddle/fluid/prim/utils/static/desc_tensor.h"
#include "paddle/fluid/primitive/type/lazy_tensor.h" #include "paddle/fluid/primitive/type/lazy_tensor.h"
...@@ -44,5 +45,6 @@ template class TypeInfoTraits<phi::TensorBase, paddle::prim::DescTensor>; ...@@ -44,5 +45,6 @@ template class TypeInfoTraits<phi::TensorBase, paddle::prim::DescTensor>;
template class TypeInfoTraits<phi::TensorBase, paddle::primitive::LazyTensor>; template class TypeInfoTraits<phi::TensorBase, paddle::primitive::LazyTensor>;
template class TypeInfoTraits<phi::TensorBase, template class TypeInfoTraits<phi::TensorBase,
paddle::framework::VariableRefArray>; paddle::framework::VariableRefArray>;
template class TypeInfoTraits<phi::TensorBase, paddle::dialect::IrMetaTensor>;
} // namespace phi } // namespace phi
...@@ -286,27 +286,25 @@ def GenBuildOutputs( ...@@ -286,27 +286,25 @@ def GenBuildOutputs(
build_output_str = ' VLOG(4) << "Builder construction outputs";\n' build_output_str = ' VLOG(4) << "Builder construction outputs";\n'
CREATE_INPUT_METATENSOR_TEMPLATE = """ CREATE_INPUT_METATENSOR_TEMPLATE = """
VLOG(4) << "Builder construction dense_{name}"; VLOG(4) << "Builder construction dense_{name}";
phi::DenseTensor dense_{name}(std::make_unique<paddle::experimental::DefaultAllocator>(paddle::platform::CPUPlace()).get(), paddle::dialect::IrMetaTensor ir_meta_tensor_{name}(paddle::dialect::TransToPhiDataType({name}.dtype()),
phi::DenseTensorMeta(paddle::dialect::TransToPhiDataType({name}.dtype()),
{name}.dims(), {name}.dims(),
{name}.data_layout(), {name}.data_layout(),
{name}.lod(), {name}.lod(),
{name}.offset())); {name}.offset());
VLOG(4) << "Builder construction meta_{name}"; VLOG(4) << "Builder construction meta_{name}";
phi::MetaTensor meta_{name}(&dense_{name}); phi::MetaTensor meta_{name}(&ir_meta_tensor_{name});
""" """
CREATE_INPUT_VEC_METATENSOR_TEMPLATE = """ std::vector<phi::DenseTensor> vec_dense_{name}; CREATE_INPUT_VEC_METATENSOR_TEMPLATE = """ std::vector<paddle::dialect::IrMetaTensor> vec_ir_meta_tensor_{name};
for (size_t i=0; i < static_cast<size_t>({name}.size()); i++) {{ for (size_t i=0; i < static_cast<size_t>({name}.size()); i++) {{
vec_dense_{name}.push_back(phi::DenseTensor(std::make_unique<paddle::experimental::DefaultAllocator>(paddle::platform::CPUPlace()).get(), vec_ir_meta_tensor_{name}.push_back(paddle::dialect::IrMetaTensor(paddle::dialect::TransToPhiDataType({name}[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
phi::DenseTensorMeta(paddle::dialect::TransToPhiDataType({name}[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
{name}[i].dyn_cast<paddle::dialect::DenseTensorType>().dims(), {name}[i].dyn_cast<paddle::dialect::DenseTensorType>().dims(),
{name}[i].dyn_cast<paddle::dialect::DenseTensorType>().data_layout(), {name}[i].dyn_cast<paddle::dialect::DenseTensorType>().data_layout(),
{name}[i].dyn_cast<paddle::dialect::DenseTensorType>().lod(), {name}[i].dyn_cast<paddle::dialect::DenseTensorType>().lod(),
{name}[i].dyn_cast<paddle::dialect::DenseTensorType>().offset()))); {name}[i].dyn_cast<paddle::dialect::DenseTensorType>().offset()));
}} }}
std::vector<phi::MetaTensor> vec_meta_{name}; std::vector<phi::MetaTensor> vec_meta_{name};
for (size_t i=0; i < vec_dense_{name}.size(); i++) {{ for (size_t i=0; i < vec_ir_meta_tensor_{name}.size(); i++) {{
vec_meta_{name}.push_back(phi::MetaTensor(&vec_dense_{name}[i])); vec_meta_{name}.push_back(phi::MetaTensor(&vec_ir_meta_tensor_{name}[i]));
}} }}
std::vector<const phi::MetaTensor*> meta_{name}; std::vector<const phi::MetaTensor*> meta_{name};
......
...@@ -101,6 +101,7 @@ CC_FILE_TEMPLATE = """// This file is generated by "paddle/fluid/ir/dialect/op_g ...@@ -101,6 +101,7 @@ CC_FILE_TEMPLATE = """// This file is generated by "paddle/fluid/ir/dialect/op_g
#include "{h_file}" #include "{h_file}"
#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h" #include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_type.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h" #include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_attribute.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.h"
#include "paddle/ir/core/builtin_attribute.h" #include "paddle/ir/core/builtin_attribute.h"
#include "paddle/ir/core/builtin_type.h" #include "paddle/ir/core/builtin_type.h"
#include "paddle/ir/core/builtin_op.h" #include "paddle/ir/core/builtin_op.h"
......
...@@ -183,7 +183,7 @@ add_custom_target(ops_api_gen ALL DEPENDS ${ops_api_source_file}) ...@@ -183,7 +183,7 @@ add_custom_target(ops_api_gen ALL DEPENDS ${ops_api_source_file})
cc_library( cc_library(
pd_dialect_core pd_dialect_core
SRCS pd_attribute.cc pd_type.cc SRCS pd_attribute.cc pd_type.cc pd_meta_tensor.cc
DEPS phi pd_interface pd_trait type_info) DEPS phi pd_interface pd_trait type_info)
cc_library( cc_library(
pd_dialect_op pd_dialect_op
......
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.h"
#include "paddle/ir/core/enforce.h"
namespace paddle {
namespace dialect {
// Constructs a meta-only tensor directly from its individual meta fields.
// No storage is allocated; the members are pure bookkeeping.
IrMetaTensor::IrMetaTensor(phi::DataType data_type,
                           const phi::DDim& dims_in,
                           phi::DataLayout data_layout,
                           const LoD& lod_in,
                           size_t offset_in)
    : dims_(dims_in),
      dtype_(data_type),
      layout_(data_layout),
      lod_(lod_in),
      offset_(offset_in) {}
// Copy constructor: replicate every meta field of `other` via the member
// initializer list (the lod vectors are the only non-trivial copy).
IrMetaTensor::IrMetaTensor(const IrMetaTensor& other)
    : dims_(other.dims()),
      dtype_(other.dtype()),
      layout_(other.layout()),
      lod_(other.lod()),
      offset_(other.offset()) {}
// Copy-assignment: field-wise copy of the metadata. Self-assignment is
// harmless for these member types, so no explicit guard is required.
IrMetaTensor& IrMetaTensor::operator=(const IrMetaTensor& other) {
  dims_ = other.dims_;
  dtype_ = other.dtype_;
  layout_ = other.layout_;
  lod_ = other.lod_;
  offset_ = other.offset_;
  return *this;
}
// Move-assignment: transfer the meta fields from `other`.
//
// Fix: the previous implementation wrote std::move(other.dims()) and
// std::move(other.lod()). Those accessors return const references, so the
// std::move produced a const rvalue that bound to the COPY assignment — a
// silent copy, not a move (clang-tidy: performance-move-const-arg). Moving
// the members directly performs a real move for the allocating field (lod_
// is a vector of vectors). A self-move guard is added so `x = std::move(x)`
// is a guaranteed no-op.
IrMetaTensor& IrMetaTensor::operator=(IrMetaTensor&& other) noexcept {
  if (this != &other) {
    dims_ = other.dims_;      // DDim is a small value type; copy is the move.
    dtype_ = other.dtype_;
    layout_ = other.layout_;
    lod_ = std::move(other.lod_);  // steals the nested vectors' buffers
    offset_ = other.offset_;
  }
  return *this;
}
// Number of elements described by the metadata: the product of all dims.
int64_t IrMetaTensor::numel() const { return phi::product(dims()); }
// Deliberately unsupported: a meta-only tensor carries no data buffer and
// therefore has no physical placement; any query for one is a caller bug.
const phi::Place& IrMetaTensor::place() const {
  IR_THROW("Don't use IrMetaTensor::place method.");
}
void* IrMetaTensor::AllocateFrom(phi::Allocator* allocator,
phi::DataType dtype,
size_t requested_size,
bool fake_alloc) {
IR_THROW("Don't use IrMetaTensor::AllocateFrom method.");
}
} // namespace dialect
} // namespace paddle
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/allocator.h"
#include "paddle/phi/core/tensor_base.h"
#include "paddle/phi/core/tensor_meta.h"
namespace paddle {
namespace dialect {
using LoD = std::vector<std::vector<size_t>>;
// A meta-only tensor: implements the phi::TensorBase interface but holds
// just metadata (dtype, dims, layout, lod, offset) with no data buffer
// behind it. It is wrapped in a phi::MetaTensor by the generated op
// builders so shape/dtype inference can run over IR types — see the
// CREATE_INPUT_METATENSOR templates in the op codegen.
class IrMetaTensor : public phi::TensorBase,
                     public phi::TypeInfoTraits<phi::TensorBase, IrMetaTensor> {
 public:
  // Builds the tensor from its individual meta fields; offset defaults to 0.
  IrMetaTensor(phi::DataType dtype,
               const phi::DDim& dims,
               phi::DataLayout layout,
               const LoD& lod,
               size_t offset = 0);

  IrMetaTensor(IrMetaTensor&& other) = default;

  IrMetaTensor(const IrMetaTensor& other);

  IrMetaTensor& operator=(const IrMetaTensor& other);

  IrMetaTensor& operator=(IrMetaTensor&& other) noexcept;

  virtual ~IrMetaTensor() = default;

 public:
  // Type name used by TypeInfoTraits for runtime identification (classof).
  static const char* name() { return "IrMetaTensor"; }

  // Element count implied by the metadata (product of dims).
  int64_t numel() const override;

  const phi::DDim& dims() const noexcept override { return dims_; }

  // Throws unconditionally: a meta tensor has no physical placement.
  const phi::Place& place() const override;

  phi::DataType dtype() const noexcept override { return dtype_; }

  phi::DataLayout layout() const noexcept override { return layout_; }

  const LoD& lod() const noexcept { return lod_; }

  size_t offset() const noexcept { return offset_; }

  // Always true: the metadata is considered valid/initialized even though
  // no storage exists.
  bool valid() const noexcept override { return true; }

  bool initialized() const override { return true; }

  // Throws unconditionally: allocation is meaningless for a data-less tensor.
  void* AllocateFrom(phi::Allocator* allocator,
                     phi::DataType dtype,
                     size_t requested_size = 0,
                     bool fake_alloc = false) override;

 private:
  phi::DDim dims_;
  phi::DataType dtype_{phi::DataType::UNDEFINED};
  phi::DataLayout layout_{phi::DataLayout::NCHW};
  LoD lod_;
  size_t offset_{0};
};
} // namespace dialect
} // namespace paddle
...@@ -16,6 +16,7 @@ limitations under the License. */ ...@@ -16,6 +16,7 @@ limitations under the License. */
#include "glog/logging.h" #include "glog/logging.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.h"
#include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/distributed/auto_parallel/dist_tensor.h" #include "paddle/phi/core/distributed/auto_parallel/dist_tensor.h"
#include "paddle/phi/core/enforce.h" #include "paddle/phi/core/enforce.h"
...@@ -271,6 +272,8 @@ const LoD& MetaTensor::lod() const { ...@@ -271,6 +272,8 @@ const LoD& MetaTensor::lod() const {
return static_cast<SparseCooTensor*>(tensor_)->non_zero_elements().lod(); return static_cast<SparseCooTensor*>(tensor_)->non_zero_elements().lod();
} else if (phi::SparseCsrTensor::classof(tensor_)) { } else if (phi::SparseCsrTensor::classof(tensor_)) {
return static_cast<SparseCsrTensor*>(tensor_)->non_zero_elements().lod(); return static_cast<SparseCsrTensor*>(tensor_)->non_zero_elements().lod();
} else if (paddle::dialect::IrMetaTensor::classof(tensor_)) {
return static_cast<paddle::dialect::IrMetaTensor*>(tensor_)->lod();
} else { } else {
PADDLE_THROW(phi::errors::Unimplemented("Unsupported getting lod of `%s`.", PADDLE_THROW(phi::errors::Unimplemented("Unsupported getting lod of `%s`.",
tensor_->type_info().name())); tensor_->type_info().name()));
......
...@@ -14,6 +14,7 @@ limitations under the License. */ ...@@ -14,6 +14,7 @@ limitations under the License. */
#include <string> #include <string>
#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_meta_tensor.h"
#include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/backends/custom/custom_context.h" #include "paddle/phi/backends/custom/custom_context.h"
#include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_context.h"
...@@ -50,6 +51,7 @@ template class TypeInfoTraits<phi::TensorBase, SparseCsrTensor>; ...@@ -50,6 +51,7 @@ template class TypeInfoTraits<phi::TensorBase, SparseCsrTensor>;
template class TypeInfoTraits<phi::TensorBase, StringTensor>; template class TypeInfoTraits<phi::TensorBase, StringTensor>;
template class TypeInfoTraits<phi::TensorBase, TensorArray>; template class TypeInfoTraits<phi::TensorBase, TensorArray>;
template class TypeInfoTraits<phi::TensorBase, phi::distributed::DistTensor>; template class TypeInfoTraits<phi::TensorBase, phi::distributed::DistTensor>;
template class TypeInfoTraits<phi::TensorBase, paddle::dialect::IrMetaTensor>;
template class TypeInfoTraits<phi::DeviceContext, CPUContext>; template class TypeInfoTraits<phi::DeviceContext, CPUContext>;
template class TypeInfoTraits<phi::DeviceContext, CustomContext>; template class TypeInfoTraits<phi::DeviceContext, CustomContext>;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册