未验证 提交 cf9291b9 编写于 作者: 石晓伟 提交者: GitHub

mlir attr types for infrt place (2nd PR), test=develop (#40349)

上级 9262a93c
...@@ -11,6 +11,7 @@ limitations under the License. */ ...@@ -11,6 +11,7 @@ limitations under the License. */
#pragma once #pragma once
#include "paddle/infrt/backends/host/phi_allocator.h"
#include "paddle/phi/backends/cpu/cpu_context.h" #include "paddle/phi/backends/cpu/cpu_context.h"
namespace infrt { namespace infrt {
...@@ -20,6 +21,14 @@ class CpuPhiContext : public phi::CPUContext { ...@@ -20,6 +21,14 @@ class CpuPhiContext : public phi::CPUContext {
public: public:
using Base = phi::CPUContext; using Base = phi::CPUContext;
using phi::CPUContext::SetEigenDevice; using phi::CPUContext::SetEigenDevice;
CpuPhiContext() {
Init();
SetAllocator(alloc_.get());
}
private:
std::unique_ptr<phi::Allocator> alloc_{std::make_unique<CpuPhiAllocator>()};
}; };
} // namespace backends } // namespace backends
......
...@@ -10,3 +10,6 @@ target_link_libraries(phi-ir-exec infrt) ...@@ -10,3 +10,6 @@ target_link_libraries(phi-ir-exec infrt)
add_executable(phi-exec phi_exec.cc) add_executable(phi-exec phi_exec.cc)
target_link_libraries(phi-exec infrt) target_link_libraries(phi-exec infrt)
gather_srcs(infrt_src SRCS
data_type.cc)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/infrt/dialect/phi/data_type.h"
namespace infrt {
// Translates an infrt TargetType into the matching phi backend.
// Any target without a phi counterpart maps to phi::Backend::UNDEFINED.
phi::Backend cvtTarget2Phi(TargetType target) {
  if (target == TargetType::CPU) {
    return phi::Backend::CPU;
  }
  if (target == TargetType::GPU) {
    return phi::Backend::GPU;
  }
  return phi::Backend::UNDEFINED;
}
// Inverse of cvtTarget2Phi: maps a phi backend back to the infrt
// TargetType, falling back to TargetType::UNK for unknown backends.
TargetType cvtTargetFromPhi(phi::Backend backend) {
  if (backend == phi::Backend::CPU) {
    return TargetType::CPU;
  }
  if (backend == phi::Backend::GPU) {
    return TargetType::GPU;
  }
  return TargetType::UNK;
}
// Translates an infrt PrecisionType into the matching phi::DataType.
// Precisions with no phi counterpart map to phi::DataType::UNDEFINED.
// The case list is written out explicitly (no macro) so each mapping
// is greppable.
phi::DataType cvtPrecision2Phi(PrecisionType precision) {
  switch (precision) {
    case PrecisionType::FLOAT32:
      return phi::DataType::FLOAT32;
    case PrecisionType::FLOAT16:
      return phi::DataType::FLOAT16;
    case PrecisionType::FLOAT64:
      return phi::DataType::FLOAT64;
    case PrecisionType::UINT8:
      return phi::DataType::UINT8;
    case PrecisionType::INT8:
      return phi::DataType::INT8;
    case PrecisionType::INT16:
      return phi::DataType::INT16;
    case PrecisionType::INT32:
      return phi::DataType::INT32;
    case PrecisionType::INT64:
      return phi::DataType::INT64;
    case PrecisionType::COMPLEX64:
      return phi::DataType::COMPLEX64;
    case PrecisionType::COMPLEX128:
      return phi::DataType::COMPLEX128;
    case PrecisionType::BOOL:
      return phi::DataType::BOOL;
    default:
      return phi::DataType::UNDEFINED;
  }
}
// Inverse of cvtPrecision2Phi: maps a phi::DataType back to the infrt
// PrecisionType, falling back to PrecisionType::UNK for types infrt
// does not model. Cases are spelled out explicitly rather than
// macro-generated.
PrecisionType cvtPrecisionFromPhi(phi::DataType datatype) {
  switch (datatype) {
    case phi::DataType::FLOAT32:
      return PrecisionType::FLOAT32;
    case phi::DataType::FLOAT16:
      return PrecisionType::FLOAT16;
    case phi::DataType::FLOAT64:
      return PrecisionType::FLOAT64;
    case phi::DataType::UINT8:
      return PrecisionType::UINT8;
    case phi::DataType::INT8:
      return PrecisionType::INT8;
    case phi::DataType::INT16:
      return PrecisionType::INT16;
    case phi::DataType::INT32:
      return PrecisionType::INT32;
    case phi::DataType::INT64:
      return PrecisionType::INT64;
    case phi::DataType::COMPLEX64:
      return PrecisionType::COMPLEX64;
    case phi::DataType::COMPLEX128:
      return PrecisionType::COMPLEX128;
    case phi::DataType::BOOL:
      return PrecisionType::BOOL;
    default:
      return PrecisionType::UNK;
  }
}
// Translates an infrt LayoutType into the matching phi::DataLayout.
// Unknown layouts map to phi::DataLayout::UNDEFINED.
phi::DataLayout cvtLayout2Phi(LayoutType layout) {
  if (layout == LayoutType::NCHW) {
    return phi::DataLayout::NCHW;
  }
  if (layout == LayoutType::NHWC) {
    return phi::DataLayout::NHWC;
  }
  if (layout == LayoutType::ANY) {
    return phi::DataLayout::ANY;
  }
  return phi::DataLayout::UNDEFINED;
}
// Inverse of cvtLayout2Phi: maps a phi::DataLayout back to the infrt
// LayoutType, falling back to LayoutType::UNK for unmodeled layouts.
LayoutType cvtLayoutFromPhi(phi::DataLayout layout) {
  if (layout == phi::DataLayout::NCHW) {
    return LayoutType::NCHW;
  }
  if (layout == phi::DataLayout::NHWC) {
    return LayoutType::NHWC;
  }
  if (layout == phi::DataLayout::ANY) {
    return LayoutType::ANY;
  }
  return LayoutType::UNK;
}
// Builds a phi::KernelKey from an infrt Place by converting each of
// its three components (target, layout, precision) individually.
phi::KernelKey cvtPlace2Phi(const Place& place) {
  const phi::Backend backend = cvtTarget2Phi(place.target);
  const phi::DataLayout layout = cvtLayout2Phi(place.layout);
  const phi::DataType dtype = cvtPrecision2Phi(place.precision);
  return phi::KernelKey(backend, layout, dtype);
}
// Builds an infrt Place from a phi::TensorArgDef by converting its
// backend, dtype, and layout fields.
// NOTE: tensor_arg is taken by value to match the declaration in the
// corresponding header.
Place cvtPlaceFromPhi(phi::TensorArgDef tensor_arg) {
  const TargetType target = cvtTargetFromPhi(tensor_arg.backend);
  const PrecisionType precision = cvtPrecisionFromPhi(tensor_arg.dtype);
  const LayoutType layout = cvtLayoutFromPhi(tensor_arg.layout);
  return Place(target, precision, layout);
}
} // namespace infrt
...@@ -14,15 +14,25 @@ ...@@ -14,15 +14,25 @@
#pragma once #pragma once
#include "paddle/infrt/backends/host/phi_allocator.h" #include "paddle/infrt/dialect/infrt/common_type.h"
#include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/common/backend.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/kernel_factory.h"
namespace infrt { namespace infrt {
namespace kernel {
namespace phi {
backends::CpuPhiAllocator CreateCpuAllocator(); phi::Backend cvtTarget2Phi(TargetType target);
TargetType cvtTargetFromPhi(phi::Backend backend);
phi::DataType cvtPrecision2Phi(PrecisionType precision);
PrecisionType cvtPrecisionFromPhi(phi::DataType datatype);
phi::DataLayout cvtLayout2Phi(LayoutType layout);
LayoutType cvtLayoutFromPhi(phi::DataLayout layout);
phi::KernelKey cvtPlace2Phi(const Place& place);
Place cvtPlaceFromPhi(phi::TensorArgDef tensor_arg);
} // namespace phi
} // namespace kernel
} // namespace infrt } // namespace infrt
...@@ -18,12 +18,13 @@ def PHI_DenseTensorDialect : Dialect { ...@@ -18,12 +18,13 @@ def PHI_DenseTensorDialect : Dialect {
} }
// PHI DenseTensor related Op. // PHI DenseTensor related Op.
class PDT_Op<string mnemonic, list<OpTrait> traits = []> : Op<PHI_DenseTensorDialect, mnemonic, !listconcat(traits, [PhiOpTrait, IsolatedFromAbove])> { class PDT_Op<string mnemonic, list<OpTrait> traits = []> : Op<PHI_DenseTensorDialect,
} mnemonic, !listconcat(traits, [PhiOpTrait, IsolatedFromAbove])> {}
class CreateDenseTensorOp<string place, string dtype, string layout> class CreateDenseTensorOp
: PDT_Op<"create_dense_tensor." # place # "." # dtype # "." # layout, [NoSideEffect]> { : PDT_Op<"create_dense_tensor", [NoSideEffect]> {
let arguments = (ins Allocator:$allocator, I64ArrayAttr:$dims, I64ArrayAttr:$lod); let arguments = (ins Context:$context, I64ArrayAttr:$dims,
LayoutAttr:$layout, I64ArrayAttr:$lod, PrecisionAttr:$precision);
let results = (outs DenseTensor:$output); let results = (outs DenseTensor:$output);
} }
...@@ -44,23 +45,16 @@ class PrintDenseTensorOp: ...@@ -44,23 +45,16 @@ class PrintDenseTensorOp:
let assemblyFormat = "`(` $input `:` type($input) `)` attr-dict"; let assemblyFormat = "`(` $input `:` type($input) `)` attr-dict";
} }
class CreateCPUAllocatorOp class CreateContextOp<string target>
: PDT_Op<"create_allocator." # "cpu", [NoSideEffect]> { : PDT_Op<"create_context." # target, [NoSideEffect]> {
let arguments = (ins); let arguments = (ins);
let results = (outs Allocator:$output);
}
class CreateCPUContextOp
: PDT_Op<"create_context." # "cpu", [NoSideEffect]> {
let arguments = (ins Allocator:$input);
let results = (outs Context:$output); let results = (outs Context:$output);
} }
def PDT_CreateDenseTensorOp_cpu_f32_nchw : CreateDenseTensorOp<"cpu", "f32", "nchw">; def PDT_CreateDenseTensorOp : CreateDenseTensorOp;
def PDT_FillDenseTensorOp_f32 : FillDenseTensorOp<F32ArrayAttr, "f32">; def PDT_FillDenseTensorOp_f32 : FillDenseTensorOp<F32ArrayAttr, "f32">;
def PDT_CreateAllocatorOp_cpu : CreateCPUAllocatorOp; def PDT_CreateCPUContextOp : CreateContextOp<"cpu">;
def PDT_CreateContextOp_cpu : CreateCPUContextOp; def PDT_PrintDenseTensor : PrintDenseTensorOp;
def PDT_PrintDenseTensor_cpu : PrintDenseTensorOp;
def FakeKernelOp : PDT_Op<"fake_phi_kernel"> { def FakeKernelOp : PDT_Op<"fake_phi_kernel"> {
let arguments = (ins Context:$dev_ctx, DenseTensor:$x, DenseTensor:$y, BoolAttr:$transpose_x, BoolAttr:$transpose_y); let arguments = (ins Context:$dev_ctx, DenseTensor:$x, DenseTensor:$y, BoolAttr:$transpose_x, BoolAttr:$transpose_y);
......
...@@ -14,119 +14,10 @@ ...@@ -14,119 +14,10 @@
#include "paddle/infrt/dialect/phi/pass/kernel_op_desc.h" #include "paddle/infrt/dialect/phi/pass/kernel_op_desc.h"
#include <glog/logging.h> #include <glog/logging.h>
#include "paddle/phi/core/kernel_factory.h" #include "paddle/infrt/dialect/phi/data_type.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/declarations.h" #include "paddle/phi/kernels/declarations.h"
namespace infrt { namespace infrt {
namespace {
phi::Backend cvtTarget2Phi(TargetType target) {
switch (target) {
case TargetType::CPU:
return phi::Backend::CPU;
case TargetType::GPU:
return phi::Backend::GPU;
default:
return phi::Backend::UNDEFINED;
}
}
TargetType cvtTargetFromPhi(phi::Backend backend) {
switch (backend) {
case phi::Backend::CPU:
return TargetType::CPU;
case phi::Backend::GPU:
return TargetType::GPU;
default:
return TargetType::UNK;
}
}
phi::DataType cvtPrecision2Phi(PrecisionType precision) {
#define CONVERT_PRECISION_TO_PHI(Precision) \
case PrecisionType::Precision: \
return phi::DataType::Precision;
switch (precision) {
CONVERT_PRECISION_TO_PHI(FLOAT32)
CONVERT_PRECISION_TO_PHI(FLOAT16)
CONVERT_PRECISION_TO_PHI(FLOAT64)
CONVERT_PRECISION_TO_PHI(UINT8)
CONVERT_PRECISION_TO_PHI(INT8)
CONVERT_PRECISION_TO_PHI(INT16)
CONVERT_PRECISION_TO_PHI(INT32)
CONVERT_PRECISION_TO_PHI(INT64)
CONVERT_PRECISION_TO_PHI(COMPLEX64)
CONVERT_PRECISION_TO_PHI(COMPLEX128)
CONVERT_PRECISION_TO_PHI(BOOL)
default:
return phi::DataType::UNDEFINED;
}
#undef CONVERT_PRECISION_TO_PHI
}
PrecisionType cvtPrecisionFromPhi(phi::DataType datatype) {
#define CONVERT_PRECISION_FROM_PHI(Precision) \
case phi::DataType::Precision: \
return PrecisionType::Precision;
switch (datatype) {
CONVERT_PRECISION_FROM_PHI(FLOAT32)
CONVERT_PRECISION_FROM_PHI(FLOAT16)
CONVERT_PRECISION_FROM_PHI(FLOAT64)
CONVERT_PRECISION_FROM_PHI(UINT8)
CONVERT_PRECISION_FROM_PHI(INT8)
CONVERT_PRECISION_FROM_PHI(INT16)
CONVERT_PRECISION_FROM_PHI(INT32)
CONVERT_PRECISION_FROM_PHI(INT64)
CONVERT_PRECISION_FROM_PHI(COMPLEX64)
CONVERT_PRECISION_FROM_PHI(COMPLEX128)
CONVERT_PRECISION_FROM_PHI(BOOL)
default:
return PrecisionType::UNK;
}
#undef CONVERT_PRECISION_FROM_PHI
}
phi::DataLayout cvtLayout2Phi(LayoutType layout) {
switch (layout) {
case LayoutType::NCHW:
return phi::DataLayout::NCHW;
case LayoutType::NHWC:
return phi::DataLayout::NHWC;
case LayoutType::ANY:
return phi::DataLayout::ANY;
default:
return phi::DataLayout::UNDEFINED;
}
}
LayoutType cvtLayoutFromPhi(phi::DataLayout layout) {
switch (layout) {
case phi::DataLayout::NCHW:
return LayoutType::NCHW;
case phi::DataLayout::NHWC:
return LayoutType::NHWC;
case phi::DataLayout::ANY:
return LayoutType::ANY;
default:
return LayoutType::UNK;
}
}
phi::KernelKey cvtPlace2Phi(const Place& place) {
return phi::KernelKey(cvtTarget2Phi(place.target),
cvtLayout2Phi(place.layout),
cvtPrecision2Phi(place.precision));
}
Place cvtPlaceFromPhi(phi::TensorArgDef tensor_arg) {
return Place(cvtTargetFromPhi(tensor_arg.backend),
cvtPrecisionFromPhi(tensor_arg.dtype),
cvtLayoutFromPhi(tensor_arg.layout));
}
} // namespace
std::string getPhiTargetPrefix(TargetType target) { std::string getPhiTargetPrefix(TargetType target) {
switch (target) { switch (target) {
......
...@@ -135,20 +135,12 @@ void phiOpCvtPass::diapatchStage() { ...@@ -135,20 +135,12 @@ void phiOpCvtPass::diapatchStage() {
phi_context.end()) { phi_context.end()) {
switch (phi_kernel_desc.kernelType.target) { switch (phi_kernel_desc.kernelType.target) {
case TargetType::CPU: { case TargetType::CPU: {
auto alloctor_value =
builder
.create<infrt::phi::CreateAllocatorOp_cpu>(
kernel_op.getLoc(),
phi::AllocatorType::get(kernel_op.getContext(),
TargetType::CPU))
.output();
auto context_value = auto context_value =
builder builder
.create<infrt::phi::CreateContextOp_cpu>( .create<infrt::phi::CreateCPUContextOp>(
kernel_op.getLoc(), kernel_op.getLoc(),
phi::ContextType::get(kernel_op.getContext(), phi::ContextType::get(kernel_op.getContext(),
TargetType::CPU), TargetType::CPU))
alloctor_value)
.output(); .output();
phi_context[TargetType::CPU] = context_value; phi_context[TargetType::CPU] = context_value;
} break; } break;
......
...@@ -173,6 +173,36 @@ boost::optional<double> MlirToRuntimeTranslator::EmitAttribute( ...@@ -173,6 +173,36 @@ boost::optional<double> MlirToRuntimeTranslator::EmitAttribute(
return boost::none; return boost::none;
} }
template <>
boost::optional<::infrt::TargetType> MlirToRuntimeTranslator::EmitAttribute(
const mlir::Attribute& attr) {
if (!attr.isa<::infrt::TargetAttr>()) return boost::none;
if (attr.isa<::infrt::TargetAttr>()) {
return attr.cast<::infrt::TargetAttr>().getTarget();
}
return boost::none;
}
template <>
boost::optional<::infrt::LayoutType> MlirToRuntimeTranslator::EmitAttribute(
const mlir::Attribute& attr) {
if (!attr.isa<::infrt::LayoutAttr>()) return boost::none;
if (attr.isa<::infrt::LayoutAttr>()) {
return attr.cast<::infrt::LayoutAttr>().getLayout();
}
return boost::none;
}
template <>
boost::optional<::infrt::PrecisionType> MlirToRuntimeTranslator::EmitAttribute(
const mlir::Attribute& attr) {
if (!attr.isa<::infrt::PrecisionAttr>()) return boost::none;
if (attr.isa<::infrt::PrecisionAttr>()) {
return attr.cast<::infrt::PrecisionAttr>().getPrecision();
}
return boost::none;
}
template <> template <>
boost::optional<std::string> MlirToRuntimeTranslator::EmitAttribute( boost::optional<std::string> MlirToRuntimeTranslator::EmitAttribute(
const mlir::Attribute& attr) { const mlir::Attribute& attr) {
...@@ -292,6 +322,13 @@ bool MlirToRuntimeTranslator::EmitGeneralOp(mlir::Operation* op) { ...@@ -292,6 +322,13 @@ bool MlirToRuntimeTranslator::EmitGeneralOp(mlir::Operation* op) {
impl_->cur_op->AppendAttribute(new Value(std::move(*v))); impl_->cur_op->AppendAttribute(new Value(std::move(*v)));
} else if (auto v = EmitAttribute<bool>(attr.getValue())) { } else if (auto v = EmitAttribute<bool>(attr.getValue())) {
impl_->cur_op->AppendAttribute(new Value(*v)); impl_->cur_op->AppendAttribute(new Value(*v));
} else if (auto v = EmitAttribute<::infrt::TargetType>(attr.getValue())) {
impl_->cur_op->AppendAttribute(new Value(*v));
} else if (auto v =
EmitAttribute<::infrt::PrecisionType>(attr.getValue())) {
impl_->cur_op->AppendAttribute(new Value(*v));
} else if (auto v = EmitAttribute<::infrt::LayoutType>(attr.getValue())) {
impl_->cur_op->AppendAttribute(new Value(*v));
} else if (auto v = EmitAttribute<std::vector<int16_t>>(attr.getValue())) { } else if (auto v = EmitAttribute<std::vector<int16_t>>(attr.getValue())) {
impl_->cur_op->AppendAttribute(new Value(std::move(*v))); impl_->cur_op->AppendAttribute(new Value(std::move(*v)));
} else if (auto v = EmitAttribute<std::vector<int32_t>>(attr.getValue())) { } else if (auto v = EmitAttribute<std::vector<int32_t>>(attr.getValue())) {
......
...@@ -24,14 +24,6 @@ ValueRef::ValueRef(int64_t val) : Shared<Value>(new Value(val)) {} ...@@ -24,14 +24,6 @@ ValueRef::ValueRef(int64_t val) : Shared<Value>(new Value(val)) {}
ValueRef::ValueRef(float val) : Shared<Value>(new Value(val)) {} ValueRef::ValueRef(float val) : Shared<Value>(new Value(val)) {}
ValueRef::ValueRef(double val) : Shared<Value>(new Value(val)) {} ValueRef::ValueRef(double val) : Shared<Value>(new Value(val)) {}
ValueRef::ValueRef(bool val) : Shared<Value>(new Value(val)) {} ValueRef::ValueRef(bool val) : Shared<Value>(new Value(val)) {}
ValueRef::ValueRef(backends::CpuPhiContext&& val)
: Shared<Value>(new Value(std::move(val))) {}
ValueRef::ValueRef(::phi::CPUContext&& val)
: Shared<Value>(new Value(std::move(val))) {}
ValueRef::ValueRef(::phi::DenseTensor&& val)
: Shared<Value>(new Value(std::move(val))) {}
ValueRef::ValueRef(::phi::MetaTensor&& val)
: Shared<Value>(new Value(std::move(val))) {}
const char* Value::type_info() const { return __type_info__; } const char* Value::type_info() const { return __type_info__; }
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include "paddle/infrt/common/object.h" #include "paddle/infrt/common/object.h"
#include "paddle/infrt/common/shared.h" #include "paddle/infrt/common/shared.h"
#include "paddle/infrt/dialect/infrt/common_type.h"
#include "paddle/infrt/host_context/function.h" #include "paddle/infrt/host_context/function.h"
#include "paddle/infrt/support/variant.h" #include "paddle/infrt/support/variant.h"
#include "paddle/infrt/tensor/dense_host_tensor.h" #include "paddle/infrt/tensor/dense_host_tensor.h"
...@@ -64,10 +65,12 @@ using ValueVariantType = ...@@ -64,10 +65,12 @@ using ValueVariantType =
tensor::DenseHostTensor, tensor::DenseHostTensor,
MlirFunctionExecutable*, MlirFunctionExecutable*,
tensor::TensorMap, tensor::TensorMap,
::infrt::PrecisionType,
::infrt::LayoutType,
::infrt::TargetType,
#ifdef INFRT_WITH_PHI #ifdef INFRT_WITH_PHI
::phi::MetaTensor, ::phi::MetaTensor,
::phi::DenseTensor, ::phi::DenseTensor,
backends::CpuPhiAllocator,
backends::CpuPhiContext, backends::CpuPhiContext,
::phi::CPUContext, ::phi::CPUContext,
std::vector<const phi::DenseTensor*>, std::vector<const phi::DenseTensor*>,
...@@ -101,6 +104,9 @@ class Value : public common::Object { ...@@ -101,6 +104,9 @@ class Value : public common::Object {
explicit Value(float x) : data(x) {} explicit Value(float x) : data(x) {}
explicit Value(double x) : data(x) {} explicit Value(double x) : data(x) {}
explicit Value(bool x) : data(x) {} explicit Value(bool x) : data(x) {}
explicit Value(::infrt::TargetType x) : data(x) {}
explicit Value(::infrt::LayoutType x) : data(x) {}
explicit Value(::infrt::PrecisionType x) : data(x) {}
explicit Value(std::string x) : data(x) {} explicit Value(std::string x) : data(x) {}
explicit Value(tensor::TensorMap&& x) : data(x) {} explicit Value(tensor::TensorMap&& x) : data(x) {}
explicit Value(std::vector<int16_t>&& x) : data(x) {} explicit Value(std::vector<int16_t>&& x) : data(x) {}
...@@ -112,11 +118,10 @@ class Value : public common::Object { ...@@ -112,11 +118,10 @@ class Value : public common::Object {
explicit Value(tensor::DenseHostTensor&& x) : data(std::move(x)) {} explicit Value(tensor::DenseHostTensor&& x) : data(std::move(x)) {}
explicit Value(MlirFunctionExecutable* x) : data(x) {} explicit Value(MlirFunctionExecutable* x) : data(x) {}
#ifdef INFRT_WITH_PHI #ifdef INFRT_WITH_PHI
explicit Value(backends::CpuPhiContext&& x) : data(std::move(x)) {}
explicit Value(::phi::CPUContext&& x) : data(std::move(x)) {} explicit Value(::phi::CPUContext&& x) : data(std::move(x)) {}
explicit Value(backends::CpuPhiContext&& x) : data(std::move(x)) {}
explicit Value(::phi::DenseTensor&& x) : data(std::move(x)) {} explicit Value(::phi::DenseTensor&& x) : data(std::move(x)) {}
explicit Value(::phi::MetaTensor&& x) : data(std::move(x)) {} explicit Value(::phi::MetaTensor&& x) : data(std::move(x)) {}
explicit Value(backends::CpuPhiAllocator&& x) : data(std::move(x)) {}
#endif #endif
template <typename T> template <typename T>
...@@ -179,10 +184,6 @@ class ValueRef : common::Shared<Value> { ...@@ -179,10 +184,6 @@ class ValueRef : common::Shared<Value> {
explicit ValueRef(float val); explicit ValueRef(float val);
explicit ValueRef(double val); explicit ValueRef(double val);
explicit ValueRef(bool val); explicit ValueRef(bool val);
explicit ValueRef(::phi::MetaTensor&& val);
explicit ValueRef(backends::CpuPhiContext&& x);
explicit ValueRef(::phi::CPUContext&& x);
explicit ValueRef(::phi::DenseTensor&& x);
using common::Shared<Value>::get; using common::Shared<Value>::get;
using common::Shared<Value>::Reset; using common::Shared<Value>::Reset;
......
...@@ -8,7 +8,6 @@ gather_srcs(infrt_src SRCS ...@@ -8,7 +8,6 @@ gather_srcs(infrt_src SRCS
registry.cc registry.cc
dense_tensor_kernels.cc dense_tensor_kernels.cc
context_kernels.cc context_kernels.cc
allocator_kernels.cc
) )
set(infrt_register_phi_kernels_gen_source_file ${CMAKE_SOURCE_DIR}/paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.cc) set(infrt_register_phi_kernels_gen_source_file ${CMAKE_SOURCE_DIR}/paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.cc)
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/infrt/kernel/phi/allocator_kernels.h"
namespace infrt {
namespace kernel {
namespace phi {
// Kernel factory: returns a value-initialized CPU allocator for phi tensors.
backends::CpuPhiAllocator CreateCpuAllocator() { return {}; }
} // namespace phi
} // namespace kernel
} // namespace infrt
...@@ -18,12 +18,11 @@ namespace infrt { ...@@ -18,12 +18,11 @@ namespace infrt {
namespace kernel { namespace kernel {
namespace phi { namespace phi {
::phi::CPUContext CreateCpuContext( ::phi::CPUContext CreateCPUContext() {
infrt::backends::CpuPhiAllocator* allocator) { ::phi::CPUContext ctx{};
::phi::CPUContext context; ctx.Init();
context.SetAllocator(allocator); ctx.SetAllocator(new backends::CpuPhiAllocator{});
context.Init(); return ctx;
return context;
} }
} // namespace phi } // namespace phi
......
...@@ -16,13 +16,14 @@ ...@@ -16,13 +16,14 @@
#include "paddle/infrt/backends/host/phi_allocator.h" #include "paddle/infrt/backends/host/phi_allocator.h"
#include "paddle/infrt/backends/host/phi_context.h" #include "paddle/infrt/backends/host/phi_context.h"
#include "paddle/infrt/host_context/kernel_utils.h"
#include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/dense_tensor.h"
namespace infrt { namespace infrt {
namespace kernel { namespace kernel {
namespace phi { namespace phi {
::phi::CPUContext CreateCpuContext(::infrt::backends::CpuPhiAllocator*); ::phi::CPUContext CreateCPUContext();
} // namespace phi } // namespace phi
} // namespace kernel } // namespace kernel
......
...@@ -13,20 +13,25 @@ ...@@ -13,20 +13,25 @@
// limitations under the License. // limitations under the License.
#include "paddle/infrt/kernel/phi/dense_tensor_kernels.h" #include "paddle/infrt/kernel/phi/dense_tensor_kernels.h"
#include <iostream> #include "paddle/infrt/dialect/phi/data_type.h"
#include "paddle/infrt/kernel/phi/context_kernels.h"
namespace infrt { namespace infrt {
namespace kernel { namespace kernel {
namespace phi { namespace phi {
::phi::DenseTensor CreateDenseTensorCpuF32Nchw( ::phi::DenseTensor CreateDenseTensor(
backends::CpuPhiAllocator* allocator, const ::phi::CPUContext& context,
host_context::Attribute<std::vector<int64_t>> dims, host_context::Attribute<std::vector<int64_t>> dims,
host_context::Attribute<std::vector<int64_t>> lod) { host_context::Attribute<::infrt::LayoutType> layout,
return ::phi::DenseTensor(allocator, host_context::Attribute<std::vector<int64_t>> lod,
::phi::DenseTensorMeta(::phi::DataType::FLOAT32, host_context::Attribute<::infrt::PrecisionType> precision) {
::phi::make_ddim(dims.get()), return ::phi::DenseTensor(
::phi::DataLayout::NCHW, const_cast<::phi::Allocator*>(&context.GetAllocator()),
{})); ::phi::DenseTensorMeta(cvtPrecision2Phi(precision.get()),
::phi::make_ddim(dims.get()),
cvtLayout2Phi(layout.get()),
{}));
} }
void FillDenseTensorF32(::phi::DenseTensor* dense_tensor, void FillDenseTensorF32(::phi::DenseTensor* dense_tensor,
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#pragma once #pragma once
#include "paddle/infrt/backends/host/phi_allocator.h" #include "paddle/infrt/backends/host/phi_allocator.h"
#include "paddle/infrt/dialect/infrt/common_type.h"
#include "paddle/infrt/host_context/kernel_utils.h" #include "paddle/infrt/host_context/kernel_utils.h"
#include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/dense_tensor.h"
...@@ -22,10 +23,12 @@ namespace infrt { ...@@ -22,10 +23,12 @@ namespace infrt {
namespace kernel { namespace kernel {
namespace phi { namespace phi {
::phi::DenseTensor CreateDenseTensorCpuF32Nchw( ::phi::DenseTensor CreateDenseTensor(
backends::CpuPhiAllocator* allocator, const ::phi::CPUContext& context,
host_context::Attribute<std::vector<int64_t>> dims, host_context::Attribute<std::vector<int64_t>> dims,
host_context::Attribute<std::vector<int64_t>> lod); host_context::Attribute<::infrt::LayoutType> layout,
host_context::Attribute<std::vector<int64_t>> lod,
host_context::Attribute<::infrt::PrecisionType> precision);
void FillDenseTensorF32(::phi::DenseTensor* dense_tensor, void FillDenseTensorF32(::phi::DenseTensor* dense_tensor,
host_context::Attribute<std::vector<float>> values); host_context::Attribute<std::vector<float>> values);
......
...@@ -24,7 +24,8 @@ void InferShapedKernelLauncher::CreateKernelFrameForInferShape( ...@@ -24,7 +24,8 @@ void InferShapedKernelLauncher::CreateKernelFrameForInferShape(
frame->GetValues(1, frame->GetNumElements() - 1)) { frame->GetValues(1, frame->GetNumElements() - 1)) {
// TODO(Superjomn) To extend this. // TODO(Superjomn) To extend this.
if (value->is_type<::phi::DenseTensor>()) { if (value->is_type<::phi::DenseTensor>()) {
values.emplace_back(::phi::MetaTensor{&value->get<::phi::DenseTensor>()}); values.emplace_back(new host_context::Value{
::phi::MetaTensor{&value->get<::phi::DenseTensor>()}});
infershape_kernel_frame_builder.AddArgument(values.back().get()); infershape_kernel_frame_builder.AddArgument(values.back().get());
} else { } else {
infershape_kernel_frame_builder.AddArgument(value); infershape_kernel_frame_builder.AddArgument(value);
......
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#include "paddle/infrt/host_context/kernel_registry.h" #include "paddle/infrt/host_context/kernel_registry.h"
#include "paddle/infrt/host_context/kernel_utils.h" #include "paddle/infrt/host_context/kernel_utils.h"
#include "paddle/infrt/kernel/phi/allocator_kernels.h"
#include "paddle/infrt/kernel/phi/context_kernels.h" #include "paddle/infrt/kernel/phi/context_kernels.h"
#include "paddle/infrt/kernel/phi/dense_tensor_kernels.h" #include "paddle/infrt/kernel/phi/dense_tensor_kernels.h"
#include "paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h" #include "paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h"
...@@ -33,13 +32,10 @@ namespace infrt { ...@@ -33,13 +32,10 @@ namespace infrt {
namespace kernel { namespace kernel {
void RegisterPhiKernels(host_context::KernelRegistry* registry) { void RegisterPhiKernels(host_context::KernelRegistry* registry) {
registry->AddKernel("phi_dt.create_allocator.cpu",
INFRT_KERNEL(infrt::kernel::phi::CreateCpuAllocator));
registry->AddKernel("phi_dt.create_context.cpu", registry->AddKernel("phi_dt.create_context.cpu",
INFRT_KERNEL(infrt::kernel::phi::CreateCpuContext)); INFRT_KERNEL(infrt::kernel::phi::CreateCPUContext));
registry->AddKernel( registry->AddKernel("phi_dt.create_dense_tensor",
"phi_dt.create_dense_tensor.cpu.f32.nchw", INFRT_KERNEL(infrt::kernel::phi::CreateDenseTensor));
INFRT_KERNEL(infrt::kernel::phi::CreateDenseTensorCpuF32Nchw));
registry->AddKernel("phi_dt.fill_dense_tensor.f32", registry->AddKernel("phi_dt.fill_dense_tensor.f32",
INFRT_KERNEL(infrt::kernel::phi::FillDenseTensorF32)); INFRT_KERNEL(infrt::kernel::phi::FillDenseTensorF32));
registry->AddKernel("phi_dt.print_tensor", registry->AddKernel("phi_dt.print_tensor",
......
...@@ -2,9 +2,10 @@ ...@@ -2,9 +2,10 @@
// CHECK-LABEL: @sign_any_float32_execute // CHECK-LABEL: @sign_any_float32_execute
func @sign_any_float32_execute() { func @sign_any_float32_execute() {
%allocator = "phi_dt.create_allocator.cpu" (): () -> !phi.allocator<CPU> %ctx = "phi_dt.create_context.cpu" (): () -> !phi.context<CPU>
%ctx = "phi_dt.create_context.cpu" (%allocator): (!phi.allocator<CPU>) -> !phi.context<CPU> %t = "phi_dt.create_dense_tensor" (%ctx) {
%t = "phi_dt.create_dense_tensor.cpu.f32.nchw" (%allocator) {dims=[1:i64], lod=[1:i64]}: (!phi.allocator<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>) precision=#infrt.precision<FP32>,
layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[1:i64]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
"phi_dt.fill_dense_tensor.f32"(%t) {value=[3.8:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> () "phi_dt.fill_dense_tensor.f32"(%t) {value=[3.8:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
%e = "phi_cpu.sign.float32.any"(%ctx, %t) : (!phi.context<CPU>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>) %e = "phi_cpu.sign.float32.any"(%ctx, %t) : (!phi.context<CPU>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册