未验证 提交 cf9291b9 编写于 作者: 石晓伟 提交者: GitHub

mlir attr types for infrt place (2nd PR), test=develop (#40349)

上级 9262a93c
......@@ -11,6 +11,7 @@ limitations under the License. */
#pragma once
#include "paddle/infrt/backends/host/phi_allocator.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
namespace infrt {
......@@ -20,6 +21,14 @@ class CpuPhiContext : public phi::CPUContext {
public:
using Base = phi::CPUContext;
using phi::CPUContext::SetEigenDevice;
// Default ctor: runs the base-class initialization first, then points the
// context at the file-local CPU allocator held in alloc_ (so the context
// never allocates through a null allocator).
// NOTE(review): alloc_ must outlive any allocation made through this
// context — it does here, since both are members of the same object.
CpuPhiContext() {
Init();
SetAllocator(alloc_.get());
}
private:
std::unique_ptr<phi::Allocator> alloc_{std::make_unique<CpuPhiAllocator>()};
};
} // namespace backends
......
......@@ -10,3 +10,6 @@ target_link_libraries(phi-ir-exec infrt)
add_executable(phi-exec phi_exec.cc)
target_link_libraries(phi-exec infrt)
gather_srcs(infrt_src SRCS
data_type.cc)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/infrt/dialect/phi/data_type.h"
namespace infrt {
// Translates an infrt TargetType into the matching phi backend.
// Targets other than CPU/GPU map to phi::Backend::UNDEFINED.
phi::Backend cvtTarget2Phi(TargetType target) {
  if (target == TargetType::CPU) return phi::Backend::CPU;
  if (target == TargetType::GPU) return phi::Backend::GPU;
  return phi::Backend::UNDEFINED;
}
// Inverse of cvtTarget2Phi: phi backend -> infrt TargetType.
// Backends other than CPU/GPU map to TargetType::UNK.
TargetType cvtTargetFromPhi(phi::Backend backend) {
  if (backend == phi::Backend::CPU) return TargetType::CPU;
  if (backend == phi::Backend::GPU) return TargetType::GPU;
  return TargetType::UNK;
}
// Translates an infrt PrecisionType into the matching phi::DataType.
// The enumerator spellings agree between the two enums, so each case maps
// name-for-name; anything unlisted becomes phi::DataType::UNDEFINED.
phi::DataType cvtPrecision2Phi(PrecisionType precision) {
  switch (precision) {
    case PrecisionType::FLOAT32:
      return phi::DataType::FLOAT32;
    case PrecisionType::FLOAT16:
      return phi::DataType::FLOAT16;
    case PrecisionType::FLOAT64:
      return phi::DataType::FLOAT64;
    case PrecisionType::UINT8:
      return phi::DataType::UINT8;
    case PrecisionType::INT8:
      return phi::DataType::INT8;
    case PrecisionType::INT16:
      return phi::DataType::INT16;
    case PrecisionType::INT32:
      return phi::DataType::INT32;
    case PrecisionType::INT64:
      return phi::DataType::INT64;
    case PrecisionType::COMPLEX64:
      return phi::DataType::COMPLEX64;
    case PrecisionType::COMPLEX128:
      return phi::DataType::COMPLEX128;
    case PrecisionType::BOOL:
      return phi::DataType::BOOL;
    default:
      return phi::DataType::UNDEFINED;
  }
}
// Inverse of cvtPrecision2Phi: phi::DataType -> infrt PrecisionType.
// Each supported dtype maps name-for-name; anything unlisted becomes
// PrecisionType::UNK.
PrecisionType cvtPrecisionFromPhi(phi::DataType datatype) {
  switch (datatype) {
    case phi::DataType::FLOAT32:
      return PrecisionType::FLOAT32;
    case phi::DataType::FLOAT16:
      return PrecisionType::FLOAT16;
    case phi::DataType::FLOAT64:
      return PrecisionType::FLOAT64;
    case phi::DataType::UINT8:
      return PrecisionType::UINT8;
    case phi::DataType::INT8:
      return PrecisionType::INT8;
    case phi::DataType::INT16:
      return PrecisionType::INT16;
    case phi::DataType::INT32:
      return PrecisionType::INT32;
    case phi::DataType::INT64:
      return PrecisionType::INT64;
    case phi::DataType::COMPLEX64:
      return PrecisionType::COMPLEX64;
    case phi::DataType::COMPLEX128:
      return PrecisionType::COMPLEX128;
    case phi::DataType::BOOL:
      return PrecisionType::BOOL;
    default:
      return PrecisionType::UNK;
  }
}
// Translates an infrt LayoutType into the matching phi::DataLayout.
// Layouts other than NCHW/NHWC/ANY map to phi::DataLayout::UNDEFINED.
phi::DataLayout cvtLayout2Phi(LayoutType layout) {
  if (layout == LayoutType::NCHW) return phi::DataLayout::NCHW;
  if (layout == LayoutType::NHWC) return phi::DataLayout::NHWC;
  if (layout == LayoutType::ANY) return phi::DataLayout::ANY;
  return phi::DataLayout::UNDEFINED;
}
// Inverse of cvtLayout2Phi: phi::DataLayout -> infrt LayoutType.
// Layouts other than NCHW/NHWC/ANY map to LayoutType::UNK.
LayoutType cvtLayoutFromPhi(phi::DataLayout layout) {
  if (layout == phi::DataLayout::NCHW) return LayoutType::NCHW;
  if (layout == phi::DataLayout::NHWC) return LayoutType::NHWC;
  if (layout == phi::DataLayout::ANY) return LayoutType::ANY;
  return LayoutType::UNK;
}
// Builds a phi::KernelKey from an infrt Place by converting each field
// through the corresponding cvt*2Phi helper.
phi::KernelKey cvtPlace2Phi(const Place& place) {
  const phi::Backend backend = cvtTarget2Phi(place.target);
  const phi::DataLayout layout = cvtLayout2Phi(place.layout);
  const phi::DataType dtype = cvtPrecision2Phi(place.precision);
  return phi::KernelKey(backend, layout, dtype);
}
// Builds an infrt Place from a phi tensor argument definition by
// converting each field through the corresponding cvt*FromPhi helper.
Place cvtPlaceFromPhi(phi::TensorArgDef tensor_arg) {
  const TargetType target = cvtTargetFromPhi(tensor_arg.backend);
  const PrecisionType precision = cvtPrecisionFromPhi(tensor_arg.dtype);
  const LayoutType layout = cvtLayoutFromPhi(tensor_arg.layout);
  return Place(target, precision, layout);
}
} // namespace infrt
......@@ -14,15 +14,25 @@
#pragma once
#include "paddle/infrt/backends/host/phi_allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/infrt/dialect/infrt/common_type.h"
#include "paddle/phi/common/backend.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/kernel_factory.h"
namespace infrt {
namespace kernel {
namespace phi {
backends::CpuPhiAllocator CreateCpuAllocator();
phi::Backend cvtTarget2Phi(TargetType target);
TargetType cvtTargetFromPhi(phi::Backend backend);
phi::DataType cvtPrecision2Phi(PrecisionType precision);
PrecisionType cvtPrecisionFromPhi(phi::DataType datatype);
phi::DataLayout cvtLayout2Phi(LayoutType layout);
LayoutType cvtLayoutFromPhi(phi::DataLayout layout);
phi::KernelKey cvtPlace2Phi(const Place& place);
Place cvtPlaceFromPhi(phi::TensorArgDef tensor_arg);
} // namespace phi
} // namespace kernel
} // namespace infrt
......@@ -18,12 +18,13 @@ def PHI_DenseTensorDialect : Dialect {
}
// PHI DenseTensor related Op.
class PDT_Op<string mnemonic, list<OpTrait> traits = []> : Op<PHI_DenseTensorDialect, mnemonic, !listconcat(traits, [PhiOpTrait, IsolatedFromAbove])> {
}
class PDT_Op<string mnemonic, list<OpTrait> traits = []> : Op<PHI_DenseTensorDialect,
mnemonic, !listconcat(traits, [PhiOpTrait, IsolatedFromAbove])> {}
class CreateDenseTensorOp<string place, string dtype, string layout>
: PDT_Op<"create_dense_tensor." # place # "." # dtype # "." # layout, [NoSideEffect]> {
let arguments = (ins Allocator:$allocator, I64ArrayAttr:$dims, I64ArrayAttr:$lod);
class CreateDenseTensorOp
: PDT_Op<"create_dense_tensor", [NoSideEffect]> {
let arguments = (ins Context:$context, I64ArrayAttr:$dims,
LayoutAttr:$layout, I64ArrayAttr:$lod, PrecisionAttr:$precision);
let results = (outs DenseTensor:$output);
}
......@@ -44,23 +45,16 @@ class PrintDenseTensorOp:
let assemblyFormat = "`(` $input `:` type($input) `)` attr-dict";
}
class CreateCPUAllocatorOp
: PDT_Op<"create_allocator." # "cpu", [NoSideEffect]> {
class CreateContextOp<string target>
: PDT_Op<"create_context." # target, [NoSideEffect]> {
let arguments = (ins);
let results = (outs Allocator:$output);
}
class CreateCPUContextOp
: PDT_Op<"create_context." # "cpu", [NoSideEffect]> {
let arguments = (ins Allocator:$input);
let results = (outs Context:$output);
}
def PDT_CreateDenseTensorOp_cpu_f32_nchw : CreateDenseTensorOp<"cpu", "f32", "nchw">;
def PDT_CreateDenseTensorOp : CreateDenseTensorOp;
def PDT_FillDenseTensorOp_f32 : FillDenseTensorOp<F32ArrayAttr, "f32">;
def PDT_CreateAllocatorOp_cpu : CreateCPUAllocatorOp;
def PDT_CreateContextOp_cpu : CreateCPUContextOp;
def PDT_PrintDenseTensor_cpu : PrintDenseTensorOp;
def PDT_CreateCPUContextOp : CreateContextOp<"cpu">;
def PDT_PrintDenseTensor : PrintDenseTensorOp;
def FakeKernelOp : PDT_Op<"fake_phi_kernel"> {
let arguments = (ins Context:$dev_ctx, DenseTensor:$x, DenseTensor:$y, BoolAttr:$transpose_x, BoolAttr:$transpose_y);
......
......@@ -14,119 +14,10 @@
#include "paddle/infrt/dialect/phi/pass/kernel_op_desc.h"
#include <glog/logging.h>
#include "paddle/phi/core/kernel_factory.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/infrt/dialect/phi/data_type.h"
#include "paddle/phi/kernels/declarations.h"
namespace infrt {
namespace {
// NOTE(review): every helper in this anonymous namespace duplicates the
// conversion functions that also live in paddle/infrt/dialect/phi/
// data_type.cc. Two copies must be kept in sync by hand; prefer deleting
// this file-local set and including the shared header instead.
// infrt TargetType -> phi backend; unknown targets become UNDEFINED.
phi::Backend cvtTarget2Phi(TargetType target) {
switch (target) {
case TargetType::CPU:
return phi::Backend::CPU;
case TargetType::GPU:
return phi::Backend::GPU;
default:
return phi::Backend::UNDEFINED;
}
}
// phi backend -> infrt TargetType; unknown backends become UNK.
TargetType cvtTargetFromPhi(phi::Backend backend) {
switch (backend) {
case phi::Backend::CPU:
return TargetType::CPU;
case phi::Backend::GPU:
return TargetType::GPU;
default:
return TargetType::UNK;
}
}
// infrt PrecisionType -> phi DataType. The enumerator names agree between
// the two enums, so the macro expands one same-named case per precision;
// unknown precisions become UNDEFINED.
phi::DataType cvtPrecision2Phi(PrecisionType precision) {
#define CONVERT_PRECISION_TO_PHI(Precision) \
case PrecisionType::Precision: \
return phi::DataType::Precision;
switch (precision) {
CONVERT_PRECISION_TO_PHI(FLOAT32)
CONVERT_PRECISION_TO_PHI(FLOAT16)
CONVERT_PRECISION_TO_PHI(FLOAT64)
CONVERT_PRECISION_TO_PHI(UINT8)
CONVERT_PRECISION_TO_PHI(INT8)
CONVERT_PRECISION_TO_PHI(INT16)
CONVERT_PRECISION_TO_PHI(INT32)
CONVERT_PRECISION_TO_PHI(INT64)
CONVERT_PRECISION_TO_PHI(COMPLEX64)
CONVERT_PRECISION_TO_PHI(COMPLEX128)
CONVERT_PRECISION_TO_PHI(BOOL)
default:
return phi::DataType::UNDEFINED;
}
#undef CONVERT_PRECISION_TO_PHI
}
// phi DataType -> infrt PrecisionType; unknown dtypes become UNK.
PrecisionType cvtPrecisionFromPhi(phi::DataType datatype) {
#define CONVERT_PRECISION_FROM_PHI(Precision) \
case phi::DataType::Precision: \
return PrecisionType::Precision;
switch (datatype) {
CONVERT_PRECISION_FROM_PHI(FLOAT32)
CONVERT_PRECISION_FROM_PHI(FLOAT16)
CONVERT_PRECISION_FROM_PHI(FLOAT64)
CONVERT_PRECISION_FROM_PHI(UINT8)
CONVERT_PRECISION_FROM_PHI(INT8)
CONVERT_PRECISION_FROM_PHI(INT16)
CONVERT_PRECISION_FROM_PHI(INT32)
CONVERT_PRECISION_FROM_PHI(INT64)
CONVERT_PRECISION_FROM_PHI(COMPLEX64)
CONVERT_PRECISION_FROM_PHI(COMPLEX128)
CONVERT_PRECISION_FROM_PHI(BOOL)
default:
return PrecisionType::UNK;
}
#undef CONVERT_PRECISION_FROM_PHI
}
// infrt LayoutType -> phi DataLayout; unknown layouts become UNDEFINED.
phi::DataLayout cvtLayout2Phi(LayoutType layout) {
switch (layout) {
case LayoutType::NCHW:
return phi::DataLayout::NCHW;
case LayoutType::NHWC:
return phi::DataLayout::NHWC;
case LayoutType::ANY:
return phi::DataLayout::ANY;
default:
return phi::DataLayout::UNDEFINED;
}
}
// phi DataLayout -> infrt LayoutType; unknown layouts become UNK.
LayoutType cvtLayoutFromPhi(phi::DataLayout layout) {
switch (layout) {
case phi::DataLayout::NCHW:
return LayoutType::NCHW;
case phi::DataLayout::NHWC:
return LayoutType::NHWC;
case phi::DataLayout::ANY:
return LayoutType::ANY;
default:
return LayoutType::UNK;
}
}
// Converts an infrt Place to a phi KernelKey, field by field.
phi::KernelKey cvtPlace2Phi(const Place& place) {
return phi::KernelKey(cvtTarget2Phi(place.target),
cvtLayout2Phi(place.layout),
cvtPrecision2Phi(place.precision));
}
// Converts a phi TensorArgDef back to an infrt Place, field by field.
Place cvtPlaceFromPhi(phi::TensorArgDef tensor_arg) {
return Place(cvtTargetFromPhi(tensor_arg.backend),
cvtPrecisionFromPhi(tensor_arg.dtype),
cvtLayoutFromPhi(tensor_arg.layout));
}
} // namespace
std::string getPhiTargetPrefix(TargetType target) {
switch (target) {
......
......@@ -135,20 +135,12 @@ void phiOpCvtPass::diapatchStage() {
phi_context.end()) {
switch (phi_kernel_desc.kernelType.target) {
case TargetType::CPU: {
auto alloctor_value =
builder
.create<infrt::phi::CreateAllocatorOp_cpu>(
kernel_op.getLoc(),
phi::AllocatorType::get(kernel_op.getContext(),
TargetType::CPU))
.output();
auto context_value =
builder
.create<infrt::phi::CreateContextOp_cpu>(
.create<infrt::phi::CreateCPUContextOp>(
kernel_op.getLoc(),
phi::ContextType::get(kernel_op.getContext(),
TargetType::CPU),
alloctor_value)
TargetType::CPU))
.output();
phi_context[TargetType::CPU] = context_value;
} break;
......
......@@ -173,6 +173,36 @@ boost::optional<double> MlirToRuntimeTranslator::EmitAttribute(
return boost::none;
}
// Extracts an ::infrt::TargetType from an MLIR attribute.
// Returns boost::none when the attribute is not an ::infrt::TargetAttr.
template <>
boost::optional<::infrt::TargetType> MlirToRuntimeTranslator::EmitAttribute(
    const mlir::Attribute& attr) {
  // One guard is enough: the original double-checked isa<TargetAttr> and
  // carried an unreachable trailing `return boost::none;`.
  if (!attr.isa<::infrt::TargetAttr>()) return boost::none;
  return attr.cast<::infrt::TargetAttr>().getTarget();
}
// Extracts an ::infrt::LayoutType from an MLIR attribute.
// Returns boost::none when the attribute is not an ::infrt::LayoutAttr.
template <>
boost::optional<::infrt::LayoutType> MlirToRuntimeTranslator::EmitAttribute(
    const mlir::Attribute& attr) {
  // One guard is enough: the original double-checked isa<LayoutAttr> and
  // carried an unreachable trailing `return boost::none;`.
  if (!attr.isa<::infrt::LayoutAttr>()) return boost::none;
  return attr.cast<::infrt::LayoutAttr>().getLayout();
}
// Extracts an ::infrt::PrecisionType from an MLIR attribute.
// Returns boost::none when the attribute is not an ::infrt::PrecisionAttr.
template <>
boost::optional<::infrt::PrecisionType> MlirToRuntimeTranslator::EmitAttribute(
    const mlir::Attribute& attr) {
  // One guard is enough: the original double-checked isa<PrecisionAttr> and
  // carried an unreachable trailing `return boost::none;`.
  if (!attr.isa<::infrt::PrecisionAttr>()) return boost::none;
  return attr.cast<::infrt::PrecisionAttr>().getPrecision();
}
template <>
boost::optional<std::string> MlirToRuntimeTranslator::EmitAttribute(
const mlir::Attribute& attr) {
......@@ -292,6 +322,13 @@ bool MlirToRuntimeTranslator::EmitGeneralOp(mlir::Operation* op) {
impl_->cur_op->AppendAttribute(new Value(std::move(*v)));
} else if (auto v = EmitAttribute<bool>(attr.getValue())) {
impl_->cur_op->AppendAttribute(new Value(*v));
} else if (auto v = EmitAttribute<::infrt::TargetType>(attr.getValue())) {
impl_->cur_op->AppendAttribute(new Value(*v));
} else if (auto v =
EmitAttribute<::infrt::PrecisionType>(attr.getValue())) {
impl_->cur_op->AppendAttribute(new Value(*v));
} else if (auto v = EmitAttribute<::infrt::LayoutType>(attr.getValue())) {
impl_->cur_op->AppendAttribute(new Value(*v));
} else if (auto v = EmitAttribute<std::vector<int16_t>>(attr.getValue())) {
impl_->cur_op->AppendAttribute(new Value(std::move(*v)));
} else if (auto v = EmitAttribute<std::vector<int32_t>>(attr.getValue())) {
......
......@@ -24,14 +24,6 @@ ValueRef::ValueRef(int64_t val) : Shared<Value>(new Value(val)) {}
// Scalar ValueRef ctors: wrap the primitive in a heap-allocated Value and
// hand ownership to the Shared<Value> base (ref-counted).
ValueRef::ValueRef(float val) : Shared<Value>(new Value(val)) {}
ValueRef::ValueRef(double val) : Shared<Value>(new Value(val)) {}
ValueRef::ValueRef(bool val) : Shared<Value>(new Value(val)) {}
// Move-in ctors for phi/backends payloads: the argument is moved into the
// owned Value, leaving the source in a valid-but-unspecified state.
ValueRef::ValueRef(backends::CpuPhiContext&& val)
: Shared<Value>(new Value(std::move(val))) {}
ValueRef::ValueRef(::phi::CPUContext&& val)
: Shared<Value>(new Value(std::move(val))) {}
ValueRef::ValueRef(::phi::DenseTensor&& val)
: Shared<Value>(new Value(std::move(val))) {}
ValueRef::ValueRef(::phi::MetaTensor&& val)
: Shared<Value>(new Value(std::move(val))) {}
// Returns the class's static type tag used by common::Object RTTI.
const char* Value::type_info() const { return __type_info__; }
......
......@@ -22,6 +22,7 @@
#include "paddle/infrt/common/object.h"
#include "paddle/infrt/common/shared.h"
#include "paddle/infrt/dialect/infrt/common_type.h"
#include "paddle/infrt/host_context/function.h"
#include "paddle/infrt/support/variant.h"
#include "paddle/infrt/tensor/dense_host_tensor.h"
......@@ -64,10 +65,12 @@ using ValueVariantType =
tensor::DenseHostTensor,
MlirFunctionExecutable*,
tensor::TensorMap,
::infrt::PrecisionType,
::infrt::LayoutType,
::infrt::TargetType,
#ifdef INFRT_WITH_PHI
::phi::MetaTensor,
::phi::DenseTensor,
backends::CpuPhiAllocator,
backends::CpuPhiContext,
::phi::CPUContext,
std::vector<const phi::DenseTensor*>,
......@@ -101,6 +104,9 @@ class Value : public common::Object {
explicit Value(float x) : data(x) {}
explicit Value(double x) : data(x) {}
explicit Value(bool x) : data(x) {}
explicit Value(::infrt::TargetType x) : data(x) {}
explicit Value(::infrt::LayoutType x) : data(x) {}
explicit Value(::infrt::PrecisionType x) : data(x) {}
explicit Value(std::string x) : data(x) {}
explicit Value(tensor::TensorMap&& x) : data(x) {}
explicit Value(std::vector<int16_t>&& x) : data(x) {}
......@@ -112,11 +118,10 @@ class Value : public common::Object {
explicit Value(tensor::DenseHostTensor&& x) : data(std::move(x)) {}
explicit Value(MlirFunctionExecutable* x) : data(x) {}
#ifdef INFRT_WITH_PHI
explicit Value(backends::CpuPhiContext&& x) : data(std::move(x)) {}
explicit Value(::phi::CPUContext&& x) : data(std::move(x)) {}
explicit Value(backends::CpuPhiContext&& x) : data(std::move(x)) {}
explicit Value(::phi::DenseTensor&& x) : data(std::move(x)) {}
explicit Value(::phi::MetaTensor&& x) : data(std::move(x)) {}
explicit Value(backends::CpuPhiAllocator&& x) : data(std::move(x)) {}
#endif
template <typename T>
......@@ -179,10 +184,6 @@ class ValueRef : common::Shared<Value> {
explicit ValueRef(float val);
explicit ValueRef(double val);
explicit ValueRef(bool val);
explicit ValueRef(::phi::MetaTensor&& val);
explicit ValueRef(backends::CpuPhiContext&& x);
explicit ValueRef(::phi::CPUContext&& x);
explicit ValueRef(::phi::DenseTensor&& x);
using common::Shared<Value>::get;
using common::Shared<Value>::Reset;
......
......@@ -8,7 +8,6 @@ gather_srcs(infrt_src SRCS
registry.cc
dense_tensor_kernels.cc
context_kernels.cc
allocator_kernels.cc
)
set(infrt_register_phi_kernels_gen_source_file ${CMAKE_SOURCE_DIR}/paddle/infrt/kernel/phi/infershaped/infershaped_kernel_launchers.cc)
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/infrt/kernel/phi/allocator_kernels.h"
namespace infrt {
namespace kernel {
namespace phi {
// Returns a freshly value-constructed CPU allocator.
backends::CpuPhiAllocator CreateCpuAllocator() {
  return backends::CpuPhiAllocator{};
}
} // namespace phi
} // namespace kernel
} // namespace infrt
......@@ -18,12 +18,11 @@ namespace infrt {
namespace kernel {
namespace phi {
::phi::CPUContext CreateCpuContext(
infrt::backends::CpuPhiAllocator* allocator) {
::phi::CPUContext context;
context.SetAllocator(allocator);
context.Init();
return context;
// Creates an initialized phi CPU context wired to a CPU allocator.
//
// NOTE(review): the previous body leaked one CpuPhiAllocator per call —
// `new backends::CpuPhiAllocator{}` was never deleted, and nothing here
// shows SetAllocator taking ownership (confirm against phi::CPUContext).
// The allocator carries no per-context state in this file, so a single
// function-local static with program lifetime serves every context.
::phi::CPUContext CreateCPUContext() {
::phi::CPUContext ctx{};
ctx.Init();
static backends::CpuPhiAllocator allocator;
ctx.SetAllocator(&allocator);
return ctx;
}
} // namespace phi
......
......@@ -16,13 +16,14 @@
#include "paddle/infrt/backends/host/phi_allocator.h"
#include "paddle/infrt/backends/host/phi_context.h"
#include "paddle/infrt/host_context/kernel_utils.h"
#include "paddle/phi/core/dense_tensor.h"
namespace infrt {
namespace kernel {
namespace phi {
::phi::CPUContext CreateCpuContext(::infrt::backends::CpuPhiAllocator*);
::phi::CPUContext CreateCPUContext();
} // namespace phi
} // namespace kernel
......
......@@ -13,20 +13,25 @@
// limitations under the License.
#include "paddle/infrt/kernel/phi/dense_tensor_kernels.h"
#include <iostream>
#include "paddle/infrt/dialect/phi/data_type.h"
#include "paddle/infrt/kernel/phi/context_kernels.h"
namespace infrt {
namespace kernel {
namespace phi {
::phi::DenseTensor CreateDenseTensorCpuF32Nchw(
backends::CpuPhiAllocator* allocator,
::phi::DenseTensor CreateDenseTensor(
const ::phi::CPUContext& context,
host_context::Attribute<std::vector<int64_t>> dims,
host_context::Attribute<std::vector<int64_t>> lod) {
return ::phi::DenseTensor(allocator,
::phi::DenseTensorMeta(::phi::DataType::FLOAT32,
::phi::make_ddim(dims.get()),
::phi::DataLayout::NCHW,
{}));
host_context::Attribute<::infrt::LayoutType> layout,
host_context::Attribute<std::vector<int64_t>> lod,
host_context::Attribute<::infrt::PrecisionType> precision) {
return ::phi::DenseTensor(
const_cast<::phi::Allocator*>(&context.GetAllocator()),
::phi::DenseTensorMeta(cvtPrecision2Phi(precision.get()),
::phi::make_ddim(dims.get()),
cvtLayout2Phi(layout.get()),
{}));
}
void FillDenseTensorF32(::phi::DenseTensor* dense_tensor,
......
......@@ -15,6 +15,7 @@
#pragma once
#include "paddle/infrt/backends/host/phi_allocator.h"
#include "paddle/infrt/dialect/infrt/common_type.h"
#include "paddle/infrt/host_context/kernel_utils.h"
#include "paddle/phi/core/dense_tensor.h"
......@@ -22,10 +23,12 @@ namespace infrt {
namespace kernel {
namespace phi {
::phi::DenseTensor CreateDenseTensorCpuF32Nchw(
backends::CpuPhiAllocator* allocator,
::phi::DenseTensor CreateDenseTensor(
const ::phi::CPUContext& context,
host_context::Attribute<std::vector<int64_t>> dims,
host_context::Attribute<std::vector<int64_t>> lod);
host_context::Attribute<::infrt::LayoutType> layout,
host_context::Attribute<std::vector<int64_t>> lod,
host_context::Attribute<::infrt::PrecisionType> precision);
void FillDenseTensorF32(::phi::DenseTensor* dense_tensor,
host_context::Attribute<std::vector<float>> values);
......
......@@ -24,7 +24,8 @@ void InferShapedKernelLauncher::CreateKernelFrameForInferShape(
frame->GetValues(1, frame->GetNumElements() - 1)) {
// TODO(Superjomn) To extend this.
if (value->is_type<::phi::DenseTensor>()) {
values.emplace_back(::phi::MetaTensor{&value->get<::phi::DenseTensor>()});
values.emplace_back(new host_context::Value{
::phi::MetaTensor{&value->get<::phi::DenseTensor>()}});
infershape_kernel_frame_builder.AddArgument(values.back().get());
} else {
infershape_kernel_frame_builder.AddArgument(value);
......
......@@ -19,7 +19,6 @@
#include "paddle/infrt/host_context/kernel_registry.h"
#include "paddle/infrt/host_context/kernel_utils.h"
#include "paddle/infrt/kernel/phi/allocator_kernels.h"
#include "paddle/infrt/kernel/phi/context_kernels.h"
#include "paddle/infrt/kernel/phi/dense_tensor_kernels.h"
#include "paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h"
......@@ -33,13 +32,10 @@ namespace infrt {
namespace kernel {
void RegisterPhiKernels(host_context::KernelRegistry* registry) {
registry->AddKernel("phi_dt.create_allocator.cpu",
INFRT_KERNEL(infrt::kernel::phi::CreateCpuAllocator));
registry->AddKernel("phi_dt.create_context.cpu",
INFRT_KERNEL(infrt::kernel::phi::CreateCpuContext));
registry->AddKernel(
"phi_dt.create_dense_tensor.cpu.f32.nchw",
INFRT_KERNEL(infrt::kernel::phi::CreateDenseTensorCpuF32Nchw));
INFRT_KERNEL(infrt::kernel::phi::CreateCPUContext));
registry->AddKernel("phi_dt.create_dense_tensor",
INFRT_KERNEL(infrt::kernel::phi::CreateDenseTensor));
registry->AddKernel("phi_dt.fill_dense_tensor.f32",
INFRT_KERNEL(infrt::kernel::phi::FillDenseTensorF32));
registry->AddKernel("phi_dt.print_tensor",
......
......@@ -2,9 +2,10 @@
// CHECK-LABEL: @sign_any_float32_execute
func @sign_any_float32_execute() {
%allocator = "phi_dt.create_allocator.cpu" (): () -> !phi.allocator<CPU>
%ctx = "phi_dt.create_context.cpu" (%allocator): (!phi.allocator<CPU>) -> !phi.context<CPU>
%t = "phi_dt.create_dense_tensor.cpu.f32.nchw" (%allocator) {dims=[1:i64], lod=[1:i64]}: (!phi.allocator<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
%ctx = "phi_dt.create_context.cpu" (): () -> !phi.context<CPU>
%t = "phi_dt.create_dense_tensor" (%ctx) {
precision=#infrt.precision<FP32>,
layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[1:i64]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
"phi_dt.fill_dense_tensor.f32"(%t) {value=[3.8:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
%e = "phi_cpu.sign.float32.any"(%ctx, %t) : (!phi.context<CPU>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册