Commit 5009a3b6 authored by jackzhang235

fix some small errors

Parent 53ef685e
@@ -75,7 +75,7 @@ int TransposeConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   std::vector<int> axis_nhwc;
   if (axis.size() == 4) {
     axis_nhwc = axis_to_nhwc4d(axis);
-  } else if (axis.size(0 == 3)) {
+  } else if (axis.size() == 3) {
     axis_nhwc = axis_to_nhw3d(axis);
   } else {
     CHECK(0) << "Unsupport dim in mlu transpose";
...
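A note on this first hunk: `std::vector::size()` takes no arguments, so the old form `axis.size(0 == 3)` passed the boolean expression `0 == 3` into `size()` and could not compile; the fix moves the comparison outside the call. A minimal, self-contained sketch of the corrected check, assuming `axis` is a `std::vector<int>` (as the neighboring declaration of `axis_nhwc` suggests) and with illustrative vector contents:

#include <cassert>
#include <vector>

int main() {
  std::vector<int> axis = {0, 2, 1};  // a rank-3 transpose permutation
  // Old form (does not compile): axis.size(0 == 3)
  //   std::vector::size() takes no arguments, so passing the bool (0 == 3) is rejected.
  assert(axis.size() == 3);  // fixed form: compare the size against 3
  return 0;
}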
@@ -29,12 +29,12 @@ REGISTER_LITE_KERNEL(
     kNHWC,
     paddle::lite::kernels::mlu::LayoutNhwcToNchwCompute<PRECISION(kFloat)>,
     def_layout_nhwc2nchw_fp32)
-    .BindInput("Inputs",
-               {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindInput("Input",
+               {LiteType::GetTensorTy(TARGET(kHost),
                                       PRECISION(kFloat),
                                       DATALAYOUT(kNHWC))})
-    .BindOutput("Outputs",
-                {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindOutput("Out",
+                {LiteType::GetTensorTy(TARGET(kHost),
                                        PRECISION(kFloat),
                                        DATALAYOUT(kNCHW))})
     .Finalize();
@@ -46,12 +46,12 @@ REGISTER_LITE_KERNEL(
     kNHWC,
     paddle::lite::kernels::mlu::LayoutNhwcToNchwCompute<PRECISION(kFP16)>,
     def_layout_nhwc2nchw_fp16)
-    .BindInput("Inputs",
-               {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindInput("Input",
+               {LiteType::GetTensorTy(TARGET(kHost),
                                       PRECISION(kFloat),
                                       DATALAYOUT(kNHWC))})
-    .BindOutput("Outputs",
-                {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindOutput("Out",
+                {LiteType::GetTensorTy(TARGET(kHost),
                                        PRECISION(kFloat),
                                        DATALAYOUT(kNCHW))})
     .Finalize();
@@ -63,12 +63,12 @@ REGISTER_LITE_KERNEL(
     kNHWC,
     paddle::lite::kernels::mlu::LayoutNchwToNhwcCompute<PRECISION(kFloat)>,
     def_layout_nchw2nhwc_fp32)
-    .BindInput("Inputs",
-               {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindInput("Input",
+               {LiteType::GetTensorTy(TARGET(kHost),
                                       PRECISION(kFloat),
                                       DATALAYOUT(kNCHW))})
-    .BindOutput("Outputs",
-                {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindOutput("Out",
+                {LiteType::GetTensorTy(TARGET(kHost),
                                        PRECISION(kFloat),
                                        DATALAYOUT(kNHWC))})
     .Finalize();
@@ -80,12 +80,12 @@ REGISTER_LITE_KERNEL(
     kNHWC,
     paddle::lite::kernels::mlu::LayoutNchwToNhwcCompute<PRECISION(kFP16)>,
     def_layout_nchw2nhwc_fp16)
-    .BindInput("Inputs",
-               {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindInput("Input",
+               {LiteType::GetTensorTy(TARGET(kHost),
                                       PRECISION(kFloat),
                                       DATALAYOUT(kNCHW))})
-    .BindOutput("Outputs",
-                {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindOutput("Out",
+                {LiteType::GetTensorTy(TARGET(kHost),
                                        PRECISION(kFloat),
                                        DATALAYOUT(kNHWC))})
     .Finalize();
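All four registration hunks above make the same two corrections: the bound parameter names change from "Inputs"/"Outputs" to "Input"/"Out", and the tensor target changes from kMLU to kHost, presumably because these layout-transform kernels operate on host-side tensors. The names matter because kernel I/O is resolved by string key against the names the op declares, so a mismatched name means the binding is never found. A stand-alone sketch of that failure mode (not Paddle-Lite code; the map is an illustrative stand-in for the binding table):

#include <iostream>
#include <map>
#include <string>

int main() {
  // Stand-in for a kernel's binding table, keyed by parameter name.
  std::map<std::string, int> bindings;
  bindings["Inputs"] = 0;  // old, mismatched key

  // The layout op resolves its input under the name "Input":
  if (bindings.find("Input") == bindings.end()) {
    std::cout << "binding not found: registered name does not match\n";
  }
  return 0;
}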
@@ -86,9 +86,8 @@ class LayoutNchwToNhwcCompute
       CHECK(0) << "Unsupport dim in mlu layout nchw to nhwc";
     }
-    LayoutTransCompute<lite::TargetType::X86, float>(
+    LayoutTransCompute<lite::TargetType::kX86, float>(
         x_dims, context, *x, out, axis);
-    )
   }
   std::string doc() const override {
@@ -129,9 +128,8 @@ class LayoutNhwcToNchwCompute
       CHECK(0) << "Unsupport dim in mlu layout nhwc to nchw";
     }
-    LayoutTransCompute<lite::TargetType::X86, float>(
+    LayoutTransCompute<lite::TargetType::kX86, float>(
         x_dims, context, *x, out, axis);
-    )
   }
   std::string doc() const override {
...
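The last two hunks fix the same pair of issues in both layout-compute classes: a stray closing parenthesis after the call is removed, and the enumerator is corrected from `X86` to `kX86`. Paddle-Lite's `TargetType` enumerators carry a leading `k` (the diff itself uses `kX86`, `kMLU`, `kHost`), so `TargetType::X86` names no enumerator and fails to compile. A simplified sketch of the enumerator fix (the enum here is a stand-in, not the full Paddle-Lite definition):

#include <iostream>

enum class TargetType { kHost, kX86, kMLU };  // simplified stand-in

template <TargetType Target, typename T>
void LayoutTransCompute() {
  std::cout << "layout transform dispatched\n";  // placeholder body
}

int main() {
  // LayoutTransCompute<TargetType::X86, float>();  // old: no enumerator 'X86'
  LayoutTransCompute<TargetType::kX86, float>();    // fixed: 'kX86' exists
  return 0;
}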