Commit 5009a3b6 authored by jackzhang235

fix some little error

Parent 53ef685e
@@ -75,7 +75,7 @@ int TransposeConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   std::vector<int> axis_nhwc;
   if (axis.size() == 4) {
     axis_nhwc = axis_to_nhwc4d(axis);
-  } else if (axis.size(0 == 3)) {
+  } else if (axis.size() == 3) {
     axis_nhwc = axis_to_nhw3d(axis);
   } else {
     CHECK(0) << "Unsupport dim in mlu transpose";
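The parenthesis fix above is more than cosmetic: `axis.size(0 == 3)` passes the boolean `0 == 3` as an argument to `std::vector::size()`, which takes none, so the old line cannot compile. A minimal compilable sketch of the corrected rank dispatch, with hypothetical stand-ins for the converter's axis helpers:

```cpp
#include <cassert>
#include <vector>

// Hypothetical stand-ins for the converter's helpers; the real ones permute
// the axis order between NCHW and NHWC.
std::vector<int> axis_to_nhwc4d(const std::vector<int>& axis) { return axis; }
std::vector<int> axis_to_nhw3d(const std::vector<int>& axis) { return axis; }

std::vector<int> DispatchTransposeAxis(const std::vector<int>& axis) {
  std::vector<int> axis_nhwc;
  if (axis.size() == 4) {
    axis_nhwc = axis_to_nhwc4d(axis);
  } else if (axis.size() == 3) {  // was `axis.size(0 == 3)`: size() takes no argument
    axis_nhwc = axis_to_nhw3d(axis);
  } else {
    assert(false && "unsupported rank for mlu transpose");
  }
  return axis_nhwc;
}
```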
@@ -29,12 +29,12 @@ REGISTER_LITE_KERNEL(
     kNHWC,
     paddle::lite::kernels::mlu::LayoutNhwcToNchwCompute<PRECISION(kFloat)>,
     def_layout_nhwc2nchw_fp32)
-    .BindInput("Inputs",
-               {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindInput("Input",
+               {LiteType::GetTensorTy(TARGET(kHost),
                                       PRECISION(kFloat),
                                       DATALAYOUT(kNHWC))})
-    .BindOutput("Outputs",
-               {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindOutput("Out",
+               {LiteType::GetTensorTy(TARGET(kHost),
                                       PRECISION(kFloat),
                                       DATALAYOUT(kNCHW))})
     .Finalize();
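This and the three registrations that follow get the same two corrections: the argument names change to `Input`/`Out`, presumably to match the names the layout op declares (Lite matches kernel bindings to op arguments by these strings, so `Inputs`/`Outputs` would never bind), and the bound tensors are declared on `kHost` rather than `kMLU`. For reference, the fp32 NHWC-to-NCHW registration as it reads after the commit; the macro's leading arguments sit above the hunk and stay elided:

```cpp
// Post-commit state of the first registration. The leading macro arguments
// (op type, target, precision) are outside the diff hunk and remain elided.
REGISTER_LITE_KERNEL(
    /* op type, target, precision: elided above the hunk */
    kNHWC,
    paddle::lite::kernels::mlu::LayoutNhwcToNchwCompute<PRECISION(kFloat)>,
    def_layout_nhwc2nchw_fp32)
    .BindInput("Input",
               {LiteType::GetTensorTy(TARGET(kHost),
                                      PRECISION(kFloat),
                                      DATALAYOUT(kNHWC))})
    .BindOutput("Out",
               {LiteType::GetTensorTy(TARGET(kHost),
                                      PRECISION(kFloat),
                                      DATALAYOUT(kNCHW))})
    .Finalize();
```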
@@ -46,12 +46,12 @@ REGISTER_LITE_KERNEL(
     kNHWC,
     paddle::lite::kernels::mlu::LayoutNhwcToNchwCompute<PRECISION(kFP16)>,
     def_layout_nhwc2nchw_fp16)
-    .BindInput("Inputs",
-               {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindInput("Input",
+               {LiteType::GetTensorTy(TARGET(kHost),
                                       PRECISION(kFloat),
                                       DATALAYOUT(kNHWC))})
-    .BindOutput("Outputs",
-               {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindOutput("Out",
+               {LiteType::GetTensorTy(TARGET(kHost),
                                       PRECISION(kFloat),
                                       DATALAYOUT(kNCHW))})
     .Finalize();
@@ -63,12 +63,12 @@ REGISTER_LITE_KERNEL(
     kNHWC,
     paddle::lite::kernels::mlu::LayoutNchwToNhwcCompute<PRECISION(kFloat)>,
     def_layout_nchw2nhwc_fp32)
-    .BindInput("Inputs",
-               {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindInput("Input",
+               {LiteType::GetTensorTy(TARGET(kHost),
                                       PRECISION(kFloat),
                                       DATALAYOUT(kNCHW))})
-    .BindOutput("Outputs",
-               {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindOutput("Out",
+               {LiteType::GetTensorTy(TARGET(kHost),
                                       PRECISION(kFloat),
                                       DATALAYOUT(kNHWC))})
     .Finalize();
@@ -80,12 +80,12 @@ REGISTER_LITE_KERNEL(
     kNHWC,
     paddle::lite::kernels::mlu::LayoutNchwToNhwcCompute<PRECISION(kFP16)>,
     def_layout_nchw2nhwc_fp16)
-    .BindInput("Inputs",
-               {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindInput("Input",
+               {LiteType::GetTensorTy(TARGET(kHost),
                                       PRECISION(kFloat),
                                       DATALAYOUT(kNCHW))})
-    .BindOutput("Outputs",
-               {LiteType::GetTensorTy(TARGET(kMLU),
+    .BindOutput("Out",
+               {LiteType::GetTensorTy(TARGET(kHost),
                                       PRECISION(kFloat),
                                       DATALAYOUT(kNHWC))})
     .Finalize();
@@ -86,9 +86,8 @@ class LayoutNchwToNhwcCompute
       CHECK(0) << "Unsupport dim in mlu layout nchw to nhwc";
     }
-    LayoutTransCompute<lite::TargetType::X86, float>(
+    LayoutTransCompute<lite::TargetType::kX86, float>(
         x_dims, context, *x, out, axis);
-    )
   }
   std::string doc() const override {
@@ -129,9 +128,8 @@ class LayoutNhwcToNchwCompute
       CHECK(0) << "Unsupport dim in mlu layout nhwc to nchw";
     }
-    LayoutTransCompute<lite::TargetType::X86, float>(
+    LayoutTransCompute<lite::TargetType::kX86, float>(
        x_dims, context, *x, out, axis);
-    )
   }
   std::string doc() const override {
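Both layout compute classes get the same pair of fixes: `lite::TargetType::X86` becomes `kX86` — Lite's target enumerators carry a `k` prefix, as `kMLU` and `kHost` above show, so the unprefixed name does not exist — and a stray closing parenthesis left after the call is removed. A small self-contained sketch of the naming point, using an assumed enumerator subset:

```cpp
// Assumed subset of Lite's target enumerators, for illustration only.
enum class TargetType { kUnk = 0, kHost, kX86, kMLU };

// Stand-in for the real layout transform template.
template <TargetType Target, typename T>
void LayoutTransCompute() {}

int main() {
  LayoutTransCompute<TargetType::kX86, float>();  // compiles
  // LayoutTransCompute<TargetType::X86, float>();  // error: 'X86' is not a member
}
```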