Commit 56c1f666 authored by dingminghui, committed by jackzhang235

fix(mlu): fix unittest error caused by cancelling expanding tensor to 4 dims

Parent cf688607
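The recurring change in this commit: since inputs are no longer globally expanded to 4 dims, each converter now pads its constant tensors (mean, variance, bias) to 4 dims itself and registers them with the CNML_NHWC layout, reading the channel count from dims[3]. A minimal standalone sketch of that padding, using the hypothetical helper name ExpandTo4Dims (not part of the patch):

#include <cstdint>
#include <vector>

// Pad a shape to 4 dims by prepending 1s, as the converters below now do
// before creating CNML const nodes: {C} becomes {1, 1, 1, C}, so under the
// NHWC layout the channel count is dims[3].
std::vector<int64_t> ExpandTo4Dims(std::vector<int64_t> dims) {  // hypothetical name
  if (dims.size() < 4) {
    dims.insert(dims.begin(), 4 - dims.size(), 1);
  }
  return dims;
}
// Example: ExpandTo4Dims({32}) yields {1, 1, 1, 32}.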
@@ -48,18 +48,24 @@ int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) {
auto mean = scope->FindVar(mean_var_name)->GetMutable<Tensor>();
auto mean_dims = mean->dims().Vectorize();
+ if (mean_dims.size() < 4) {
+ mean_dims.insert(mean_dims.begin(), 4 - mean_dims.size(), 1);
+ }
auto mean_tensor = graph->AddNode(
- mean_var_name, mean_dims, CNML_CONST, CNML_CNHW, graph->FPType());
+ mean_var_name, mean_dims, CNML_CONST, CNML_NHWC, graph->FPType());
auto variance = scope->FindVar(variance_var_name)->GetMutable<Tensor>();
auto variance_dims = variance->dims().Vectorize();
+ if (variance_dims.size() < 4) {
+ variance_dims.insert(variance_dims.begin(), 4 - variance_dims.size(), 1);
+ }
auto variance_tensor = graph->AddNode(
- variance_var_name, variance_dims, CNML_CONST, CNML_CNHW, graph->FPType());
+ variance_var_name, variance_dims, CNML_CONST, CNML_NHWC, graph->FPType());
auto scale = scope->FindVar(scale_var_name)->GetMutable<Tensor>();
auto bias = scope->FindVar(bias_var_name)->GetMutable<Tensor>();
- int co = static_cast<int>(mean_dims[0]);
+ int co = static_cast<int>(mean_dims[3]);
std::vector<float> variance_trans(co);
std::vector<float> mean_trans(co);
......
@@ -87,6 +87,7 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
is_group_mode = false;
}
+ auto input_tensor = graph->GetNode(input_var_name);
const auto output_tensor = graph->AddNode(
output_var_name, output_shape, CNML_TENSOR, CNML_NCHW, graph->FPType());
std::vector<int64_t> cnml_filter_shape = {
@@ -142,7 +143,7 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
std::vector<int64_t> bias_shape;
if (bias_data_size == oc) {
// 0: {oc}
- bias_shape = {oc};
+ bias_shape = {1, 1, 1, oc};
} else if (bias_data_size == output_data_size / bs) {
LOG(FATAL) << "Unsupported ... ...";
// 1: {1, oc, oh, ow}
@@ -156,11 +157,8 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
<< " isn't supported in conv2d Op when output dimension is "
<< output_dims;
}
- bias_tensor = graph->AddNode(bias_var_name,
- bias_dims.Vectorize(),
- CNML_CONST,
- CNML_CNHW,
- graph->FPType());
+ bias_tensor = graph->AddNode(
+ bias_var_name, bias_shape, CNML_CONST, CNML_NHWC, graph->FPType());
graph->BindConstData(bias_var_name, bias);
}
@@ -184,14 +182,14 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
paddings[0],
paddings[0]));
const auto mean_tensor = graph->AddNode("first_conv_mean_tensor",
- std::vector<int64_t>{3},
+ std::vector<int64_t>{1, 1, 1, 3},
CNML_CONST,
- CNML_CNHW,
+ CNML_NHWC,
graph->FPType());
const auto std_tensor = graph->AddNode("first_conv_std_tensor",
- std::vector<int64_t>{3},
+ std::vector<int64_t>{1, 1, 1, 3},
CNML_CONST,
- CNML_CNHW,
+ CNML_NHWC,
graph->FPType());
graph->BindConstRawData("first_conv_mean_tensor",
@@ -203,11 +201,11 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
3,
false);
- graph->GetNode(input_var_name)->set_mlu_dtype(CNML_DATA_UINT8);
+ input_tensor->set_mlu_dtype(CNML_DATA_UINT8);
CNML_CALL(cnmlCreateConvFirstOpForward(
&conv_op,
conv_param,
- graph->GetNode(input_var_name)->mlu_tensor(),
+ input_tensor->mlu_tensor(),
mean_tensor->mlu_tensor(),
output_tensor->mlu_tensor(),
filter_tensor->mlu_tensor(),
@@ -224,7 +222,7 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
CNML_CALL(cnmlCreateConvDepthwiseOpForward(
&conv_op,
conv_depthwise_param,
- graph->GetNode(input_var_name)->mlu_tensor(),
+ input_tensor->mlu_tensor(),
output_tensor->mlu_tensor(),
filter_tensor->mlu_tensor(),
bias_tensor ? bias_tensor->mlu_tensor() : nullptr));
@@ -241,7 +239,7 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
CNML_CALL(cnmlCreateConvGroupOpForward(
&conv_op,
conv_param,
- graph->GetNode(input_var_name)->mlu_tensor(),
+ input_tensor->mlu_tensor(),
output_tensor->mlu_tensor(),
filter_tensor->mlu_tensor(),
bias_tensor ? bias_tensor->mlu_tensor() : nullptr,
@@ -263,7 +261,7 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
CNML_CALL(cnmlCreateConvOpForward(
&conv_op,
conv_param,
- graph->GetNode(input_var_name)->mlu_tensor(),
+ input_tensor->mlu_tensor(),
output_tensor->mlu_tensor(),
filter_tensor->mlu_tensor(),
bias_tensor ? bias_tensor->mlu_tensor() : nullptr));
......
@@ -33,15 +33,15 @@ int DropoutConverter(void* ctx, OpLite* op, KernelBase* kernel) {
// Create act node and set params from op
auto x_var_name = op_info->Input("X").front();
auto out_var_name = op_info->Output("Out").front();
- auto mask_var_name = op_info->Output("Mask").front();
+ /* auto mask_var_name = op_info->Output("Mask").front(); */
auto output = scope->FindVar(out_var_name)->GetMutable<Tensor>();
auto output_dims = output->dims().Vectorize();
auto output_tensor = graph->AddNode(
out_var_name, output_dims, CNML_TENSOR, CNML_NCHW, graph->FPType());
- auto mask = scope->FindVar(mask_var_name)->GetMutable<Tensor>();
- auto mask_dims = mask->dims().Vectorize();
- auto mask_tensor = graph->AddNode(
- mask_var_name, mask_dims, CNML_TENSOR, CNML_NCHW, graph->FPType());
+ /* auto mask = scope->FindVar(mask_var_name)->GetMutable<Tensor>(); */
+ /* auto mask_dims = mask->dims().Vectorize(); */
+ /* auto mask_tensor = graph->AddNode( */
+ /* mask_var_name, mask_dims, CNML_TENSOR, CNML_NCHW, graph->FPType()); */
// is_test is true by default
// if(op_info->HasAttr("is_test")){
......
@@ -82,15 +82,15 @@ int FCConverter(void* ctx, OpLite* op, KernelBase* kernel) {
if (HasInputArg(op_info, scope, "Bias")) {
bias_var_name = op_info->Input("Bias").front();
auto bias = scope->FindVar(bias_var_name)->GetMutable<lite::Tensor>();
- auto bias_dims = bias->dims();
+ auto bias_dims = bias->dims().Vectorize();
CHECK(!graph->HasNode(bias_var_name));
+ if (bias_dims.size() < 4u) {
+ bias_dims.insert(bias_dims.begin(), 4 - bias_dims.size(), 1);
+ }
// CHECK_EQ(bias_dims.production(), n);
- bias_tensor = graph->AddNode(bias_var_name,
- bias_dims.Vectorize(),
- CNML_CONST,
- CNML_CNHW,
- graph->FPType());
+ bias_tensor = graph->AddNode(
+ bias_var_name, bias_dims, CNML_CONST, CNML_NHWC, graph->FPType());
graph->BindConstData(bias_var_name, bias);
}
cnmlBaseOp_t fc_op;
......
@@ -175,9 +175,9 @@ void test_fc(const std::vector<int64_t>& input_shape,
TEST(MLUBridges, fc) {
for (bool use_bias : {true, false}) {
- // test_fc({1, 8, 8, 1}, {64, 4}, 1, use_bias);
- // test_fc({1, 5, 5, 1}, {25, 7}, 1, use_bias);
- // test_fc({1, 4, 1, 1}, {4, 8}, 1, use_bias);
+ test_fc({1, 8, 8, 1}, {64, 4}, 1, use_bias);
+ test_fc({1, 5, 5, 1}, {25, 7}, 1, use_bias);
+ test_fc({1, 4, 1, 1}, {4, 8}, 1, use_bias);
test_fc({1, 1024, 1, 1}, {1024, 32}, 1, use_bias);
}
}
......
@@ -61,6 +61,7 @@ int LayoutConverter(void* ctx, OpLite* op, KernelBase* kernel) {
}
output_tensor = graph->AddNode(
out_var_name, output_dims, CNML_TENSOR, CNML_NCHW, graph->FPType());
+ VLOG(3) << "layout transpose nchw to nhwc" << std::endl;
} else {
switch (x_dims.size()) {
case 2:
@@ -78,6 +79,7 @@ int LayoutConverter(void* ctx, OpLite* op, KernelBase* kernel) {
default:
CHECK(0) << "Unsupport shpae";
}
+ VLOG(3) << "layout transpose nhwc to nchw" << std::endl;
output_tensor = graph->AddNode(out_var_name,
output_dims,
CNML_TENSOR,
......
@@ -35,9 +35,16 @@ int SoftmaxConverter(void* ctx, OpLite* op, KernelBase* kernel) {
auto out_var_name = op_info->Output("Out").front();
auto output = scope->FindVar(out_var_name)->GetMutable<Tensor>();
auto output_dims = output->dims().Vectorize();
+ auto x_shape =
+ scope->FindVar(x_var_name)->GetMutable<Tensor>()->dims().Vectorize();
// nchw axis to nhwc aixs
- int nchw_to_nhwc_aixs_map[4] = {0, 3, 1, 2};
+ std::vector<int> nchw2nhwc_axis(x_shape.size());
+ nchw2nhwc_axis[0] = 0;
+ if (x_shape.size() > 1) nchw2nhwc_axis[1] = x_shape.size() - 1;
+ for (size_t i = 2; i < x_shape.size(); ++i) {
+ nchw2nhwc_axis[i] = i - 1;
+ }
int axis = 1;
if (op_info->HasAttr("axis")) {
axis = op_info->GetAttr<int>("axis");
@@ -45,7 +52,7 @@ int SoftmaxConverter(void* ctx, OpLite* op, KernelBase* kernel) {
axis = output_dims.size() + axis;
}
}
- int nhwc_axis = nchw_to_nhwc_aixs_map[axis];
+ int nhwc_axis = nchw2nhwc_axis[axis];
auto output_tensor = graph->AddNode(
out_var_name, output_dims, CNML_TENSOR, CNML_NCHW, graph->FPType());
......
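The softmax change above drops the hard-coded 4-D axis table {0, 3, 1, 2} in favour of a map built from the input rank, so tensors that are no longer expanded to 4 dims still map their softmax axis to the correct NHWC axis. A standalone sketch of that mapping, mirroring the logic added in the diff (hypothetical helper name, not from the patch):

#include <cstddef>
#include <vector>

// Build the NCHW->NHWC axis map for an arbitrary rank: axis 0 stays put,
// the channel axis (1) moves to the end, and the remaining axes shift
// down by one.
std::vector<int> BuildNchwToNhwcAxis(size_t rank) {  // hypothetical name
  std::vector<int> axis_map(rank);
  if (rank == 0) return axis_map;  // guard; the converter assumes rank >= 1
  axis_map[0] = 0;
  if (rank > 1) axis_map[1] = static_cast<int>(rank) - 1;
  for (size_t i = 2; i < rank; ++i) {
    axis_map[i] = static_cast<int>(i) - 1;
  }
  return axis_map;
}
// rank 4 -> {0, 3, 1, 2} (same as the old table); rank 3 -> {0, 2, 1}; rank 2 -> {0, 1}.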