Unverified commit 00b0344b, authored by MaxwellDing, committed by GitHub

[MLU] fix: use new scale api, test=develop (#3942)

Parent 42ab4d55
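The commit moves the MLU bridges off the loosely-typed "input_scale"/"weight_scale" string attributes and onto OpInfo's per-input scale API. A minimal before/after sketch of the read side, with variable names borrowed from the hunks below (that GetInputScale returns one float per channel, so a per-tensor scale is read back via [0], is inferred from these call sites rather than stated anywhere in the diff):

    // Before: scales live as free-form attributes on the op descriptor.
    const auto old_input_scale = op_info->GetAttr<float>("input_scale");
    const auto old_weight_scale =
        op_info->GetAttr<std::vector<float>>("weight_scale");

    // After: scales are keyed by the input variable they describe;
    // a per-tensor scale comes back as a one-element vector, hence [0].
    const auto new_input_scale = op_info->GetInputScale(x_var_name)[0];
    const auto new_weight_scale = op_info->GetInputScale(w_var_name);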
@@ -107,8 +107,7 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
                                        CNML_FILTER,
                                        CNML_NCHW,
                                        graph->FPType());
-  const auto weight_scale =
-      op_info->GetAttr<std::vector<float>>("weight_scale");
+  const auto weight_scale = op_info->GetInputScale(filter_var_name);
   if (filter->precision() == PrecisionType::kUnk ||
       filter->precision() == PrecisionType::kInt8) {
@@ -162,7 +161,7 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
     graph->BindConstData(bias_var_name, bias);
   }
-  const auto input_scale = op_info->GetAttr<float>("input_scale");
+  const auto input_scale = op_info->GetInputScale(input_var_name)[0];
   bool use_first_conv = false;
   if (lite::TargetWrapperMlu::UseFirstConv() && input_dims[1] == 3) {
...
@@ -224,8 +224,10 @@ void test_conv(int bs,
   opdesc_mlu.SetAttr("groups", groups);
   opdesc_mlu.SetAttr("fuse_relu", static_cast<bool>(fuse_relu));
-  opdesc_mlu.SetAttr("weight_scale", std::vector<float>(oc, filter_scale));
-  opdesc_mlu.SetAttr("input_scale", input_scale);
+  OpInfo op_info(opdesc_mlu);
+  op_info.SetInputScale(filter_int_var_name,
+                        std::vector<float>(oc, filter_scale));
+  op_info.SetInputScale(input_var_name, {input_scale});
   if (has_bias) {
     if (is_channel_bias) {
@@ -234,7 +236,7 @@ void test_conv(int bs,
       bias->Resize({output_shape});
     }
     FillTensor<float>(bias);
-    opdesc_mlu.SetInput("Bias", {bias_var_name});
+    op_info.SetInput("Bias", {bias_var_name});
   }
   for (int i = 0; i < bs; i++) {
@@ -248,7 +250,7 @@ void test_conv(int bs,
   }
   // create and convert op to MLU model, then run it on MLU
-  auto op = CreateOp<operators::ConvOpLite>(opdesc_mlu, &scope);
+  auto op = CreateOp<operators::ConvOpLite>(op_info, &scope);
   LaunchOp(op, {input_var_name}, {output_var_name});
   // compare results
   auto* output_data = output->mutable_data<float>();
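On the test side the recurring pattern, condensed from the conv hunks above: build the cpp::OpDesc as before, wrap it in an OpInfo, attach the scales to the quantized input variables, and pass the OpInfo rather than the raw descriptor to CreateOp. A sketch (names as in the test):

    OpInfo op_info(opdesc_mlu);  // OpInfo wraps the cpp::OpDesc
    op_info.SetInputScale(filter_int_var_name,
                          std::vector<float>(oc, filter_scale));  // per-channel
    op_info.SetInputScale(input_var_name, {input_scale});         // per-tensor
    auto op = CreateOp<operators::ConvOpLite>(op_info, &scope);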
...
@@ -68,7 +68,7 @@ int FCConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   auto w_tensor = graph->AddNode(
       w_var_name, cnml_w_shape, CNML_FILTER, CNML_NCHW, graph->FPType());
-  auto input_scale = op_info->GetAttr<float>("input_scale");
+  auto input_scale = op_info->GetInputScale(x_var_name)[0];
   auto output_tensor = graph->AddNode(output_var_name,
                                       output->dims().Vectorize(),
@@ -101,7 +101,7 @@ int FCConverter(void* ctx, OpLite* op, KernelBase* kernel) {
                               bias_tensor ? bias_tensor->mlu_tensor() : nullptr));
   graph->SetComputingDataType(
       fc_op, graph->GetNode(x_var_name)->mlu_tensor(), 1 / input_scale);
-  auto weight_scale = op_info->GetAttr<std::vector<float>>("weight_scale");
+  auto weight_scale = op_info->GetInputScale(w_var_name);
   // LOG(INFO) << "W precision " << int(w->precision());
   if (w->precision() == PrecisionType::kUnk ||
...
@@ -131,14 +131,15 @@ void test_fc(const std::vector<int64_t>& input_shape,
   fc_op_desc_mlu.SetOutput("Out", {out_var_name});
   fc_op_desc_mlu.SetAttr("in_num_col_dims", static_cast<int>(in_num_col_dims));
-  fc_op_desc_mlu.SetAttr("weight_scale",
-                         std::vector<float>(w_shape[1], w_scale));
-  fc_op_desc_mlu.SetAttr("input_scale", input_scale);
+  OpInfo op_info(fc_op_desc_mlu);
+  op_info.SetInputScale(w_int_var_name,
+                        std::vector<float>(w_shape[1], w_scale));
+  op_info.SetInputScale(input_var_name, {input_scale});
   if (has_bias) {
-    fc_op_desc_mlu.SetInput("Bias", {bias_var_name});
+    op_info.SetInput("Bias", {bias_var_name});
   }
-  auto fc_op_mlu = CreateOp<operators::FcOpLite>(fc_op_desc_mlu, &scope);
+  auto fc_op_mlu = CreateOp<operators::FcOpLite>(op_info, &scope);
   Tensor input_tmp, out_tmp;
   input_tmp.Resize(input_shape);
...
@@ -49,8 +49,7 @@ int LrnConverter(void* ctx, OpLite* op, KernelBase* kernel) {
         << "Unsuport WithinChannel";
   }
   auto local_size = op_info->GetAttr<int>("n");
-  CHECK(op_info->HasAttr("input_scale"));
-  auto input_scale = op_info->GetAttr<float>("input_scale");
+  auto input_scale = op_info->GetInputScale(x_var_name)[0];
   VLOG(5) << "lrn input scale: " << input_scale;
   cnmlLrnOpParam_t param;
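Note the explicit CHECK(op_info->HasAttr("input_scale")) guard is simply dropped here; the migration presumably relies on GetInputScale itself to fault when no scale was recorded for x_var_name, though that error contract is an assumption about the new API, not something this hunk shows.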
...
@@ -178,9 +178,10 @@ void test_lrn(float alpha,
   opdesc.SetAttr("k", k);
   opdesc.SetAttr("n", local_size);
   opdesc.SetAttr("norm_region", norm_region);
-  opdesc.SetAttr<float>("input_scale", (*dmax - *dmin) / 255.f);
+  OpInfo op_info(opdesc);
+  op_info.SetInputScale(x_var_name, {(*dmax - *dmin) / 255.f});
 
-  auto op = CreateOp<operators::LrnOpLite>(opdesc, &scope);
+  auto op = CreateOp<operators::LrnOpLite>(op_info, &scope);
   // baseline
   lrn_compute_ref(op);
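The scale fed to SetInputScale is the 8-bit quantization step of the input's value range, scale = (max - min) / 255. For example, test data spanning [-1, 1] yields (1 - (-1)) / 255 ≈ 0.00784; the converter side then applies the reciprocal of this value (see the 1 / input_scale passed to SetComputingDataType in the FC hunk above).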
@@ -213,7 +214,7 @@ void test_lrn(float alpha,
   auto output_data = output_trans.mutable_data<float>();
   auto* output_ref_data = out_ref->mutable_data<float>();
   for (size_t i = 0; i < out->data_size(); i++) {
-    EXPECT_NEAR(output_data[i], output_ref_data[i], 1e-4);
+    EXPECT_NEAR(output_data[i], output_ref_data[i], 5e-4);
   }
 }
...
@@ -54,10 +54,11 @@ class SubgraphEngine : public subgraph::Engine {
     VLOG(4) << "[MLU] PADDLE_LITE_MLU_SAVE_OFFLINE_MODEL is "
             << GetBoolFromEnv("PADDLE_LITE_MLU_SAVE_OFFLINE_MODEL");
     VLOG(4) << "[MLU] PADDLE_LITE_MLU_DISABLE_BATCH_SIZE_CHANGEABLE is "
-            << GetBoolFromEnv("PADDLE_LITE_MLU_DISABLE_BATCH_SIZE_CHANGEABLE");
+            << GetBoolFromEnv("PADDLE_LITE_MLU_DISABLE_BATCH_SIZE_CHANGEABLE",
+                              true);
     VLOG(4) << "[MLU] LITE_DISABLE_MLU_CAST is "
             << GetBoolFromEnv("LITE_DISABLE_MLU_CAST");
-    if (GetBoolFromEnv("PADDLE_LITE_MLU_DISABLE_BATCH_SIZE_CHANGEABLE")) {
+    if (GetBoolFromEnv("PADDLE_LITE_MLU_DISABLE_BATCH_SIZE_CHANGEABLE", true)) {
      disable_batch_size_changeable_ = true;
    }
  }
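The second argument to GetBoolFromEnv is the fallback used when the variable is unset, so this hunk flips the default of PADDLE_LITE_MLU_DISABLE_BATCH_SIZE_CHANGEABLE from false to true: a changeable batch size is now opt-in. A sketch of the helper's assumed contract (the real implementation lives in the lite utils; only the two-argument call is visible in this diff):

    #include <cstdlib>
    #include <string>

    // Assumed semantics: return `def` when `name` is unset in the
    // environment, otherwise parse the value as a boolean flag.
    inline bool GetBoolFromEnv(const std::string& name, bool def = false) {
      const char* v = std::getenv(name.c_str());
      if (v == nullptr) return def;
      const std::string s(v);
      return s == "1" || s == "true" || s == "ON";
    }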
...