Commit d6709eb9 authored by cen.li

* resnet50 result ok

* test=develop
Parent 1e160622
@@ -43,6 +43,8 @@ node_map_type BatchNormConverter(const std::shared_ptr<lite::OpLite> bn_op,
i_x_shape_data[i] = static_cast<int>(x_shape_data[i]);
}
int channel_size = x_dims[1];
auto scale_var_name = op_info->Input("Scale").front();
auto scale = scope->FindVar(scale_var_name)->GetMutable<lite::Tensor>();
@@ -68,31 +70,26 @@ node_map_type BatchNormConverter(const std::shared_ptr<lite::OpLite> bn_op,
auto epsilon = op_info->GetAttr<float>("epsilon");
auto unique_bn_out_name = lite::bm::UniqueName("batch_norm_out");
add_batchnorm_layer(graph_ctx->bm_compiler_handle,
const_cast<const int*>(i_x_shape_data),
x_dims.size(),
static_cast<const char*>(x_var_name.c_str()),
const_cast<const int*>(i_output_shape_data),
output_dims.size(),
static_cast<const char*>(unique_bn_out_name.c_str()),
static_cast<const char*>(unique_op_name.c_str()),
static_cast<const float*>(mean->mutable_data<float>()),
static_cast<const float*>(variance->mutable_data<float>()),
1.f,
epsilon,
0,
1);
auto* scale_data = scale->mutable_data<float>();
auto* bias_data = bias->mutable_data<float>();
auto* mean_data = mean->mutable_data<float>();
auto* variance_data = variance->mutable_data<float>();
for (int c = 0; c < channel_size; c++) {
float inv_scale = 1.f / (std::sqrt(variance_data[c] + epsilon));
bias_data[c] = bias_data[c] - inv_scale * scale_data[c] * mean_data[c];
scale_data[c] = inv_scale * scale_data[c];
}
const int input_num = 1;
int **shape = new int *[input_num];
int *dim = new int[input_num];
const char **name = new const char *[input_num];
name[0] = static_cast<const char*>(unique_bn_out_name.c_str());
dim[0] = output_dims.size();
shape[0] = i_output_shape_data;
name[0] = static_cast<const char*>(x_var_name.c_str());
dim[0] = x_dims.size();
shape[0] = i_x_shape_data;
auto unique_scale_name = lite::bm::UniqueName("scale");
add_scale_layer(graph_ctx->bm_compiler_handle,
input_num,
shape,
@@ -101,12 +98,12 @@ node_map_type BatchNormConverter(const std::shared_ptr<lite::OpLite> bn_op,
const_cast<const int*>(i_output_shape_data),
output_dims.size(),
static_cast<const char*>(output_var_name.c_str()),
static_cast<const char*>(unique_scale_name.c_str()),
static_cast<const char*>(unique_op_name.c_str()),
static_cast<const float*>(scale->mutable_data<float>()),
static_cast<const float*>(bias->mutable_data<float>()),
1,
1,
0);
1);
delete [] shape;
delete [] name;
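The BatchNormConverter hunks above drop the separate add_batchnorm_layer call: the batch-norm statistics are folded into the per-channel scale and bias buffers in place, and a single scale layer then reproduces the whole op. A minimal standalone sketch of that folding arithmetic, with a hypothetical function name that is not part of the BM compiler API:

#include <cmath>
#include <vector>

// Batch norm y = scale * (x - mean) / sqrt(var + eps) + bias is rewritten
// as the affine form y = scale' * x + bias', matching the loop in the hunk.
void FoldBatchNormIntoScale(std::vector<float>* scale,
                            std::vector<float>* bias,
                            const std::vector<float>& mean,
                            const std::vector<float>& variance,
                            float epsilon) {
  for (size_t c = 0; c < scale->size(); ++c) {
    const float inv_std = 1.f / std::sqrt(variance[c] + epsilon);
    (*bias)[c] = (*bias)[c] - inv_std * (*scale)[c] * mean[c];
    (*scale)[c] = inv_std * (*scale)[c];
  }
}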
@@ -15,6 +15,7 @@
#include "lite/kernels/bm/bridges/registry.h"
#include "bmcompiler_if.h"
#include "bmcompiler_if_lite.h"
#include "bmcompiler_defs.h"
namespace paddle {
namespace lite {
@@ -50,7 +51,6 @@ node_map_type ElementwiseConverter(const std::shared_ptr<lite::OpLite> elementwi
shape[0] = i_x_shape_data;
auto y_var_name = op_info->Input("Y").front();
auto y = scope->FindVar(y_var_name)->GetMutable<lite::Tensor>();
auto y_dims = y->dims();
name[1] = static_cast<const char*>(y_var_name.c_str());
@@ -61,7 +61,6 @@ node_map_type ElementwiseConverter(const std::shared_ptr<lite::OpLite> elementwi
i_y_shape_data[i] = static_cast<int>(y_shape_data[i]);
}
shape[1] = i_y_shape_data;
bool y_is_const = input_nodes.find(y_var_name) == input_nodes.end();
// output
@@ -105,25 +104,25 @@ node_map_type ElementwiseConverter(const std::shared_ptr<lite::OpLite> elementwi
coeff);
} else {
const float* y_data = const_cast<const float*>(y->mutable_data<float>());
const float* x_data = const_cast<const float*>(x->mutable_data<float>());
bm_add_const_tensor(graph_ctx->bm_compiler_handle,
name[0],
name[1],
shape[0],
dim[0],
static_cast<bm_data_type_t>(0),
static_cast<bm_data_type_t>(DTYPE_FP32),
static_cast<const void*>(y_data));
add_binary_layer_v2(graph_ctx->bm_compiler_handle,
name[0],
shape[0],
dim[0],
0,
nullptr,
name[0],
static_cast<const float*>(x_data),
name[1],
shape[0],
dim[0],
0,
nullptr,
static_cast<const float*>(y_data),
static_cast<const char*>(output_var_name.c_str()),
0);
}
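In the ElementwiseConverter hunks above, a constant Y operand (one with no producer node, per the y_is_const check) is now registered as a named FP32 constant tensor through bm_add_const_tensor with an explicit DTYPE_FP32 tag before add_binary_layer_v2 consumes it. The node the graph has to realize is just an elementwise combine; a reference sketch of the add case, assuming identical shapes and a hypothetical helper name:

#include <cstddef>

// Semantics of an elementwise add whose second operand is a
// compile-time constant tensor; illustration only, not BM compiler code.
void ElementwiseAddConstY(const float* x, const float* y_const,
                          float* out, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    out[i] = x[i] + y_const[i];
  }
}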
@@ -15,6 +15,7 @@
#include "lite/kernels/bm/bridges/registry.h"
#include "lite/backends/bm/builder.h"
#include "bmcompiler_if.h"
#include "bmcompiler_op_code.h"
namespace paddle {
namespace lite {
@@ -34,59 +35,48 @@ node_map_type ScaleConverter(const std::shared_ptr<lite::OpLite> scale_op,
auto unique_op_name = lite::bm::UniqueName(op_type);
// input
const int input_num = 1;
int **shape = new int *[input_num];
int *dim = new int[input_num];
const char **name = new const char *[input_num];
auto x_var_name = op_info->Input("X").front();
auto x = scope->FindVar(x_var_name)->GetMutable<lite::Tensor>();
auto x_dims = x->dims();
name[0] = static_cast<const char*>(x_var_name.c_str());
dim[0] = x_dims.size();
const long int* x_shape_data = const_cast<const long int*>(&x_dims.data()[0]);
int i_x_shape_data[x_dims.size()];
for (size_t i = 0; i < x_dims.size(); i++) {
i_x_shape_data[i] = static_cast<int>(x_shape_data[i]);
}
shape[0] = i_x_shape_data;
// output
auto output_var_name = op_info->Output("Out").front();
auto output = scope->FindVar(output_var_name)->GetMutable<lite::Tensor>();
auto output_dims = output->dims();
const long int* output_shape_data = const_cast<const long int*>(&output_dims.data()[0]);
int i_output_shape_data[output_dims.size()];
for (size_t i = 0; i < output_dims.size(); i++) {
i_output_shape_data[i] = static_cast<int>(output_shape_data[i]);
}
auto scale = op_info->GetAttr<float>("scale");
auto bias = op_info->GetAttr<float>("bias");
auto bias_after_scale = op_info->GetAttr<bool>("bias_after_scale");
if (bias_after_scale) {
if (!bias_after_scale) {
bias *= scale;
}
add_scale_layer(graph_ctx->bm_compiler_handle,
input_num,
shape,
dim,
name,
const_cast<const int*>(i_output_shape_data),
output_dims.size(),
auto unique_op_scale_name = lite::bm::UniqueName(op_type);
add_const_binary_layer(graph_ctx->bm_compiler_handle,
static_cast<const char*>(x_var_name.c_str()),
const_cast<const int*>(i_x_shape_data),
x_dims.size(),
scale,
static_cast<const char*>(unique_op_scale_name.c_str()),
BINARY_MUL,
0);
add_const_binary_layer(graph_ctx->bm_compiler_handle,
static_cast<const char*>(unique_op_scale_name.c_str()),
const_cast<const int*>(i_x_shape_data),
x_dims.size(),
bias,
static_cast<const char*>(output_var_name.c_str()),
static_cast<const char*>(unique_op_name.c_str()),
&scale,
&bias,
1,
1,
BINARY_ADD,
0);
delete [] shape;
delete [] dim;
delete [] name;
output_nodes[output_var_name] = output_var_name;
return output_nodes;
}
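The ScaleConverter hunks above replace the single add_scale_layer call with two add_const_binary_layer calls, a BINARY_MUL by scale followed by a BINARY_ADD of bias, and fix the bias_after_scale handling: the bias is pre-multiplied by scale only when it is applied before scaling. A reference sketch of the resulting semantics, using a hypothetical helper name:

#include <cstddef>

// scale op: out = scale * x + bias when bias_after_scale is true,
// out = scale * (x + bias) otherwise. Folding bias *= scale up front
// reduces both cases to the same multiply-then-add form the converter emits.
void ScaleOp(const float* x, float* out, size_t n,
             float scale, float bias, bool bias_after_scale) {
  if (!bias_after_scale) {
    bias *= scale;  // scale * (x + bias) == scale * x + scale * bias
  }
  for (size_t i = 0; i < n; ++i) {
    out[i] = scale * x[i] + bias;
  }
}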