Commit d6709eb9 authored by cen.li

* resnet50 result ok

* test=develop
Parent 1e160622
@@ -43,6 +43,8 @@ node_map_type BatchNormConverter(const std::shared_ptr<lite::OpLite> bn_op,
     i_x_shape_data[i] = static_cast<int>(x_shape_data[i]);
   }
+  int channel_size = x_dims[1];
+
   auto scale_var_name = op_info->Input("Scale").front();
   auto scale = scope->FindVar(scale_var_name)->GetMutable<lite::Tensor>();
@@ -68,31 +70,26 @@ node_map_type BatchNormConverter(const std::shared_ptr<lite::OpLite> bn_op,
   auto epsilon = op_info->GetAttr<float>("epsilon");
   auto unique_bn_out_name = lite::bm::UniqueName("batch_norm_out");
-  add_batchnorm_layer(graph_ctx->bm_compiler_handle,
-                      const_cast<const int*>(i_x_shape_data),
-                      x_dims.size(),
-                      static_cast<const char*>(x_var_name.c_str()),
-                      const_cast<const int*>(i_output_shape_data),
-                      output_dims.size(),
-                      static_cast<const char*>(unique_bn_out_name.c_str()),
-                      static_cast<const char*>(unique_op_name.c_str()),
-                      static_cast<const float*>(mean->mutable_data<float>()),
-                      static_cast<const float*>(variance->mutable_data<float>()),
-                      1.f,
-                      epsilon,
-                      0,
-                      1);
+  auto* scale_data = scale->mutable_data<float>();
+  auto* bias_data = bias->mutable_data<float>();
+  auto* mean_data = mean->mutable_data<float>();
+  auto* variance_data = variance->mutable_data<float>();
+
+  for (int c = 0; c < channel_size; c++) {
+    float inv_scale = 1.f / (std::sqrt(variance_data[c] + epsilon));
+    bias_data[c] = bias_data[c] - inv_scale * scale_data[c] * mean_data[c];
+    scale_data[c] = inv_scale * scale_data[c];
+  }

   const int input_num = 1;
   int **shape = new int *[input_num];
   int *dim = new int[input_num];
   const char **name = new const char *[input_num];
-  name[0] = static_cast<const char*>(unique_bn_out_name.c_str());
-  dim[0] = output_dims.size();
-  shape[0] = i_output_shape_data;
-  auto unique_scale_name = lite::bm::UniqueName("scale");
+  name[0] = static_cast<const char*>(x_var_name.c_str());
+  dim[0] = x_dims.size();
+  shape[0] = i_x_shape_data;
   add_scale_layer(graph_ctx->bm_compiler_handle,
                   input_num,
                   shape,
@@ -101,12 +98,12 @@ node_map_type BatchNormConverter(const std::shared_ptr<lite::OpLite> bn_op,
                   const_cast<const int*>(i_output_shape_data),
                   output_dims.size(),
                   static_cast<const char*>(output_var_name.c_str()),
-                  static_cast<const char*>(unique_scale_name.c_str()),
+                  static_cast<const char*>(unique_op_name.c_str()),
                   static_cast<const float*>(scale->mutable_data<float>()),
                   static_cast<const float*>(bias->mutable_data<float>()),
                   1,
                   1,
-                  0);
+                  1);
   delete [] shape;
   delete [] name;
......
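Note on this hunk: instead of emitting a dedicated batch-norm layer, the converter now folds the normalization into a per-channel affine transform and reuses the existing scale layer. The identity is y = gamma * (x - mean) / sqrt(var + eps) + beta = a * x + b, with a = gamma / sqrt(var + eps) and b = beta - a * mean, which is exactly what the new loop computes in place on the scale/bias tensors. A minimal standalone sketch of that folding (hypothetical helper names, not part of the patch):

```cpp
#include <cmath>
#include <vector>

// Fold BatchNorm statistics into a per-channel scale/bias pair, mirroring the
// loop added in this commit:
//   y = gamma * (x - mean) / sqrt(var + eps) + beta  ==>  y = a * x + b
void FoldBatchNorm(std::vector<float>* gamma,        // in: gamma, out: a
                   std::vector<float>* beta,         // in: beta,  out: b
                   const std::vector<float>& mean,
                   const std::vector<float>& variance,
                   float epsilon) {
  for (size_t c = 0; c < gamma->size(); ++c) {
    float inv_std = 1.f / std::sqrt(variance[c] + epsilon);
    (*beta)[c] = (*beta)[c] - inv_std * (*gamma)[c] * mean[c];
    (*gamma)[c] = inv_std * (*gamma)[c];
  }
}
```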
@@ -15,6 +15,7 @@
 #include "lite/kernels/bm/bridges/registry.h"
 #include "bmcompiler_if.h"
 #include "bmcompiler_if_lite.h"
+#include "bmcompiler_defs.h"

 namespace paddle {
 namespace lite {
@@ -50,7 +51,6 @@ node_map_type ElementwiseConverter(const std::shared_ptr<lite::OpLite> elementwi
   shape[0] = i_x_shape_data;
   auto y_var_name = op_info->Input("Y").front();
   auto y = scope->FindVar(y_var_name)->GetMutable<lite::Tensor>();
   auto y_dims = y->dims();
   name[1] = static_cast<const char*>(y_var_name.c_str());
@@ -61,7 +61,6 @@ node_map_type ElementwiseConverter(const std::shared_ptr<lite::OpLite> elementwi
     i_y_shape_data[i] = static_cast<int>(y_shape_data[i]);
   }
   shape[1] = i_y_shape_data;
   bool y_is_const = input_nodes.find(y_var_name) == input_nodes.end();
   // output
@@ -105,25 +104,25 @@ node_map_type ElementwiseConverter(const std::shared_ptr<lite::OpLite> elementwi
                         coeff);
   } else {
     const float* y_data = const_cast<const float*>(y->mutable_data<float>());
+    const float* x_data = const_cast<const float*>(x->mutable_data<float>());
     bm_add_const_tensor(graph_ctx->bm_compiler_handle,
-                        name[0],
+                        name[1],
                         shape[0],
                         dim[0],
-                        static_cast<bm_data_type_t>(0),
+                        static_cast<bm_data_type_t>(DTYPE_FP32),
                         static_cast<const void*>(y_data));
     add_binary_layer_v2(graph_ctx->bm_compiler_handle,
                         name[0],
                         shape[0],
                         dim[0],
                         0,
-                        nullptr,
-                        name[0],
+                        static_cast<const float*>(x_data),
+                        name[1],
                         shape[0],
                         dim[0],
                         0,
-                        nullptr,
+                        static_cast<const float*>(y_data),
                         static_cast<const char*>(output_var_name.c_str()),
                         0);
   }
......
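Note on this hunk: in the constant-Y branch, the Y tensor is now registered under its own name (name[1]) rather than X's, the data type is passed as the explicit DTYPE_FP32 constant instead of a magic 0, and the binary layer receives the actual x/y buffers instead of nullptr. Numerically the branch still performs a plain elementwise add; a reference computation one might use to sanity-check the converter output could look like the sketch below (hypothetical helper, not part of the patch, assuming identically shaped operands):

```cpp
#include <cstddef>
#include <vector>

// Reference for the const-operand branch: out[i] = x[i] + y[i],
// where y is a tensor baked into the graph as a constant.
std::vector<float> ElementwiseAddRef(const std::vector<float>& x,
                                     const std::vector<float>& y) {
  std::vector<float> out(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    out[i] = x[i] + y[i];
  }
  return out;
}
```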
@@ -15,6 +15,7 @@
 #include "lite/kernels/bm/bridges/registry.h"
 #include "lite/backends/bm/builder.h"
 #include "bmcompiler_if.h"
+#include "bmcompiler_op_code.h"

 namespace paddle {
 namespace lite {
@@ -34,59 +35,48 @@ node_map_type ScaleConverter(const std::shared_ptr<lite::OpLite> scale_op,
   auto unique_op_name = lite::bm::UniqueName(op_type);
   // input
-  const int input_num = 1;
-  int **shape = new int *[input_num];
-  int *dim = new int[input_num];
-  const char **name = new const char *[input_num];
   auto x_var_name = op_info->Input("X").front();
   auto x = scope->FindVar(x_var_name)->GetMutable<lite::Tensor>();
   auto x_dims = x->dims();
-  name[0] = static_cast<const char*>(x_var_name.c_str());
-  dim[0] = x_dims.size();
   const long int* x_shape_data = const_cast<const long int*>(&x_dims.data()[0]);
   int i_x_shape_data[x_dims.size()];
   for (size_t i = 0; i < x_dims.size(); i++) {
     i_x_shape_data[i] = static_cast<int>(x_shape_data[i]);
   }
-  shape[0] = i_x_shape_data;
   // output
   auto output_var_name = op_info->Output("Out").front();
-  auto output = scope->FindVar(output_var_name)->GetMutable<lite::Tensor>();
-  auto output_dims = output->dims();
-  const long int* output_shape_data = const_cast<const long int*>(&output_dims.data()[0]);
-  int i_output_shape_data[output_dims.size()];
-  for (size_t i = 0; i < output_dims.size(); i++) {
-    i_output_shape_data[i] = static_cast<int>(output_shape_data[i]);
-  }
   auto scale = op_info->GetAttr<float>("scale");
   auto bias = op_info->GetAttr<float>("bias");
   auto bias_after_scale = op_info->GetAttr<bool>("bias_after_scale");
-  if (bias_after_scale) {
+  if (!bias_after_scale) {
     bias *= scale;
   }
-  add_scale_layer(graph_ctx->bm_compiler_handle,
-                  input_num,
-                  shape,
-                  dim,
-                  name,
-                  const_cast<const int*>(i_output_shape_data),
-                  output_dims.size(),
-                  static_cast<const char*>(output_var_name.c_str()),
-                  static_cast<const char*>(unique_op_name.c_str()),
-                  &scale,
-                  &bias,
-                  1,
-                  1,
-                  0);
-  delete [] shape;
-  delete [] dim;
-  delete [] name;
+  auto unique_op_scale_name = lite::bm::UniqueName(op_type);
+  add_const_binary_layer(graph_ctx->bm_compiler_handle,
+                         static_cast<const char*>(x_var_name.c_str()),
+                         const_cast<const int*>(i_x_shape_data),
+                         x_dims.size(),
+                         scale,
+                         static_cast<const char*>(unique_op_scale_name.c_str()),
+                         BINARY_MUL,
+                         0);
+  add_const_binary_layer(graph_ctx->bm_compiler_handle,
+                         static_cast<const char*>(unique_op_scale_name.c_str()),
+                         const_cast<const int*>(i_x_shape_data),
+                         x_dims.size(),
+                         bias,
+                         static_cast<const char*>(output_var_name.c_str()),
+                         BINARY_ADD,
+                         0);
   output_nodes[output_var_name] = output_var_name;
   return output_nodes;
 }
......
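Note on this hunk: the scale op is re-expressed as two const-binary layers routed through an intermediate tensor: multiply by `scale` (BINARY_MUL), then add `bias` (BINARY_ADD). With the corrected `bias_after_scale` handling, both attribute settings reduce to out = scale * x + bias', where bias' = bias when `bias_after_scale` is true and bias' = bias * scale otherwise. A reference computation of what the emitted layers should produce (hypothetical helper, not part of the patch):

```cpp
#include <cstddef>
#include <vector>

// Reference for the decomposed scale op:
//   bias_after_scale == true  : out = scale * x + bias
//   bias_after_scale == false : out = scale * (x + bias) = scale * x + scale * bias
std::vector<float> ScaleRef(const std::vector<float>& x, float scale,
                            float bias, bool bias_after_scale) {
  if (!bias_after_scale) {
    bias *= scale;  // same folding the converter performs before emitting layers
  }
  std::vector<float> out(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    out[i] = scale * x[i] + bias;  // BINARY_MUL followed by BINARY_ADD
  }
  return out;
}
```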