From d6709eb93e006c268c4fa4fbde99e8a5746c02a2 Mon Sep 17 00:00:00 2001
From: "cen.li"
Date: Mon, 16 Dec 2019 20:18:39 +0800
Subject: [PATCH] * resnet50 result ok

* test=develop
---
 lite/kernels/bm/bridges/batch_norm_op.cc   | 41 ++++++++--------
 lite/kernels/bm/bridges/elementwise_ops.cc | 17 ++++---
 lite/kernels/bm/bridges/scale_op.cc        | 54 +++++++++-------------
 3 files changed, 49 insertions(+), 63 deletions(-)

diff --git a/lite/kernels/bm/bridges/batch_norm_op.cc b/lite/kernels/bm/bridges/batch_norm_op.cc
index 6d8f254290..26d4d929bf 100644
--- a/lite/kernels/bm/bridges/batch_norm_op.cc
+++ b/lite/kernels/bm/bridges/batch_norm_op.cc
@@ -43,6 +43,8 @@ node_map_type BatchNormConverter(const std::shared_ptr<lite::OpLite> bn_op,
     i_x_shape_data[i] = static_cast<int>(x_shape_data[i]);
   }
 
+  int channel_size = x_dims[1];
+
   auto scale_var_name = op_info->Input("Scale").front();
   auto scale = scope->FindVar(scale_var_name)->GetMutable<lite::Tensor>();
 
@@ -67,32 +69,27 @@ node_map_type BatchNormConverter(const std::shared_ptr<lite::OpLite> bn_op,
 
   auto epsilon = op_info->GetAttr<float>("epsilon");
   auto unique_bn_out_name = lite::bm::UniqueName("batch_norm_out");
-
-  add_batchnorm_layer(graph_ctx->bm_compiler_handle,
-                      const_cast<const int*>(i_x_shape_data),
-                      x_dims.size(),
-                      static_cast<const char*>(x_var_name.c_str()),
-                      const_cast<const int*>(i_output_shape_data),
-                      output_dims.size(),
-                      static_cast<const char*>(unique_bn_out_name.c_str()),
-                      static_cast<const char*>(unique_op_name.c_str()),
-                      static_cast<const float*>(mean->mutable_data<float>()),
-                      static_cast<const float*>(variance->mutable_data<float>()),
-                      1.f,
-                      epsilon,
-                      0,
-                      1);
-
+
+  auto* scale_data = scale->mutable_data<float>();
+  auto* bias_data = bias->mutable_data<float>();
+  auto* mean_data = mean->mutable_data<float>();
+  auto* variance_data = variance->mutable_data<float>();
+
+  for (int c = 0; c < channel_size; c++) {
+    float inv_scale = 1.f / (std::sqrt(variance_data[c] + epsilon));
+    bias_data[c] = bias_data[c] - inv_scale * scale_data[c] * mean_data[c];
+    scale_data[c] = inv_scale * scale_data[c];
+  }
+
   const int input_num = 1;
   int **shape = new int *[input_num];
   int *dim = new int[input_num];
   const char **name = new const char *[input_num];
-  name[0] = static_cast<const char*>(unique_bn_out_name.c_str());
-  dim[0] = output_dims.size();
-  shape[0] = i_output_shape_data;
+  name[0] = static_cast<const char*>(x_var_name.c_str());
+  dim[0] = x_dims.size();
+  shape[0] = i_x_shape_data;
 
-  auto unique_scale_name = lite::bm::UniqueName("scale");
 
   add_scale_layer(graph_ctx->bm_compiler_handle,
                   input_num,
                   shape,
@@ -101,12 +98,12 @@ node_map_type BatchNormConverter(const std::shared_ptr<lite::OpLite> bn_op,
                   const_cast<const int*>(i_output_shape_data),
                   output_dims.size(),
                   static_cast<const char*>(output_var_name.c_str()),
-                  static_cast<const char*>(unique_scale_name.c_str()),
+                  static_cast<const char*>(unique_op_name.c_str()),
                   static_cast<const float*>(scale->mutable_data<float>()),
                   static_cast<const float*>(bias->mutable_data<float>()),
                   1,
                   1,
-                  0);
+                  1);
 
   delete [] shape;
   delete [] name;
diff --git a/lite/kernels/bm/bridges/elementwise_ops.cc b/lite/kernels/bm/bridges/elementwise_ops.cc
index c38e94f7d9..0e1cfd6a40 100644
--- a/lite/kernels/bm/bridges/elementwise_ops.cc
+++ b/lite/kernels/bm/bridges/elementwise_ops.cc
@@ -15,6 +15,7 @@
 #include "lite/kernels/bm/bridges/registry.h"
 #include "bmcompiler_if.h"
 #include "bmcompiler_if_lite.h"
+#include "bmcompiler_defs.h"
 
 namespace paddle {
 namespace lite {
@@ -50,7 +51,6 @@ node_map_type ElementwiseConverter(const std::shared_ptr<lite::OpLite> elementwise_op,
   shape[0] = i_x_shape_data;
 
   auto y_var_name = op_info->Input("Y").front();
-
   auto y = scope->FindVar(y_var_name)->GetMutable<lite::Tensor>();
   auto y_dims = y->dims();
   name[1] = static_cast<const char*>(y_var_name.c_str());
@@ -61,7 +61,6 @@ node_map_type ElementwiseConverter(const std::shared_ptr<lite::OpLite> elementwise_op,
     i_y_shape_data[i] = static_cast<int>(y_shape_data[i]);
   }
   shape[1] = i_y_shape_data;
-
   bool y_is_const = input_nodes.find(y_var_name) == input_nodes.end();
 
   // output
@@ -105,25 +104,25 @@ node_map_type ElementwiseConverter(const std::shared_ptr<lite::OpLite> elementwise_op,
                         coeff);
   } else {
     const float* y_data = const_cast<const float*>(y->mutable_data<float>());
+    const float* x_data = const_cast<const float*>(x->mutable_data<float>());
     bm_add_const_tensor(graph_ctx->bm_compiler_handle,
-                        name[0],
+                        name[1],
                         shape[0],
                         dim[0],
-                        static_cast<bm_data_type_t>(0),
+                        static_cast<bm_data_type_t>(DTYPE_FP32),
                         static_cast<const void*>(y_data));
-
-
+
     add_binary_layer_v2(graph_ctx->bm_compiler_handle,
                         name[0],
                         shape[0],
                         dim[0],
                         0,
-                        nullptr,
-                        name[0],
+                        static_cast<const float*>(x_data),
+                        name[1],
                         shape[0],
                         dim[0],
                         0,
-                        nullptr,
+                        static_cast<const float*>(y_data),
                         static_cast<const char*>(output_var_name.c_str()),
                         0);
   }
diff --git a/lite/kernels/bm/bridges/scale_op.cc b/lite/kernels/bm/bridges/scale_op.cc
index 1a4f4ecad4..f6a3ade28b 100644
--- a/lite/kernels/bm/bridges/scale_op.cc
+++ b/lite/kernels/bm/bridges/scale_op.cc
@@ -15,6 +15,7 @@
 #include "lite/kernels/bm/bridges/registry.h"
 #include "lite/backends/bm/builder.h"
 #include "bmcompiler_if.h"
+#include "bmcompiler_op_code.h"
 
 namespace paddle {
 namespace lite {
@@ -34,59 +35,48 @@ node_map_type ScaleConverter(const std::shared_ptr<lite::OpLite> scale_op,
   auto unique_op_name = lite::bm::UniqueName(op_type);
 
   // input
-  const int input_num = 1;
-  int **shape = new int *[input_num];
-  int *dim = new int[input_num];
-  const char **name = new const char *[input_num];
-
   auto x_var_name = op_info->Input("X").front();
   auto x = scope->FindVar(x_var_name)->GetMutable<lite::Tensor>();
   auto x_dims = x->dims();
-  name[0] = static_cast<const char*>(x_var_name.c_str());
-  dim[0] = x_dims.size();
   const long int* x_shape_data = const_cast<const long int*>(&x_dims.data()[0]);
   int i_x_shape_data[x_dims.size()];
   for (size_t i = 0; i < x_dims.size(); i++) {
     i_x_shape_data[i] = static_cast<int>(x_shape_data[i]);
   }
-  shape[0] = i_x_shape_data;
 
   // output
   auto output_var_name = op_info->Output("Out").front();
-  auto output = scope->FindVar(output_var_name)->GetMutable<lite::Tensor>();
-  auto output_dims = output->dims();
-  const long int* output_shape_data = const_cast<const long int*>(&output_dims.data()[0]);
-  int i_output_shape_data[output_dims.size()];
-  for (size_t i = 0; i < output_dims.size(); i++) {
-    i_output_shape_data[i] = static_cast<int>(output_shape_data[i]);
-  }
 
   auto scale = op_info->GetAttr<float>("scale");
   auto bias = op_info->GetAttr<float>("bias");
   auto bias_after_scale = op_info->GetAttr<bool>("bias_after_scale");
-  if (bias_after_scale) {
+
+  if (!bias_after_scale) {
     bias *= scale;
   }
-
-  add_scale_layer(graph_ctx->bm_compiler_handle,
-                  input_num,
-                  shape,
-                  dim,
-                  name,
-                  const_cast<const int*>(i_output_shape_data),
-                  output_dims.size(),
+
+
+  auto unique_op_scale_name = lite::bm::UniqueName(op_type);
+  add_const_binary_layer(graph_ctx->bm_compiler_handle,
+                         static_cast<const char*>(x_var_name.c_str()),
+                         const_cast<const int*>(i_x_shape_data),
+                         x_dims.size(),
+                         scale,
+                         static_cast<const char*>(unique_op_scale_name.c_str()),
+                         BINARY_MUL,
+                         0);
+
+
+  add_const_binary_layer(graph_ctx->bm_compiler_handle,
+                         static_cast<const char*>(unique_op_scale_name.c_str()),
+                         const_cast<const int*>(i_x_shape_data),
+                         x_dims.size(),
+                         bias,
                   static_cast<const char*>(output_var_name.c_str()),
-                  static_cast<const char*>(unique_op_name.c_str()),
-                  &scale,
-                  &bias,
-                  1,
-                  1,
+                  BINARY_ADD,
                   0);
 
-  delete [] shape;
-  delete [] dim;
-  delete [] name;
 
   output_nodes[output_var_name] = output_var_name;
   return output_nodes;
 }
--
GitLab
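
Note for reviewers: the batch_norm_op.cc change drops the dedicated add_batchnorm_layer call and instead folds the BN statistics into the per-channel scale/bias consumed by add_scale_layer, using y = gamma * (x - mean) / sqrt(var + eps) + beta rewritten as y = (gamma / sqrt(var + eps)) * x + (beta - gamma * mean / sqrt(var + eps)). A minimal standalone sketch of that folding (the helper name and vector-based signature are illustrative, not from the patch; only the arithmetic mirrors the added loop):

#include <cmath>
#include <vector>

// Fold BatchNorm parameters into an equivalent per-channel scale/bias,
// mirroring the loop added in batch_norm_op.cc. `scale` holds gamma and
// `bias` holds beta on entry; both are overwritten with the folded values.
void FoldBatchNormToScale(std::vector<float>* scale,
                          std::vector<float>* bias,
                          const std::vector<float>& mean,
                          const std::vector<float>& variance,
                          float epsilon) {
  for (size_t c = 0; c < scale->size(); ++c) {
    float inv_std = 1.f / std::sqrt(variance[c] + epsilon);
    // Order matters: the bias update must read the original gamma
    // before it is overwritten with the scaled value.
    (*bias)[c] -= inv_std * (*scale)[c] * mean[c];
    (*scale)[c] *= inv_std;
  }
}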
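
In elementwise_ops.cc, a Y operand that never appears in input_nodes is a compile-time constant: it is now registered via bm_add_const_tensor under name[1] (the old code registered it under name[0], the X input's name) with an explicit DTYPE_FP32, and the raw x/y data pointers are handed to add_binary_layer_v2 in place of nullptr. The const-detection test the patch keeps is just a map lookup; a generic sketch of the idea (the map type is illustrative, not the bridge's actual node_map_type):

#include <string>
#include <unordered_map>

// An operand is a compile-time constant for the bridge when no upstream
// node produces it, i.e. it is absent from the converter's input map.
bool IsConstOperand(
    const std::unordered_map<std::string, std::string>& input_nodes,
    const std::string& var_name) {
  return input_nodes.find(var_name) == input_nodes.end();
}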
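
The scale_op.cc rewrite replaces add_scale_layer with two add_const_binary_layer calls (BINARY_MUL, then BINARY_ADD) and fixes the bias_after_scale handling: Paddle's scale op computes y = scale * x + bias when bias_after_scale is true and y = scale * (x + bias) otherwise, so bias must be pre-multiplied by scale only in the latter case, which is what the flipped condition does. A reference implementation of the intended semantics (function name and signature are illustrative, not from the patch):

#include <cstddef>
#include <vector>

// Reference semantics for Paddle's scale op, matching the MUL-then-ADD
// decomposition emitted by the rewritten converter.
std::vector<float> ScaleOpReference(const std::vector<float>& x,
                                    float scale, float bias,
                                    bool bias_after_scale) {
  // scale * (x + bias) == scale * x + (scale * bias), so folding the bias
  // lets both attribute settings share the same two-layer form.
  if (!bias_after_scale) bias *= scale;
  std::vector<float> y(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    y[i] = scale * x[i] + bias;
  }
  return y;
}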