Commit 74829148 authored by baolei.an

[LITE][BM] support efficientnet,test=develop

Parent 36deb84b
@@ -36,7 +36,7 @@ lite_cc_library(subgraph_bridge_shape_op_bm SRCS shape_op.cc DEPS ${bm_subgraph_
 lite_cc_library(subgraph_bridge_split_op_bm SRCS split_op.cc DEPS ${bm_subgraph_bridge_deps})
 lite_cc_library(subgraph_bridge_matmul_op_bm SRCS matmul_op.cc DEPS ${bm_subgraph_bridge_deps})
 lite_cc_library(subgraph_bridge_density_prior_box_op_bm SRCS density_prior_box_op.cc DEPS ${bm_subgraph_bridge_deps})
+lite_cc_library(subgraph_bridge_swish_op_bm SRCS swish_op.cc DEPS ${bm_subgraph_bridge_deps})
 set(bm_subgraph_bridges
         subgraph_bridge_registry
@@ -71,4 +71,5 @@ set(bm_subgraph_bridges
         subgraph_bridge_split_op_bm
         subgraph_bridge_matmul_op_bm
         subgraph_bridge_density_prior_box_op_bm
+        subgraph_bridge_swish_op_bm
         CACHE INTERNAL "bm_subgraph_bridges")
@@ -51,15 +51,23 @@ int DropoutConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   auto dropout_prob = op_info->GetAttr<float>("dropout_prob");
   auto dropout_implementation =
       op_info->GetAttr<std::string>("dropout_implementation");
-  CHECK_EQ(dropout_implementation, "downgrade_in_infer");
-  add_const_binary_layer(graph->GetCompilerHandle(),
-                         static_cast<const char*>(x_var_name.c_str()),
-                         const_cast<const int*>(&i_x_shape_data[0]),
-                         x_dims.size(),
-                         1.f - dropout_prob,
-                         static_cast<const char*>(output_var_name.c_str()),
-                         BINARY_MUL,
-                         0);
+  if (dropout_implementation == "downgrade_in_infer") {
+    add_const_binary_layer(graph->GetCompilerHandle(),
+                           static_cast<const char*>(x_var_name.c_str()),
+                           const_cast<const int*>(&i_x_shape_data[0]),
+                           x_dims.size(),
+                           1.f - dropout_prob,
+                           static_cast<const char*>(output_var_name.c_str()),
+                           BINARY_MUL,
+                           0);
+  } else {
+    add_identity_layer(graph->GetCompilerHandle(),
+                       static_cast<const char*>(x_var_name.c_str()),
+                       const_cast<const int*>(&i_x_shape_data[0]),
+                       x_dims.size(),
+                       static_cast<const char*>(output_var_name.c_str()));
+  }
   graph->AddNode(output_var_name);
   return SUCCESS;
...
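The dropout bridge previously rejected every `dropout_implementation` other than `downgrade_in_infer`; it now falls back to an identity layer for the other mode (`upscale_in_train`, which rescales during training and is a no-op at inference). For context, a minimal CPU sketch of the two inference-time semantics; the helper `dropout_infer` and its raw-pointer interface are illustrative only, not part of the bridge:

```cpp
// Minimal sketch (not part of the bridge): inference-time semantics of
// Paddle's two dropout_implementation modes.
#include <cstddef>

void dropout_infer(const float* x, float* out, size_t n,
                   float dropout_prob, bool downgrade_in_infer) {
  if (downgrade_in_infer) {
    // "downgrade_in_infer": scale activations by the keep probability.
    for (size_t i = 0; i < n; ++i) out[i] = x[i] * (1.f - dropout_prob);
  } else {
    // "upscale_in_train": training already rescaled, so inference is identity.
    for (size_t i = 0; i < n; ++i) out[i] = x[i];
  }
}
```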
@@ -127,6 +127,7 @@ int ElementwiseConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   const float* x_data = const_cast<const float*>(x->mutable_data<float>());
   auto unique_op_name = lite::subgraph::bm::UniqueName("expand_ndims");
   std::vector<int32_t> i_expand_shape_data(3);
+  LOG(INFO) << x_dims << " " << y_dims << " " << output_dims;
   if (x_is_const && y_is_const) {
     float* cpu_data = compute_elementwise_both_const(op);
     bm_add_const_tensor(graph->GetCompilerHandle(),
@@ -162,6 +163,9 @@ int ElementwiseConverter(void* ctx, OpLite* op, KernelBase* kernel) {
       shape[1] = &i_expand_shape_data[0];
       y_data = nullptr;
     }
+  } else if (4 == dim[1] && 1 == shape[1][2] && 1 == shape[1][3]) {
+    LOG(INFO) << "y with shape [N, C, 1, 1] is broadcast against x";
+    y_data = nullptr;
   }
   add_binary_layer_v2(graph->GetCompilerHandle(),
                       name[0],
...
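The new branch recognizes a `y` operand of shape `[N, C, 1, 1]` so it can be broadcast over the spatial dimensions of `x`, the pattern produced by the squeeze-and-excitation scaling in EfficientNet. A minimal sketch of that broadcast, assuming an elementwise multiply; `broadcast_mul` and its layout are illustrative, not the bridge's API:

```cpp
// Minimal sketch (illustrative only): broadcasting y of shape [N, C, 1, 1]
// over x of shape [N, C, H, W] in NCHW layout.
#include <vector>

std::vector<float> broadcast_mul(const std::vector<float>& x,
                                 const std::vector<float>& y,
                                 int n, int c, int h, int w) {
  std::vector<float> out(x.size());
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < c; ++j)
      for (int k = 0; k < h * w; ++k) {
        int idx = (i * c + j) * h * w + k;
        out[idx] = x[idx] * y[i * c + j];  // y is constant over H and W
      }
  return out;
}
```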
@@ -61,3 +61,4 @@ USE_SUBGRAPH_BRIDGE(matmul, kBM);
 USE_SUBGRAPH_BRIDGE(max_pool2d_with_index, kBM);
 USE_SUBGRAPH_BRIDGE(sigmoid, kBM);
 USE_SUBGRAPH_BRIDGE(density_prior_box, kBM);
+USE_SUBGRAPH_BRIDGE(swish, kBM);
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <bmcompiler_if.h>
#include <bmcompiler_op_code.h>
#include "lite/kernels/bm/bridges/graph.h"
#include "lite/kernels/bm/bridges/utility.h"
#include "lite/kernels/npu/bridges/registry.h"
namespace paddle {
namespace lite {
namespace subgraph {
namespace bm {
int SwishConverter(void* ctx, OpLite* op, KernelBase* kernel) {
  CHECK(ctx != nullptr);
  CHECK(op != nullptr);
  auto graph = static_cast<Graph*>(ctx);
  auto scope = op->scope();
  auto op_info = op->op_info();
  auto op_type = op_info->Type();
  // input
  auto x_var_name = op_info->Input("X").front();
  auto x = scope->FindVar(x_var_name)->GetMutable<lite::Tensor>();
  auto x_dims = x->dims();
  const int64_t* x_shape_data = const_cast<const int64_t*>(&x_dims.data()[0]);
  std::vector<int> i_x_shape_data(x_dims.size());
  for (size_t i = 0; i < x_dims.size(); i++) {
    i_x_shape_data[i] = static_cast<int>(x_shape_data[i]);
  }
  // output
  auto output_var_name = op_info->Output("Out").front();
  auto output = scope->FindVar(output_var_name)->GetMutable<lite::Tensor>();
  auto output_dims = output->dims();
  std::vector<int32_t> i_output_shape_data(output_dims.size());
  for (size_t i = 0; i < output_dims.size(); i++) {
    i_output_shape_data[i] = output_dims[i];
  }
  auto unique_sigmoid_name =
      lite::subgraph::bm::UniqueName(op_type + "_sigmoid");
  auto beta = op_info->GetAttr<float>("beta");
  CHECK_EQ(beta, 1.f);
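  // swish(x) = x * sigmoid(beta * x); only beta == 1 is supported here.
  // The op is lowered to a sigmoid activation followed by a BM batch-matmul
  // layer that combines x with sigmoid(x) into the final output.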
  add_active_layer(graph->GetCompilerHandle(),
                   const_cast<const int*>(&i_x_shape_data[0]),
                   x_dims.size(),
                   static_cast<const char*>(x_var_name.c_str()),
                   const_cast<const int*>(&i_output_shape_data[0]),
                   output_dims.size(),
                   static_cast<const char*>(unique_sigmoid_name.c_str()),
                   ACTIVE_SIGMOID);
  add_batch_matmul_layer(graph->GetCompilerHandle(),
                         static_cast<const char*>(x_var_name.c_str()),
                         const_cast<const int*>(&i_x_shape_data[0]),
                         x_dims.size(),
                         0,
                         nullptr,
                         static_cast<const char*>(unique_sigmoid_name.c_str()),
                         const_cast<const int*>(&i_output_shape_data[0]),
                         output_dims.size(),
                         0,
                         nullptr,
                         static_cast<const char*>(output_var_name.c_str()));
  graph->AddNode(output_var_name);
  return SUCCESS;
}
} // namespace bm
} // namespace subgraph
} // namespace lite
} // namespace paddle
REGISTER_SUBGRAPH_BRIDGE(swish,
                         kBM,
                         paddle::lite::subgraph::bm::SwishConverter);
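For reference, the activation this converter lowers, as a scalar sketch (illustrative only; the bridge above only accepts `beta == 1`):

```cpp
// Reference swish (illustrative): swish(x) = x * sigmoid(beta * x),
// which simplifies to x / (1 + exp(-beta * x)).
#include <cmath>

inline float swish(float x, float beta = 1.f) {
  return x / (1.f + std::exp(-beta * x));
}
```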