// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <vector>

#include "bmcompiler_if.h"
#include "bmcompiler_op_code.h"
#include "lite/backends/bm/builder.h"
#include "lite/kernels/bm/bridges/registry.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace bm {
C
cen.li 已提交
24
namespace bridges {
C
cen.li 已提交
25

C
cen.li 已提交
26 27
node_map_type ScaleConverter(const std::shared_ptr<lite::OpLite> scale_op,
                            graph_ctx_type* graph_ctx,
C
cen.li 已提交
28
                            const node_map_type& input_nodes) {
C
cen.li 已提交
29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53
    // output converted nodes
    node_map_type output_nodes;
    
    auto scope = scale_op->scope();
    auto op_info = scale_op->op_info();
    auto op_type = op_info->Type();
    auto unique_op_name = lite::bm::UniqueName(op_type);
    
    // input
    auto x_var_name = op_info->Input("X").front();
    auto x = scope->FindVar(x_var_name)->GetMutable<lite::Tensor>();
    auto x_dims = x->dims();
    const long int* x_shape_data = const_cast<const long int*>(&x_dims.data()[0]);
    
    int i_x_shape_data[x_dims.size()];
    for (size_t i = 0; i < x_dims.size(); i++) {
        i_x_shape_data[i] = static_cast<int>(x_shape_data[i]);
    }
    
    // output
    auto output_var_name = op_info->Output("Out").front();
    
    auto scale = op_info->GetAttr<float>("scale");
    auto bias = op_info->GetAttr<float>("bias");
    auto bias_after_scale = op_info->GetAttr<bool>("bias_after_scale");
C
cen.li 已提交
54 55

    if (!bias_after_scale) {
C
cen.li 已提交
56 57
        bias *= scale;
    }
C
cen.li 已提交
58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75

  
    auto unique_op_scale_name = lite::bm::UniqueName(op_type); 
    add_const_binary_layer(graph_ctx->bm_compiler_handle,
                           static_cast<const char*>(x_var_name.c_str()),
                           const_cast<const int*>(i_x_shape_data),
                           x_dims.size(),
                           scale,
                           static_cast<const char*>(unique_op_scale_name.c_str()),
                           BINARY_MUL,
                           0);


    add_const_binary_layer(graph_ctx->bm_compiler_handle,
                    static_cast<const char*>(unique_op_scale_name.c_str()),
                    const_cast<const int*>(i_x_shape_data),
                    x_dims.size(),
                    bias,
C
cen.li 已提交
76
                    static_cast<const char*>(output_var_name.c_str()),
C
cen.li 已提交
77
                    BINARY_ADD,
C
cen.li 已提交
78 79 80 81
                    0);

    output_nodes[output_var_name] = output_var_name;
    return output_nodes;
C
cen.li 已提交
82
}
C
cen.li 已提交
83

C
cen.li 已提交
84
}  // namespace bridges
C
cen.li 已提交
85 86 87 88
}  // namespace bm
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

REGISTER_BM_BRIDGE(scale, paddle::lite::kernels::bm::bridges::ScaleConverter);