// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <bmcompiler_if.h>
#include <vector>
#include "lite/kernels/bm/bridges/graph.h"
#include "lite/kernels/bm/bridges/utility.h"
#include "lite/kernels/npu/bridges/registry.h"

namespace paddle {
namespace lite {
namespace subgraph {
namespace bm {

// Converts a Paddle conv2d_transpose (deconvolution) op into a deconv layer
// in the BM compiler graph.
int ConvTransposeConverter(void* ctx, OpLite* op, KernelBase* kernel) {
  CHECK(ctx != nullptr);
  CHECK(op != nullptr);
  auto graph = static_cast<Graph*>(ctx);
  auto scope = op->scope();
  auto op_info = op->op_info();
  auto op_type = op_info->Type();
  auto unique_op_name = lite::subgraph::bm::UniqueName(op_type);
  // Input, output and filter tensors; all are expected to be 4-D (NCHW).
  auto input_var_name = op_info->Input("Input").front();
  auto input = scope->FindVar(input_var_name)->GetMutable<lite::Tensor>();
  auto input_dims = input->dims();
  auto output_var_name = op_info->Output("Output").front();
  auto output = scope->FindVar(output_var_name)->GetMutable<lite::Tensor>();
  auto output_dims = output->dims();
  auto filter_var_name = op_info->Input("Filter").front();
  auto filter = scope->FindVar(filter_var_name)->GetMutable<lite::Tensor>();
  auto filter_dims = filter->dims();
  CHECK_EQ(input_dims.size(), 4);
  CHECK_EQ(output_dims.size(), 4);
  CHECK_EQ(filter_dims.size(), 4);
  // Optional bias input.
  bool has_bias = lite::subgraph::bm::HasInputArg(op_info, scope, "Bias");
  float* bias_data = nullptr;
  if (has_bias) {
    auto bias_var_name = op_info->Input("Bias").front();
    auto* bias = scope->FindVar(bias_var_name)->GetMutable<lite::Tensor>();
    bias_data = static_cast<float*>(bias->mutable_data<float>());
  }
  // The BM compiler API takes int32 shapes, so narrow the int64 dims.
  const int64_t* input_shape_data =
      const_cast<const int64_t*>(&input_dims.data()[0]);
  const int64_t* output_shape_data =
      const_cast<const int64_t*>(&output_dims.data()[0]);
  std::vector<int32_t> i_input_shape_data(input_dims.size());
  std::vector<int32_t> i_output_shape_data(output_dims.size());
  for (size_t i = 0; i < input_dims.size(); i++) {
    i_input_shape_data[i] = static_cast<int32_t>(input_shape_data[i]);
  }
  for (size_t i = 0; i < output_dims.size(); i++) {
    i_output_shape_data[i] = static_cast<int32_t>(output_shape_data[i]);
  }
  const float* filter_data =
      const_cast<const float*>(filter->mutable_data<float>());
  auto groups = op_info->GetAttr<int>("groups");
  auto paddings = op_info->GetAttr<std::vector<int>>("paddings");
  auto strides = op_info->GetAttr<std::vector<int>>("strides");
  auto dilations = op_info->GetAttr<std::vector<int>>("dilations");
  // A fused ReLU activation is not supported by this bridge.
  bool fuse_relu = false;
  if (op_info->HasAttr("fuse_relu")) {
    fuse_relu = op_info->GetAttr<bool>("fuse_relu");
  }
  CHECK_EQ(fuse_relu, false);
  // Emit the deconvolution layer into the BM compiler graph. For Paddle's
  // conv2d_transpose, filter dims 2 and 3 are the kernel height and width;
  // paddings/strides/dilations are passed per spatial axis (h, then w).
  add_deconv_layer(graph->GetCompilerHandle(),
                   const_cast<const int*>(&i_input_shape_data[0]),
                   input_dims.size(),
                   static_cast<const char*>(input_var_name.c_str()),
                   const_cast<const int*>(&i_output_shape_data[0]),
                   output_dims.size(),
                   static_cast<const char*>(output_var_name.c_str()),
                   static_cast<const char*>(unique_op_name.c_str()),
                   filter_data,
                   bias_data,
                   filter_dims.data()[2],  // kernel height
                   filter_dims.data()[3],  // kernel width
                   groups,
                   paddings[0],  // pad top
                   paddings[0],  // pad bottom
                   paddings[1],  // pad left
                   paddings[1],  // pad right
                   strides[0],
                   strides[1],
                   dilations[0],
                   dilations[1],
                   static_cast<int>(has_bias));
  graph->AddNode(output_var_name);
  return SUCCESS;
}

}  // namespace bm
}  // namespace subgraph
}  // namespace lite
}  // namespace paddle

REGISTER_SUBGRAPH_BRIDGE(conv2d_transpose,
                         kBM,
                         paddle::lite::subgraph::bm::ConvTransposeConverter);
REGISTER_SUBGRAPH_BRIDGE(depthwise_conv2d_transpose,
                         kBM,
                         paddle::lite::subgraph::bm::ConvTransposeConverter);