From 5533eddb097271b58b79bcd438c9cdd6cbd428c8 Mon Sep 17 00:00:00 2001
From: Jiansong Wang
Date: Wed, 16 Sep 2020 17:01:38 +0800
Subject: [PATCH] Remove/edit some comments.

---
 lite/api/CMakeLists.txt                        |  6 +-
 .../backends/imagination_nna/imgdnn_manager.h  |  7 +++
 .../imagination_nna/bridges/CMakeLists.txt     | 56 -------------------
 .../imagination_nna/bridges/batch_norm_op.cc   | 15 +----
 lite/kernels/imagination_nna/bridges/graph.cc  |  2 +-
 lite/kernels/imagination_nna/bridges/graph.h   | 35 +++++-------
 .../bridges/paddle_use_bridges.h               |  1 -
 .../imagination_nna/bridges/pool_op.cc         | 13 ++---
 .../imagination_nna/bridges/utility.cc         |  2 -
 .../imagination_nna/subgraph_compute.cc        | 14 +----
 tools/codestyle/clang_format.hook              |  2 +-
 11 files changed, 37 insertions(+), 116 deletions(-)

diff --git a/lite/api/CMakeLists.txt b/lite/api/CMakeLists.txt
index 846051c09f..bfd17d89a8 100644
--- a/lite/api/CMakeLists.txt
+++ b/lite/api/CMakeLists.txt
@@ -87,7 +87,7 @@ else()
     target_link_libraries(paddle_light_api_shared ${rknpu_builder_libs} ${rknpu_runtime_libs})
   endif()
   if (LITE_WITH_IMAGINATION_NNA)
-    # Need to add IMG IMAGINATION_NNA runtime libs (libhiai.so) dependency
+    # Need to add IMG IMAGINATION_NNA runtime libs (libimgdnn.so, libnnasession.so) dependency
     #target_link_libraries(paddle_light_api_shared ${nna_builder_libs} ${nna_runtime_libs})
   endif()
 endif()
@@ -123,8 +123,8 @@ if(LITE_WITH_RKNPU)
 endif()
 
 if(LITE_WITH_IMAGINATION_NNA)
-    set(light_api_deps ${light_api_deps} ${nna_deps})
-    set(cxx_api_deps ${cxx_api_deps} ${nna_deps})
+    set(light_api_deps ${light_api_deps} ${imagination_nna_deps})
+    set(cxx_api_deps ${cxx_api_deps} ${imagination_nna_deps})
 endif()
 
 if(LITE_WITH_HUAWEI_ASCEND_NPU)
diff --git a/lite/backends/imagination_nna/imgdnn_manager.h b/lite/backends/imagination_nna/imgdnn_manager.h
index 00d8a8e019..714c40ad2a 100644
--- a/lite/backends/imagination_nna/imgdnn_manager.h
+++ b/lite/backends/imagination_nna/imgdnn_manager.h
@@ -222,6 +222,13 @@ class ImgdnnManager {
     return desc;
   }
 
+  imgdnn_tensor_descriptor getTensorDescriptor(imgdnn_tensor tensor) {
+    imgdnn_tensor_descriptor desc;
+    err_ = imgdnnGetTensorDescriptor(tensor, &desc);
+    ASSERT(err_ != IMGDNN_SUCCESS, "GetTensorDescriptors failed!");
+    return desc;
+  }
+
   size_t getDescriptorSize(const imgdnn_tensor_descriptor *const descriptor) {
     size_t size = imgdnnGetDescriptorSize(descriptor, &err_);
     ASSERT(err_ != IMGDNN_SUCCESS, "GetDescriptorSize failed!");
diff --git a/lite/kernels/imagination_nna/bridges/CMakeLists.txt b/lite/kernels/imagination_nna/bridges/CMakeLists.txt
index d20e2be839..ceec94a8a8 100644
--- a/lite/kernels/imagination_nna/bridges/CMakeLists.txt
+++ b/lite/kernels/imagination_nna/bridges/CMakeLists.txt
@@ -9,37 +9,8 @@ set(imagination_nna_subgraph_bridge_deps subgraph_bridge_registry subgraph_bridg
 
 lite_cc_library(subgraph_bridge_fc_op_imagination_nna SRCS fc_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
 lite_cc_library(subgraph_bridge_conv_op_imagination_nna SRCS conv_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_matmul_op_imagination_nna SRCS matmul_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_mul_op_imagination_nna SRCS mul_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
 lite_cc_library(subgraph_bridge_act_op_imagination_nna SRCS act_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_scale_op_imagination_nna SRCS scale_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_softmax_op_imagination_nna SRCS softmax_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
 lite_cc_library(subgraph_bridge_pool_op_imagination_nna SRCS pool_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_batch_norm_op_imagination_nna SRCS batch_norm_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_elementwise_ops_imagination_nna SRCS elementwise_ops.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_reshape_op_imagination_nna SRCS reshape_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_conv_transpose_op_imagination_nna SRCS conv_transpose_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_interpolate_op_imagination_nna SRCS interpolate_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_transpose_op_imagination_nna SRCS transpose_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_split_op_imagination_nna SRCS split_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_concat_op_imagination_nna SRCS concat_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_shuffle_channel_op_imagination_nna SRCS shuffle_channel_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_pad2d_op_imagination_nna SRCS pad2d_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_reduce_mean_op_imagination_nna SRCS reduce_mean_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_unsqueeze_op_imagination_nna SRCS unsqueeze_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_gather_op_imagination_nna SRCS gather_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_lookup_table_op_imagination_nna SRCS lookup_table_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_argmax_op_imagination_nna SRCS argmax_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_instance_norm_op_imagination_nna SRCS instance_norm_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_dropout_op_imagination_nna SRCS dropout_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_topk_op_imagination_nna SRCS topk_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_layer_norm_op_imagination_nna SRCS layer_norm_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_fill_constant_op_imagination_nna SRCS fill_constant_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_fill_constant_batch_size_like_op_imagination_nna SRCS fill_constant_batch_size_like_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_increment_op_imagination_nna SRCS increment_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_compare_op_imagination_nna SRCS compare_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-#lite_cc_library(subgraph_bridge_shape_op_imagination_nna SRCS shape_op.cc DEPS ${imagination_nna_subgraph_bridge_deps})
-
 
 
 set(imagination_nna_subgraph_bridges
@@ -47,35 +18,8 @@ set(imagination_nna_subgraph_bridges
     subgraph_bridge_graph_imagination_nna
     subgraph_bridge_fc_op_imagination_nna
     subgraph_bridge_conv_op_imagination_nna
-    #subgraph_bridge_matmul_op_imagination_nna
-    #subgraph_bridge_mul_op_imagination_nna
     subgraph_bridge_act_op_imagination_nna
-    #subgraph_bridge_scale_op_imagination_nna
-    #subgraph_bridge_softmax_op_imagination_nna
     subgraph_bridge_pool_op_imagination_nna
-    #subgraph_bridge_batch_norm_op_imagination_nna
-    #subgraph_bridge_elementwise_ops_imagination_nna
-    #subgraph_bridge_reshape_op_imagination_nna
-    #subgraph_bridge_conv_transpose_op_imagination_nna
-    #subgraph_bridge_interpolate_op_imagination_nna
-    #subgraph_bridge_transpose_op_imagination_nna
-    #subgraph_bridge_split_op_imagination_nna
-    #subgraph_bridge_concat_op_imagination_nna
-    #subgraph_bridge_shuffle_channel_op_imagination_nna
-    #subgraph_bridge_pad2d_op_imagination_nna
-    #subgraph_bridge_reduce_mean_op_imagination_nna
-    #subgraph_bridge_unsqueeze_op_imagination_nna
-    #subgraph_bridge_gather_op_imagination_nna
-    #subgraph_bridge_lookup_table_op_imagination_nna
-    #subgraph_bridge_argmax_op_imagination_nna
-    #subgraph_bridge_instance_norm_op_imagination_nna
-    #subgraph_bridge_dropout_op_imagination_nna
-    #subgraph_bridge_topk_op_imagination_nna
-    #subgraph_bridge_layer_norm_op_imagination_nna
-    #subgraph_bridge_fill_constant_op_imagination_nna
-    #subgraph_bridge_fill_constant_batch_size_like_op_imagination_nna
-    #subgraph_bridge_increment_op_imagination_nna
-    #subgraph_bridge_compare_op_imagination_nna
     CACHE INTERNAL "imagination_nna_subgraph_bridges")
 
 message(STATUS "+++++ imagination_nna_subgraph_bridges: ${imagination_nna_subgraph_bridges}")
diff --git a/lite/kernels/imagination_nna/bridges/batch_norm_op.cc b/lite/kernels/imagination_nna/bridges/batch_norm_op.cc
index e7871a89a0..ccdfe2afa4 100644
--- a/lite/kernels/imagination_nna/bridges/batch_norm_op.cc
+++ b/lite/kernels/imagination_nna/bridges/batch_norm_op.cc
@@ -30,7 +30,7 @@ int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   auto scope = op->scope();
   VLOG(3) << "[NNA] Converting " + op_type + "...";
 
-  // Get iimagination_nnat and output vars and op attributes
+  // Get input and output vars and op attributes
   auto x_name = op_info->Input("X").front();
   auto x = scope->FindMutableTensor(x_name);
   auto x_dims = x->dims();
@@ -43,24 +43,14 @@ int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   auto variance_name = op_info->Input("Variance").front();
   auto variance = scope->FindMutableTensor(variance_name);
   auto y_name = op_info->Output("Y").front();
-  float momentum = op_info->GetAttr<float>("momentum");
   float epsilon = op_info->GetAttr<float>("epsilon");
-  int mode = 1;  // bnScale, bnBias tensor dims are 1xCx1x1
-  /*
-  bool use_global_stats = !op_info->HasAttr("use_global_stats") ||
-                          op_info->GetAttr<bool>("use_global_stats");
-  if (!use_global_stats) {
-    LOG(WARNING) << "[NNA] Only use_global_stats=true is supported by DDK";
-  }
-  */
 
   // X node
   std::shared_ptr<Node> x_node = nullptr;
   if (graph->Has(x_name)) {
     x_node = graph->Get(x_name);
   } else {
-    // x_node = graph->Add(x_name, *x);
-    LOG(WARNING) << "BatchNormConverter:x_node not in graph";
+    LOG(WARNING) << "[Imagination NNA] BatchNormConverter: x_node not in graph";
   }
 
   ConvNetBuilder& builder = graph->GetBuilder();
@@ -71,7 +61,6 @@ int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   bn_out = builder.createScaleLayer(
       bn_out, true, scale->mutable_data<float>(), bias->mutable_data<float>());
 
-  // PrecisionType precision = x->precision();
   imgdnn_tensor_descriptor desc;
   imgdnn_err_code err = imgdnnGetTensorDescriptor(bn_out, &desc);
   CHECK(err == IMGDNN_SUCCESS) << "fail get tensor description(BN)";
diff --git a/lite/kernels/imagination_nna/bridges/graph.cc b/lite/kernels/imagination_nna/bridges/graph.cc
index f86f6544ec..420685c885 100644
--- a/lite/kernels/imagination_nna/bridges/graph.cc
+++ b/lite/kernels/imagination_nna/bridges/graph.cc
@@ -45,7 +45,7 @@ std::shared_ptr<Node> Graph::Add(const std::string& name,
                                  const void* const const_data,
                                  std::vector<int64_t> shape,
                                  const TensorInfo& qnt,
-                                 Node::Role role /* = Node::Role::kData*/) {
+                                 Node::Role role) {
   auto node = std::make_shared<Node>(qnt.type, qnt.layout, role);
   auto idx = Add(name, node);
   CHECK_GE(idx, 1);
diff --git a/lite/kernels/imagination_nna/bridges/graph.h b/lite/kernels/imagination_nna/bridges/graph.h
index 03fc6d8768..4d7e2c9584 100644
--- a/lite/kernels/imagination_nna/bridges/graph.h
+++ b/lite/kernels/imagination_nna/bridges/graph.h
@@ -79,28 +79,16 @@ class Graph {
  public:
   explicit Graph(lite::imagination_nna::ImgdnnManager* pMgr) {
     pImgdnnMgr = pMgr;
-    std::cout << "graph construct" << std::endl;
   }
 
-  ~Graph() { std::cout << "Graph deconst" << std::endl; }
-
-  // Add 1
-  int Add(const std::string& name, std::shared_ptr<Node> node);
-
-  // Add 2, weights,bias
+  // Add constant tensor, such as weights,bias
   std::shared_ptr<Node> Add(const std::string& name,
                             const void* const const_data,
                             std::vector<int64_t> shape,
                             const TensorInfo& qnt,
-                            Node::Role role /* = Node::Role::kData*/);
-
-  // Add 3
-  std::shared_ptr<Node> Add(const std::string& name,
-                            const Tensor& tensor,
-                            std::vector<int64_t> shape,
-                            const TensorInfo& qnt,
                             Node::Role role);
-  // Add 4
+
+  // Add input tensor
   std::shared_ptr<Node> Add(const std::string& name,
                             const Tensor& tensor,
                             const TensorInfo& qnt,
@@ -108,15 +96,14 @@ class Graph {
     return Add(name, tensor, tensor.dims().Vectorize(), qnt, role);
   }
 
-  // Used to add intermediate tensor
-  // Add 5
+  // Add intermediate activation tensor
   int Add(const std::string& name,
-          imgdnn_tensor tensor,
+          imgdnn_tensor img_tensor,
           imgdnn_type type,
           DataLayoutType layout = DATALAYOUT(kNCHW)) {
     Node::Role role = Node::Role::kData;
     auto node = std::make_shared<Node>(type, layout, role);
-    node->set_data(tensor);
+    node->set_data(img_tensor);
     return Add(name, node);  // call Add 1
   }
 
@@ -135,11 +122,19 @@ class Graph {
   }
 
  private:
+  int Add(const std::string& name, std::shared_ptr<Node> node);
+
+  std::shared_ptr<Node> Add(const std::string& name,
+                            const Tensor& tensor,
+                            std::vector<int64_t> shape,
+                            const TensorInfo& qnt,
+                            Node::Role role);
+
   std::unordered_map<std::string, std::vector<std::shared_ptr<Node>>> nodes_;
   lite::imagination_nna::ImgdnnManager* pImgdnnMgr{nullptr};
 };
-} // namespace imagination_nna
+}  // namespace imagination_nna
 }  // namespace subgraph
 }  // namespace lite
 }  // namespace paddle
diff --git a/lite/kernels/imagination_nna/bridges/paddle_use_bridges.h b/lite/kernels/imagination_nna/bridges/paddle_use_bridges.h
index d19a2b1370..5c13aba0b0 100644
--- a/lite/kernels/imagination_nna/bridges/paddle_use_bridges.h
+++ b/lite/kernels/imagination_nna/bridges/paddle_use_bridges.h
@@ -19,4 +19,3 @@ USE_SUBGRAPH_BRIDGE(conv2d, kImaginationNNA);
 USE_SUBGRAPH_BRIDGE(depthwise_conv2d, kImaginationNNA);
 USE_SUBGRAPH_BRIDGE(fc, kImaginationNNA);
 USE_SUBGRAPH_BRIDGE(pool2d, kImaginationNNA);
-// USE_SUBGRAPH_BRIDGE(softmax, kImaginationNNA);
diff --git a/lite/kernels/imagination_nna/bridges/pool_op.cc b/lite/kernels/imagination_nna/bridges/pool_op.cc
index 0f5d41f2b9..7594d59fed 100644
--- a/lite/kernels/imagination_nna/bridges/pool_op.cc
+++ b/lite/kernels/imagination_nna/bridges/pool_op.cc
@@ -97,9 +97,9 @@ int PoolConverter(void* ctx, OpLite* op, KernelBase* kernel) {
                    ksize);
 
   // ceil mode
-  /* bool ceil_mode =
-     op_info->HasAttr("ceil_mode") && op_info->GetAttr<bool>("ceil_mode");
-  */
+  if (op_info->HasAttr("ceil_mode"))
+    LOG(WARNING) << "[NNA] imgdnn has no ceil_mode: "
+                 << op_info->GetAttr<bool>("ceil_mode");
 
   unsigned int img_ksize[2] = {(unsigned int)ksize[0], (unsigned int)ksize[1]};
   unsigned int img_stride[2] = {(unsigned int)strides[0],
@@ -126,11 +126,8 @@ int PoolConverter(void* ctx, OpLite* op, KernelBase* kernel) {
                                      pad_to_end,
                                      img_pool_type);
 
-  // LOG(INFO) << "pooling op output:" << static_cast<void *>(pooling_out);
-
-  imgdnn_tensor_descriptor desc;
-  imgdnn_err_code err = imgdnnGetTensorDescriptor(pooling_out, &desc);
-  CHECK(err == IMGDNN_SUCCESS) << "fail get tensor description(POOL)";
+  imgdnn_tensor_descriptor desc =
+      graph->GetBuilder()->getTensorDescriptor(pooling_out);
 
   graph->Add(out_name, pooling_out, desc.type);
 
diff --git a/lite/kernels/imagination_nna/bridges/utility.cc b/lite/kernels/imagination_nna/bridges/utility.cc
index 493bf236d1..aee47d04f2 100644
--- a/lite/kernels/imagination_nna/bridges/utility.cc
+++ b/lite/kernels/imagination_nna/bridges/utility.cc
@@ -55,9 +55,7 @@ void TensorInfoReset(TensorInfo* qnt) {
   qnt->count = 0;
   qnt->axis = 0;
   qnt->scales.clear();
-  // qnt.scales.shrink_to_fit();
   qnt->zero_points.clear();
-  // qnt.zero_points.shrink_to_fit();
   qnt->layout = DATALAYOUT(kNCHW);
 }
 
diff --git a/lite/kernels/imagination_nna/subgraph_compute.cc b/lite/kernels/imagination_nna/subgraph_compute.cc
index aa54979156..43f43a0f02 100644
--- a/lite/kernels/imagination_nna/subgraph_compute.cc
+++ b/lite/kernels/imagination_nna/subgraph_compute.cc
@@ -97,8 +97,6 @@ bool SubgraphEngine::BuildDeviceProgram() {
   imgdnn_mgr_.getNetworkObjectInputs(
       std::numeric_limits<unsigned int>::max(), nullptr, &num_inputs);
   CHECK_EQ(num_inputs, device_inames_.size());
-  // origin_idims_.resize(num_inputs);
-  // origin_itensors_.resize(num_inputs);
   device_itensors_.resize(num_inputs);
   imgdnn_mgr_.getNetworkObjectInputs(
       num_inputs, device_itensors_.data(), nullptr);
@@ -108,9 +106,6 @@ bool SubgraphEngine::BuildDeviceProgram() {
     auto node = graph.Get(device_inames_[i]);
     auto type = node->type();
     auto layout = node->layout();
-    // origin_itensors_[i] = scope_->FindMutableTensor(device_inames_[i]);
-    // CHECK(origin_itensors_[i]);
-    // origin_idims_[i] = origin_itensors_[i]->dims();
     VLOG(3) << "[NNA] Inputs[" << i << "] name: " << device_inames_[i]
             << " type: " << type << " layout: " << DataLayoutToStr(layout);
   }
@@ -119,8 +114,6 @@ bool SubgraphEngine::BuildDeviceProgram() {
   imgdnn_mgr_.getNetworkObjectOutputs(
       std::numeric_limits<unsigned int>::max(), nullptr, &num_outputs);
   CHECK_EQ(num_outputs, device_onames_.size());
-  // origin_odims_.resize(num_outputs);
-  // origin_otensors_.resize(num_outputs);
   device_otensors_.resize(num_outputs);
   imgdnn_mgr_.getNetworkObjectOutputs(
       num_outputs, device_otensors_.data(), nullptr);
@@ -129,9 +122,6 @@ bool SubgraphEngine::BuildDeviceProgram() {
     auto node = graph.Get(device_onames_[i]);
     auto type = node->type();
     auto layout = node->layout();
-    // origin_otensors_[i] = scope_->FindMutableTensor(device_onames_[i]);
-    // CHECK(origin_otensors_[i]);
-    // origin_odims_[i] = origin_otensors_[i]->dims();
     VLOG(3) << "[NNA] Outputs[" << i << "] name: " << device_onames_[i]
             << " type: " << type << " layout: " << DataLayoutToStr(layout);
     // Prepare the device output tensors
@@ -161,8 +151,10 @@ bool SubgraphEngine::BuildDeviceProgram() {
 }
 
 bool SubgraphEngine::LaunchDeviceProgram() {
-  if (!device_program_ready)  // build device program fail
+  if (!device_program_ready) {
+    LOG(WARNING) << "[NNA] Build device program fail, run origin program";
     LaunchOriginProgram();
+  }
 
   // Set input buffer
   for (size_t i = 0; i < origin_itensors_.size(); i++) {
diff --git a/tools/codestyle/clang_format.hook b/tools/codestyle/clang_format.hook
index 063ec099d9..1d92821686 100755
--- a/tools/codestyle/clang_format.hook
+++ b/tools/codestyle/clang_format.hook
@@ -1,7 +1,7 @@
 #!/bin/bash
 set -e
 
-readonly VERSION="6.0.0"
+readonly VERSION="3.8"
 
 version=$(clang-format -version)
 
-- 
GitLab