From 3a959784863a9b1cd58efa85349867990e9afb15 Mon Sep 17 00:00:00 2001
From: huzhiqiang <912790387@qq.com>
Date: Tue, 28 Apr 2020 19:14:42 +0800
Subject: [PATCH] [Compile] change compiling option `SHUTDOWN_LOG` into
 `WITH_LOG` #3514

---
 CMakeLists.txt                                |  2 +-
 cmake/configure.cmake                         |  4 +-
 docs/demo_guides/rknpu.md                     |  4 +-
 lite/api/CMakeLists.txt                       |  2 +-
 lite/backends/opencl/cl_utility.h             |  2 +-
 lite/demo/cxx/README.md                       |  2 +-
 lite/demo/java/README.md                      |  2 +-
 .../opencl/activation_image_compute.cc        |  6 +--
 .../opencl/bilinear_interp_image_compute.cc   |  8 ++--
 .../kernels/opencl/box_coder_image_compute.cc |  6 +--
 lite/kernels/opencl/concat_image_compute.cc   |  6 +--
 lite/kernels/opencl/conv_image_compute.cc     | 38 +++++++++----------
 .../opencl/elementwise_add_buffer_compute.cc  |  4 +-
 .../opencl/elementwise_add_image_compute.cc   |  6 +--
 .../opencl/elementwise_mul_image_compute.cc   |  6 +--
 .../opencl/elementwise_sub_image_compute.cc   |  8 ++--
 lite/kernels/opencl/fc_buffer_compute.cc      |  4 +-
 .../opencl/grid_sampler_image_compute.cc      |  4 +-
 .../opencl/instance_norm_image_compute.cc     |  8 ++--
 lite/kernels/opencl/io_copy_buffer_compute.cc |  6 +--
 lite/kernels/opencl/layout_image_compute.cc   |  8 ++--
 lite/kernels/opencl/lrn_image_compute.cc      |  8 ++--
 .../opencl/nearest_interp_image_compute.cc    |  2 +-
 lite/kernels/opencl/pad2d_image_compute.cc    |  8 ++--
 lite/kernels/opencl/pool_image_compute.cc     |  8 ++--
 lite/kernels/opencl/reshape_image_compute.cc  |  6 +--
 lite/tools/build.bat                          |  2 +-
 lite/tools/build.sh                           | 16 ++++----
 lite/tools/build_android.sh                   | 12 +++---
 lite/tools/build_ios.sh                       | 10 ++---
 lite/tools/build_linux.sh                     |  6 +--
 lite/tools/build_npu.sh                       |  6 +--
 lite/tools/build_rknpu.sh                     |  8 ++--
 lite/tools/ci_build.sh                        |  6 +--
 lite/utils/CMakeLists.txt                     |  2 +-
 lite/utils/cp_logging.h                       |  2 +-
 lite/utils/logging.cc                         |  4 +-
 lite/utils/logging.h                          |  8 ++--
 lite/utils/replace_stl/stream.cc              |  2 +-
 39 files changed, 124 insertions(+), 128 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 065bcbe349..eab1fe0579 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -97,7 +97,7 @@ lite_option(LITE_WITH_FPGA "Enable FPGA support in lite" OFF)
 lite_option(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK "Enable light-weight framework" OFF)
 lite_option(LITE_WITH_PROFILE "Enable profile mode in lite framework" OFF)
 lite_option(LITE_WITH_PRECISION_PROFILE "Enable precision profile in profile mode ON in lite" OFF)
-lite_option(LITE_SHUTDOWN_LOG "Shutdown log system or not." OFF)
+lite_option(LITE_WITH_LOG "Enable log printing or not." ON)
 lite_option(LITE_ON_TINY_PUBLISH "Publish tiny predictor lib."
OFF) lite_option(LITE_ON_MODEL_OPTIMIZE_TOOL "Build the model optimize tool" OFF) # publish options diff --git a/cmake/configure.cmake b/cmake/configure.cmake index cf99645409..1b0890e0db 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -186,8 +186,8 @@ if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) add_definitions("-DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK") endif() -if (LITE_SHUTDOWN_LOG) - add_definitions("-DLITE_SHUTDOWN_LOG") +if (LITE_WITH_LOG) + add_definitions("-DLITE_WITH_LOG") endif() if (LITE_ON_TINY_PUBLISH) diff --git a/docs/demo_guides/rknpu.md b/docs/demo_guides/rknpu.md index 3290b2c2f8..d1fdb68903 100644 --- a/docs/demo_guides/rknpu.md +++ b/docs/demo_guides/rknpu.md @@ -131,8 +131,8 @@ $ git clone https://github.com/airockchip/rknpu_ddk.git ``` - 编译full_publish and tiny_publish for armv8(注意:RKNPU_DDK只支持armv8) ```shell -$ ./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv8 --arm_lang=gcc --build_extra=ON --shutdown_log=OFF --build_rknpu=ON --rknpu_ddk_root=./rknpu_ddk full_publish -$ ./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv8 --arm_lang=gcc --build_extra=ON --shutdown_log=OFF --build_rknpu=ON --rknpu_ddk_root=./rknpu_ddk tiny_publish +$ ./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv8 --arm_lang=gcc --build_extra=ON --with_log=ON --build_rknpu=ON --rknpu_ddk_root=./rknpu_ddk full_publish +$ ./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv8 --arm_lang=gcc --build_extra=ON --with_log=ON --build_rknpu=ON --rknpu_ddk_root=./rknpu_ddk tiny_publish ``` - 将编译生成的build.lite.armlinux.armv8.gcc/inference_lite_lib.armlinux.armv8.rknpu/cxx/include替换PaddleLite-armlinux-demo/Paddle-Lite/include目录; - 将编译生成的build.lite.armlinux.armv8.gcc/inference_lite_lib.armlinux.armv8.rknpu/cxx/lib/libpaddle_light_api_shared.so替换PaddleLite-armlinux-demo/Paddle-Lite/libs/armv8/libpaddle_light_api_shared.so文件。 diff --git a/lite/api/CMakeLists.txt b/lite/api/CMakeLists.txt index 506f2eab72..c9855dd0a0 100644 --- a/lite/api/CMakeLists.txt +++ b/lite/api/CMakeLists.txt @@ -1,4 +1,4 @@ -if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK OR LITE_SHUTDOWN_LOG) +if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK OR (NOT LITE_WITH_LOG)) lite_cc_library(place SRCS paddle_place.cc DEPS logging) else() lite_cc_library(place SRCS paddle_place.cc DEPS glog) diff --git a/lite/backends/opencl/cl_utility.h b/lite/backends/opencl/cl_utility.h index de01f896a6..7ca12c1f80 100644 --- a/lite/backends/opencl/cl_utility.h +++ b/lite/backends/opencl/cl_utility.h @@ -32,7 +32,7 @@ const char* opencl_error_to_str(cl_int error); __FILE__, \ __LINE__); \ } -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG #define CL_CHECK_FATAL(err_code__) \ if (err_code__ != CL_SUCCESS) { \ LOG(FATAL) << string_format( \ diff --git a/lite/demo/cxx/README.md b/lite/demo/cxx/README.md index c2bdb25f4e..ff579a7f36 100644 --- a/lite/demo/cxx/README.md +++ b/lite/demo/cxx/README.md @@ -54,7 +54,7 @@ git checkout release/v2.3 --arm_lang=gcc \ --android_stl=c++_static \ --build_extra=ON \ - --shutdown_log=OFF \ + --with_log=ON \ full_publish ``` diff --git a/lite/demo/java/README.md b/lite/demo/java/README.md index 904726d744..4cf651a829 100644 --- a/lite/demo/java/README.md +++ b/lite/demo/java/README.md @@ -24,7 +24,7 @@ cmake .. 
\ -DLITE_WITH_ARM=ON \ -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \ -DWITH_TESTING=OFF \ --DLITE_SHUTDOWN_LOG=ON \ +-DLITE_WITH_LOG=OFF \ -DLITE_ON_TINY_PUBLISH=ON \ -DARM_TARGET_OS=android -DARM_TARGET_ARCH_ABI=armv8 -DARM_TARGET_LANG=gcc diff --git a/lite/kernels/opencl/activation_image_compute.cc b/lite/kernels/opencl/activation_image_compute.cc index 944a59ce15..da957d8bde 100644 --- a/lite/kernels/opencl/activation_image_compute.cc +++ b/lite/kernels/opencl/activation_image_compute.cc @@ -39,7 +39,7 @@ class ActivationComputeImageDefault void PrepareForRun() override { act_param_ = param_.get_mutable(); int act_type = static_cast(act_param_->active_type); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(1) << "ActivationTypeToStr(act_param_->active_type):" << ActivationTypeToStr(act_param_->active_type); #endif @@ -72,7 +72,7 @@ class ActivationComputeImageDefault LOG(FATAL) << "This act type:" << act_type << " doesn't support."; return; } -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(1) << "kernel_func_name_:" << kernel_func_name_; #endif @@ -129,7 +129,7 @@ class ActivationComputeImageDefault status = kernel.setArg(3, scale_); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG const auto& x_dims = act_param_->X->dims(); const auto& y_dims = act_param_->Out->dims(); // useless: check dim only VLOG(4) << TargetToStr(act_param_->X->target()); diff --git a/lite/kernels/opencl/bilinear_interp_image_compute.cc b/lite/kernels/opencl/bilinear_interp_image_compute.cc index a078301883..84fd3312c3 100644 --- a/lite/kernels/opencl/bilinear_interp_image_compute.cc +++ b/lite/kernels/opencl/bilinear_interp_image_compute.cc @@ -79,7 +79,7 @@ class BilinearInterpImageCompute int out_h = out_dims[2]; int out_w = out_dims[3]; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x->target():" << TargetToStr(x->target()); VLOG(4) << "out->target():" << TargetToStr(out->target()); VLOG(4) << "x->dims():" << in_dims; @@ -92,7 +92,7 @@ class BilinearInterpImageCompute auto* out_img = out->mutable_data( out_image_shape["width"], out_image_shape["height"]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG // VLOG(4) << "x_image: " << x_img; // VLOG(4) << "out_image: " << out_img; VLOG(4) << "out_image_shape[w,h]: " << out_image_shape["width"] << " " @@ -114,7 +114,7 @@ class BilinearInterpImageCompute DDim(std::vector{ static_cast(out_image_shape["width"]), static_cast(out_image_shape["height"])})); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "default_work_size: " << default_work_size[0] << ", " << default_work_size[1] << ", " << default_work_size[2]; #endif @@ -150,7 +150,7 @@ class BilinearInterpImageCompute nullptr, nullptr); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "global_work_size:[2D]:" << global_work_size[0] << " " << global_work_size[1] << " " << global_work_size[2]; #endif diff --git a/lite/kernels/opencl/box_coder_image_compute.cc b/lite/kernels/opencl/box_coder_image_compute.cc index 00509f5aac..84298b29d4 100644 --- a/lite/kernels/opencl/box_coder_image_compute.cc +++ b/lite/kernels/opencl/box_coder_image_compute.cc @@ -61,7 +61,7 @@ class BoxCoderComputeImage : public KernelLiteproposals->mutable_data( image_shape["width"], image_shape["height"]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "boxcoder input shape: "; #endif @@ -93,7 +93,7 @@ class BoxCoderComputeImage : public KernelLiteproposals->target()); VLOG(4) << "output shape: " << out_dims[0] << ", " << out_dims[1] << ", " << 
out_dims[2] << ", " << out_dims[3]; @@ -130,7 +130,7 @@ class BoxCoderComputeImage : public KernelLitedims()[inputs[0]->dims().size() - 1]; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "concat input shape: "; for (size_t i = 0; i < inputs.size(); i++) { VLOG(4) << "inputs [" << i << "]" @@ -149,7 +149,7 @@ class ConcatComputeImage : public KernelLite(image_shape["height"])}; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << TargetToStr(param.output->target()); VLOG(4) << "image_shape(w,h):" << image_shape["width"] << " " << image_shape["height"]; @@ -204,7 +204,7 @@ class ConcatComputeImage : public KernelLitedata(); int in_w = in_dims[in_dims.size() - 1]; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "image_shape(w,h):" << image_shape["width"] << " " << image_shape["height"]; #endif diff --git a/lite/kernels/opencl/conv_image_compute.cc b/lite/kernels/opencl/conv_image_compute.cc index 9e5f365fdb..3de4512cb1 100644 --- a/lite/kernels/opencl/conv_image_compute.cc +++ b/lite/kernels/opencl/conv_image_compute.cc @@ -541,12 +541,12 @@ void ConvImageCompute::Conv2d1x1opt(bool is_turn) { int input_c = input_dims[1]; auto dilations = *param.dilations; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG // VLOG(4) << "out_image: " << out_image; VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << "," << global_work_size_[1] << "," << global_work_size_[2] << "}"; #endif -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "============ conv2d_1x1 params ============"; VLOG(4) << "input_image_shape: " << input_image_shape["width"] << "," << input_image_shape["height"]; @@ -846,7 +846,7 @@ void ConvImageCompute::Conv2d3x3opt(bool is_turn) { const bool is_element_wise_bias = has_bias && param.output->dims() == param.bias->dims(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "============ conv2d params ============"; // VLOG(4) << "input_image_shape: " << input_image_shape["width"] << "," // << input_image_shape["height"]; @@ -893,7 +893,7 @@ void ConvImageCompute::Conv2d3x3opt(bool is_turn) { status = kernel.setArg(++arg_idx, *filter_image); CL_CHECK_FATAL(status); if (has_bias) { -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "set bias_image: "; #endif status = kernel.setArg(++arg_idx, *bias_image); @@ -922,7 +922,7 @@ void ConvImageCompute::Conv2d3x3opt(bool is_turn) { status = kernel.setArg(++arg_idx, output_height); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG // VLOG(4) << "out_image: " << out_image; VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << "," << global_work_size_[1] << "," << global_work_size_[2] << "}"; @@ -975,7 +975,7 @@ void ConvImageCompute::Conv2d5x5(bool is_turn) { int input_c = input_dims[1]; auto dilations = *param.dilations; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "============ conv2d params ============"; VLOG(4) << "input_image_shape: " << input_image_shape["width"] << "," << input_image_shape["height"]; @@ -1025,7 +1025,7 @@ void ConvImageCompute::Conv2d5x5(bool is_turn) { status = kernel.setArg(++arg_idx, *filter_image); CL_CHECK_FATAL(status); if (has_bias) { -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "set bias_image: "; #endif status = kernel.setArg(++arg_idx, *bias_image); @@ -1052,7 +1052,7 @@ void ConvImageCompute::Conv2d5x5(bool is_turn) { status = kernel.setArg(++arg_idx, output_height); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG // VLOG(4) << "out_image: " << out_image; 
VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << "," << global_work_size_[1] << "," << global_work_size_[2] << "}"; @@ -1103,7 +1103,7 @@ void ConvImageCompute::Conv2d5x5opt(bool is_turn) { has_bias && param.output->dims() == param.bias->dims(); // default_work_size[2] = h_blk; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "============ conv2d params ============"; // VLOG(4) << "input_image_shape: " << input_image_shape["width"] << "," // << input_image_shape["height"]; @@ -1223,7 +1223,7 @@ void ConvImageCompute::Conv2d7x7(bool is_turn) { int input_c = input_dims[1]; auto dilations = *param.dilations; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "============ conv2d params ============"; VLOG(4) << "input_image_shape: " << input_image_shape["width"] << "," << input_image_shape["height"]; @@ -1273,7 +1273,7 @@ void ConvImageCompute::Conv2d7x7(bool is_turn) { status = kernel.setArg(++arg_idx, *filter_image); CL_CHECK_FATAL(status); if (has_bias) { -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "set bias_image: "; #endif status = kernel.setArg(++arg_idx, *bias_image); @@ -1300,7 +1300,7 @@ void ConvImageCompute::Conv2d7x7(bool is_turn) { status = kernel.setArg(++arg_idx, output_height); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG // VLOG(4) << "out_image: " << out_image; VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << "," << global_work_size_[1] << "," << global_work_size_[2] << "}"; @@ -1349,7 +1349,7 @@ void ConvImageCompute::Conv2d7x7opt(bool is_turn) { const bool is_element_wise_bias = has_bias && param.output->dims() == param.bias->dims(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "============ conv2d 7x7 params ============"; // VLOG(4) << "input_image_shape: " << input_image_shape["width"] << "," // << input_image_shape["height"]; @@ -1479,7 +1479,7 @@ void ConvImageCompute::DepthwiseConv2d3x3s1(bool is_turn) { const cl::Image2D* bias_image = nullptr; if (has_bias) { bias_image = bias_gpu_image_->data(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "set bias_image: "; #endif status = kernel.setArg(++arg_idx, *bias_image); @@ -1546,7 +1546,7 @@ void ConvImageCompute::DepthwiseConv2d3x3(bool is_turn) { auto kernel = kernel_; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "setArg"; VLOG(4) << "strides = " << strides[0]; VLOG(4) << "offset = " << offset; @@ -1576,7 +1576,7 @@ void ConvImageCompute::DepthwiseConv2d3x3(bool is_turn) { const cl::Image2D* bias_image = nullptr; if (has_bias) { bias_image = bias_gpu_image_->data(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "set bias_image: "; #endif status = kernel.setArg(++arg_idx, *bias_image); @@ -1649,7 +1649,7 @@ void ConvImageCompute::DepthwiseConv2d(bool is_turn) { int input_c = input_dims[1]; auto dilations = *param.dilations; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "============ depthwise conv2d params ============"; VLOG(4) << "input_image_shape: " << input_image_shape["width"] << "," << input_image_shape["height"]; @@ -1700,7 +1700,7 @@ void ConvImageCompute::DepthwiseConv2d(bool is_turn) { status = kernel.setArg(++arg_idx, *filter_image); CL_CHECK_FATAL(status); if (has_bias) { -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "set bias_image: "; #endif status = kernel.setArg(++arg_idx, *bias_image); @@ -1731,7 +1731,7 @@ void ConvImageCompute::DepthwiseConv2d(bool is_turn) { status = kernel.setArg(++arg_idx, filter_height); 
CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << "," << global_work_size_[1] << "," << global_work_size_[2] << "}"; #endif diff --git a/lite/kernels/opencl/elementwise_add_buffer_compute.cc b/lite/kernels/opencl/elementwise_add_buffer_compute.cc index 237de7b6fa..85fcac6b85 100644 --- a/lite/kernels/opencl/elementwise_add_buffer_compute.cc +++ b/lite/kernels/opencl/elementwise_add_buffer_compute.cc @@ -43,7 +43,7 @@ void ElementwiseAddCompute::Run() { STL::stringstream kernel_key; kernel_key << kernel_func_name_ << build_options_ << time_stamp_; auto kernel = context.cl_context()->GetKernel(kernel_key.str()); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << TargetToStr(ele_param_->X->target()); VLOG(4) << TargetToStr(ele_param_->Y->target()); VLOG(4) << TargetToStr(ele_param_->Out->target()); @@ -86,7 +86,7 @@ void ElementwiseAddCompute::UpdateParams() { for (int i = static_cast(y_dims.size() + axis); i < x_dims.size(); ++i) { num_ *= x_dims[i]; } -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "axis: " << axis; VLOG(4) << "batch: " << batch_; VLOG(4) << "channels: " << channels_; diff --git a/lite/kernels/opencl/elementwise_add_image_compute.cc b/lite/kernels/opencl/elementwise_add_image_compute.cc index c507dcb43d..4af02e8b73 100644 --- a/lite/kernels/opencl/elementwise_add_image_compute.cc +++ b/lite/kernels/opencl/elementwise_add_image_compute.cc @@ -83,7 +83,7 @@ void ElementwiseAddImageCompute::ReInitWhenNeeded() { void ElementwiseAddImageCompute::GetGlobalWorkSize() { global_work_size_ = cl::NDRange{static_cast(x_img_shape_[0]), static_cast(x_img_shape_[1])}; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "global_work_size:[2D]:" << x_img_shape_[0] << " " << x_img_shape_[1]; #endif @@ -102,7 +102,7 @@ void ElementwiseAddImageCompute::Run() { auto* out_img = out->mutable_data(out_img_shape_[0], out_img_shape_[1]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x->target():" << TargetToStr(x->target()); VLOG(4) << "y->target():" << TargetToStr(y->target()); VLOG(4) << "out->target():" << TargetToStr(out->target()); @@ -129,7 +129,7 @@ void ElementwiseAddImageCompute::Run() { } else if (y_dims.size() == 1) { if (axis == x_dims.size() - 1 || axis == x_dims.size() - 3) { const int tensor_w = x_dims[x_dims.size() - 1]; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "tensor_w:" << tensor_w; #endif status = kernel.setArg(0, *x_img); diff --git a/lite/kernels/opencl/elementwise_mul_image_compute.cc b/lite/kernels/opencl/elementwise_mul_image_compute.cc index 1f17d60097..dcedee86de 100644 --- a/lite/kernels/opencl/elementwise_mul_image_compute.cc +++ b/lite/kernels/opencl/elementwise_mul_image_compute.cc @@ -85,7 +85,7 @@ class ElementwiseMulImageCompute auto* y = ele_param_->Y; auto* out = ele_param_->Out; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x->target():" << TargetToStr(x->target()); VLOG(4) << "y->target():" << TargetToStr(y->target()); VLOG(4) << "out->target():" << TargetToStr(out->target()); @@ -108,7 +108,7 @@ class ElementwiseMulImageCompute auto* out_img = out->mutable_data(out_img_shape[0], out_img_shape[1]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x_img_shape[w,h]:" << x_img_width << " " << x_img_height; VLOG(4) << "y_img_shape[w,h]:" << y_img_shape[0] << " " << y_img_shape[1]; VLOG(4) << "out_img_shape[w,h]:" << out_img_shape[0] << " " @@ -194,7 +194,7 @@ class ElementwiseMulImageCompute 
nullptr, nullptr); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "global_work_size:[2D]:" << x_img_width << " " << x_img_height; #endif } diff --git a/lite/kernels/opencl/elementwise_sub_image_compute.cc b/lite/kernels/opencl/elementwise_sub_image_compute.cc index cae6338959..8a29cde6a4 100644 --- a/lite/kernels/opencl/elementwise_sub_image_compute.cc +++ b/lite/kernels/opencl/elementwise_sub_image_compute.cc @@ -64,7 +64,7 @@ void ElementwiseSubImageCompute::Run() { auto* out = ele_param_->Out; auto axis = ele_param_->axis; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x->target():" << TargetToStr(x->target()); VLOG(4) << "y->target():" << TargetToStr(y->target()); VLOG(4) << "out->target():" << TargetToStr(out->target()); @@ -87,7 +87,7 @@ void ElementwiseSubImageCompute::Run() { auto* out_img = out->mutable_data(out_img_shape[0], out_img_shape[1]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x_img_shape[w,h]:" << x_img_width << " " << x_img_height; VLOG(4) << "y_img_shape[w,h]:" << y_img_shape[0] << " " << y_img_shape[1]; VLOG(4) << "out_img_shape[w,h]:" << out_img_shape[0] << " " @@ -110,7 +110,7 @@ void ElementwiseSubImageCompute::Run() { } else if (y_dims.size() == 1) { if (axis == x->dims().size() - 1 || axis == x->dims().size() - 3) { int tensor_w = x->dims()[x->dims().size() - 1]; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "tensor_w:" << tensor_w; #endif cl_int status = kernel.setArg(arg_idx, *x_img); @@ -134,7 +134,7 @@ void ElementwiseSubImageCompute::Run() { auto global_work_size = cl::NDRange{static_cast(x_img_width), static_cast(x_img_height)}; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "global_work_size:[2D]:" << x_img_width << " " << x_img_height; #endif diff --git a/lite/kernels/opencl/fc_buffer_compute.cc b/lite/kernels/opencl/fc_buffer_compute.cc index 107575ac6d..38ca4fb796 100644 --- a/lite/kernels/opencl/fc_buffer_compute.cc +++ b/lite/kernels/opencl/fc_buffer_compute.cc @@ -52,7 +52,7 @@ class FcCompute n_ = w_dims[1]; CHECK_EQ(k_, static_cast(w_dims[0])); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x_dims:" << x_dims[0] << " " << x_dims[1] << " " << x_dims[2] << " " << x_dims[3]; VLOG(4) << "w_dims:" << w_dims[0] << " " << w_dims[1] << " " << w_dims[2] @@ -66,7 +66,7 @@ class FcCompute } else { // gemm kernel_func_name_ = "fc_gemm_4x4"; } -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(1) << "kernel_func_name_:" << kernel_func_name_; #endif diff --git a/lite/kernels/opencl/grid_sampler_image_compute.cc b/lite/kernels/opencl/grid_sampler_image_compute.cc index c4daf6ae42..e9151e18ef 100644 --- a/lite/kernels/opencl/grid_sampler_image_compute.cc +++ b/lite/kernels/opencl/grid_sampler_image_compute.cc @@ -80,7 +80,7 @@ class GridSamplerImageCompute : public KernelLite(default_work_size[0]), static_cast(default_work_size[1]), static_cast(default_work_size[2] / 4)}; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "default_work_size: " << default_work_size[0] << ", " << default_work_size[1] << ", " << default_work_size[2]; VLOG(4) << "global_work_size_:[2D]:" << global_work_size_[0] << " " @@ -102,7 +102,7 @@ class GridSamplerImageCompute : public KernelLitemutable_data(out_img_shape_[0], out_img_shape_[1]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG auto in_dims = x->dims(); VLOG(4) << "x->target():" << TargetToStr(x->target()); VLOG(4) << "out->target():" << TargetToStr(out->target()); diff --git 
a/lite/kernels/opencl/instance_norm_image_compute.cc b/lite/kernels/opencl/instance_norm_image_compute.cc index bf7c2aab35..d014588941 100644 --- a/lite/kernels/opencl/instance_norm_image_compute.cc +++ b/lite/kernels/opencl/instance_norm_image_compute.cc @@ -96,7 +96,7 @@ class InstanceNormImageCompute : public KernelLite(lws1), static_cast(lws2)}; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "global_work_size:" << static_cast(global_work_size[0]) << " " << static_cast(global_work_size[1]) << " " << static_cast(global_work_size[2]); @@ -200,7 +200,7 @@ class InstanceNormImageCompute : public KernelLitetarget():" << TargetToStr(x->target()); VLOG(4) << "out->target():" << TargetToStr(out->target()); VLOG(4) << "x->dims():" << in_dims; @@ -211,7 +211,7 @@ class InstanceNormImageCompute : public KernelLitemutable_data( out_image_shape["width"], out_image_shape["height"]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "out_image_shape[w,h]: " << out_image_shape["width"] << " " << out_image_shape["height"]; @@ -229,7 +229,7 @@ class InstanceNormImageCompute : public KernelLite(group_size_y), static_cast(1)}; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "local_work_size:[2D]:" << local_work_size[0] << " " << local_work_size[1] << " " << local_work_size[2]; VLOG(4) << "global_work_size:[2D]:" << global_work_size[0] << " " diff --git a/lite/kernels/opencl/io_copy_buffer_compute.cc b/lite/kernels/opencl/io_copy_buffer_compute.cc index f981c5ca11..31fc563c95 100644 --- a/lite/kernels/opencl/io_copy_buffer_compute.cc +++ b/lite/kernels/opencl/io_copy_buffer_compute.cc @@ -42,7 +42,7 @@ class IoCopyHostToOpenCLCompute CHECK(param.x->target() == TARGET(kHost) || param.x->target() == TARGET(kARM)); auto mem_size = param.x->memory_size(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(2) << "param.x->memory_size():" << mem_size; VLOG(2) << "param.x->dims().size():" << param.x->dims().size(); VLOG(2) << "param.x->dims():" << param.x->dims(); @@ -87,7 +87,7 @@ class IoCopykOpenCLToHostCompute CHECK(param.x->target() == TARGET(kOpenCL)); auto mem_size = param.x->memory_size(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(2) << "copy size " << mem_size; VLOG(2) << "param.x->dims().size():" << param.x->dims().size(); VLOG(2) << "param.x->dims():" << param.x->dims(); @@ -106,7 +106,7 @@ class IoCopykOpenCLToHostCompute auto& context = ctx_->As(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(2) << "--- Find the sync event for the target cl tensor. 
---"; #endif CLRuntime::Global()->command_queue().finish(); diff --git a/lite/kernels/opencl/layout_image_compute.cc b/lite/kernels/opencl/layout_image_compute.cc index ce22426611..3c7a6ae42f 100644 --- a/lite/kernels/opencl/layout_image_compute.cc +++ b/lite/kernels/opencl/layout_image_compute.cc @@ -76,7 +76,7 @@ class LayoutComputeBufferChwToImageDefault const int Stride1 = out_H * out_W; const int Stride0 = out_W; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(2) << "param.process_type:" << param.process_type; VLOG(2) << "x_dims:" << x_dims; VLOG(2) << "param.x->memory_size():" << param.x->memory_size(); @@ -118,7 +118,7 @@ class LayoutComputeBufferChwToImageDefault status = kernel.setArg(++arg_idx, static_cast(Stride2)); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(2) << "gws:[3D]" << ((new_dims[1] + 3) / 4) << " " << new_dims[3] << " " << (new_dims[0] * new_dims[2]); #endif @@ -186,7 +186,7 @@ class LayoutComputeImageDefaultToBufferChw new_dims[4 - x_dims.size() + j] = x_dims[j]; } -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(2) << "param.process_type:" << param.process_type; VLOG(2) << "x_dims:" << x_dims; VLOG(2) << "param.x->memory_size():" << param.x->memory_size(); @@ -228,7 +228,7 @@ class LayoutComputeImageDefaultToBufferChw CL_CHECK_FATAL(status); status = kernel.setArg(++arg_idx, static_cast(C)); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(2) << "gws:[3D]" << ((new_dims[1] + 3) / 4) << " " << new_dims[3] << " " << (new_dims[0] * new_dims[2]); #endif diff --git a/lite/kernels/opencl/lrn_image_compute.cc b/lite/kernels/opencl/lrn_image_compute.cc index 91e94fd4a5..8e70189b88 100644 --- a/lite/kernels/opencl/lrn_image_compute.cc +++ b/lite/kernels/opencl/lrn_image_compute.cc @@ -65,7 +65,7 @@ class LrnImageCompute : public KernelLitedims(); auto in_dims = x->dims(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x->target(): " << TargetToStr(x->target()); VLOG(4) << "out->target(): " << TargetToStr(out->target()); VLOG(4) << "x->dims(): " << in_dims; @@ -84,7 +84,7 @@ class LrnImageCompute : public KernelLitemutable_data( out_image_shape["width"], out_image_shape["height"]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG // VLOG(4) << "out_image" << out_img; VLOG(4) << "out_image_shape[w,h]:" << out_image_shape["width"] << " " << out_image_shape["height"]; @@ -102,7 +102,7 @@ class LrnImageCompute : public KernelLite{ static_cast(out_image_shape["width"]), static_cast(out_image_shape["height"])})); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "default_work_size: " << default_work_size[0] << ", " << default_work_size[1] << ", " << default_work_size[3]; #endif @@ -136,7 +136,7 @@ class LrnImageCompute : public KernelLite(out_dims_w)); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << TargetToStr(param.X->target()); VLOG(4) << TargetToStr(param.Out->target()); VLOG(4) << "out_image_shape(w,h):" << out_image_shape["width"] << " " diff --git a/lite/kernels/opencl/pad2d_image_compute.cc b/lite/kernels/opencl/pad2d_image_compute.cc index 3318825f2b..49489ea3b4 100644 --- a/lite/kernels/opencl/pad2d_image_compute.cc +++ b/lite/kernels/opencl/pad2d_image_compute.cc @@ -73,7 +73,7 @@ class Pad2dCompute : public KernelLitetarget():" << TargetToStr(x->target()); VLOG(4) << "out->target():" << TargetToStr(out->target()); VLOG(4) << "x->dims():" << in_dims; @@ -86,7 +86,7 @@ class Pad2dCompute : public KernelLitemutable_data( 
out_image_shape["width"], out_image_shape["height"]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "out_image_shape[w,h]: " << out_image_shape["width"] << " " << out_image_shape["height"]; @@ -104,7 +104,7 @@ class Pad2dCompute : public KernelLite{ static_cast(out_image_shape["width"]), static_cast(out_image_shape["height"])})); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "default_work_size: " << default_work_size[0] << ", " << default_work_size[1] << ", " << default_work_size[2]; #endif @@ -150,7 +150,7 @@ class Pad2dCompute : public KernelLite strides = param.strides; std::vector ksize = param.ksize; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "global_pooling: " << global_pooling; VLOG(4) << "pooling_type: " << pooling_type; VLOG(4) << "paddings : " << paddings[0] << " " << paddings[1] << " " @@ -75,7 +75,7 @@ class PoolComputeImage2D : public KernelLitemutable_data( out_image_shape.at("width"), out_image_shape.at("height")); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "out_dims= " << out_dims; #endif const std::vector& default_work_size = DefaultWorkSize( @@ -96,7 +96,7 @@ class ReshapeComputeFloatImage : public KernelLite --arm_abi= --arm_lang= test" echo echo -e "optional argument:" - echo -e "--shutdown_log: (OFF|ON); controls whether to shutdown log, default is ON" + echo -e "--with_log: (OFF|ON); controls whether to print log information, default is ON" echo -e "--build_extra: (OFF|ON); controls whether to publish extra operators and kernels for (sequence-related model such as OCR or NLP)" echo -e "--build_train: (OFF|ON); controls whether to publish training operators and kernels, build_train is only for full_publish library now" echo -e "--build_python: (OFF|ON); controls whether to publish python api lib (ANDROID and IOS is not supported)" @@ -481,8 +481,8 @@ function main { BUILD_TAILOR="${i#*=}" shift ;; - --shutdown_log=*) - SHUTDOWN_LOG="${i#*=}" + --with_log=*) + WITH_LOG="${i#*=}" shift ;; --build_npu=*) diff --git a/lite/tools/build_android.sh b/lite/tools/build_android.sh index db9ec400b1..971437fd41 100755 --- a/lite/tools/build_android.sh +++ b/lite/tools/build_android.sh @@ -16,7 +16,7 @@ WITH_JAVA=ON # controls whether to compile cv functions into lib, default is OFF. WITH_CV=OFF # controls whether to hide log information, default is ON. -SHUTDOWN_LOG=ON +WITH_LOG=ON # options of striping lib according to input model. 
OPTMODEL_DIR="" WITH_STRIP=OFF @@ -144,7 +144,7 @@ function make_tiny_publish_so { local cmake_mutable_options=" -DLITE_BUILD_EXTRA=$WITH_EXTRA \ - -DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \ + -DLITE_WITH_LOG=$WITH_LOG \ -DLITE_BUILD_TAILOR=$WITH_STRIP \ -DLITE_OPTMODEL_DIR=$OPTMODEL_DIR \ -DLITE_WITH_JAVA=$WITH_JAVA \ @@ -193,7 +193,7 @@ function make_full_publish_so { local cmake_mutable_options=" -DLITE_BUILD_EXTRA=$WITH_EXTRA \ - -DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \ + -DLITE_WITH_LOG=$WITH_LOG \ -DLITE_BUILD_TAILOR=$WITH_STRIP \ -DLITE_OPTMODEL_DIR=$OPTMODEL_DIR \ -DLITE_WITH_JAVA=$WITH_JAVA \ @@ -236,7 +236,7 @@ function print_usage { echo -e "| --android_stl: (c++_static|c++_shared|gnu_static|gnu_shared), default is c++_static |" echo -e "| --with_java: (OFF|ON); controls whether to publish java api lib, default is ON |" echo -e "| --with_cv: (OFF|ON); controls whether to compile cv functions into lib, default is OFF |" - echo -e "| --shutdown_log: (OFF|ON); controls whether to hide log information, default is ON |" + echo -e "| --with_log: (OFF|ON); controls whether to print log information, default is ON |" echo -e "| --with_extra: (OFF|ON); controls whether to publish extra operators and kernels for (sequence-related model such as OCR or NLP) |" echo -e "| |" echo -e "| arguments of striping lib according to input model:(armv8, gcc, c++_static) |" @@ -315,8 +315,8 @@ function main { shift ;; # ON or OFF, default ON - --shutdown_log=*) - SHUTDOWN_LOG="${i#*=}" + --with_log=*) + WITH_LOG="${i#*=}" shift ;; # compiling lib which can operate on opencl and cpu. diff --git a/lite/tools/build_ios.sh b/lite/tools/build_ios.sh index 61b2cc27b0..f80bf37417 100755 --- a/lite/tools/build_ios.sh +++ b/lite/tools/build_ios.sh @@ -11,7 +11,7 @@ WITH_EXTRA=OFF # controls whether to compile cv functions into lib, default is OFF. WITH_CV=OFF # controls whether to hide log information, default is ON. -SHUTDOWN_LOG=ON +WITH_LOG=ON # absolute path of Paddle-Lite. workspace=$PWD/$(dirname $0)/../../ # options of striping lib according to input model. @@ -67,7 +67,7 @@ function make_ios { -DLITE_WITH_OPENMP=OFF \ -DWITH_ARM_DOTPROD=OFF \ -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \ - -DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \ + -DLITE_WITH_LOG=$WITH_LOG \ -DLITE_BUILD_TAILOR=$WITH_STRIP \ -DLITE_OPTMODEL_DIR=$OPTMODEL_DIR \ -DARM_TARGET_ARCH_ABI=$abi \ @@ -94,7 +94,7 @@ function print_usage { echo -e "| optional argument: |" echo -e "| --arm_abi: (armv8|armv7), default is armv8 |" echo -e "| --with_cv: (OFF|ON); controls whether to compile cv functions into lib, default is OFF |" - echo -e "| --shutdown_log: (OFF|ON); controls whether to hide log information, default is ON |" + echo -e "| --with_log: (OFF|ON); controls whether to print log information, default is ON |" echo -e "| --with_extra: (OFF|ON); controls whether to publish extra operators and kernels for (sequence-related model such as OCR or NLP) |" echo -e "| |" echo -e "| arguments of striping lib according to input model:(armv8, gcc, c++_static) |" @@ -136,8 +136,8 @@ function main { WITH_STRIP="${i#*=}" shift ;; - --shutdown_log=*) - SHUTDOWN_LOG="${i#*=}" + --with_log=*) + WITH_LOG="${i#*=}" shift ;; help) diff --git a/lite/tools/build_linux.sh b/lite/tools/build_linux.sh index 3cc1013c60..c71ac792fb 100755 --- a/lite/tools/build_linux.sh +++ b/lite/tools/build_linux.sh @@ -52,17 +52,13 @@ readonly CMAKE_COMMON_OPTIONS="-DWITH_LITE=ON \ -DWITH_TESTING=OFF" # mutable options for linux compiling. 
function init_cmake_mutable_options { - SHUTDOWN_LOG=ON - if [ "$WITH_LOG" = "ON"]; then - SHUTDOWN_LOG=OFF - fi cmake_mutable_options="-DARM_TARGET_ARCH_ABI=$ARCH \ -DARM_TARGET_LANG=$TOOLCHAIN \ -DLITE_BUILD_EXTRA=$WITH_EXTRA \ -DLITE_WITH_PYTHON=$WITH_PYTHON \ -DLITE_WITH_CV=$WITH_CV \ - -DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \ + -DLITE_WITH_LOG=$WITH_LOG \ -DLITE_BUILD_TAILOR=$WITH_STRIP \ -DLITE_OPTMODEL_DIR=$OPTMODEL_DIR \ -DLITE_WITH_OPENCL=$WITH_OPENCL \ diff --git a/lite/tools/build_npu.sh b/lite/tools/build_npu.sh index 1515cfcdd3..bbfb71deeb 100755 --- a/lite/tools/build_npu.sh +++ b/lite/tools/build_npu.sh @@ -11,7 +11,7 @@ TARGET_NAME="test_subgraph_pass" # default target BUILD_EXTRA=OFF # ON(with sequence ops)/OFF WITH_JAVA=ON # ON(build jar and jni so)/OFF WITH_TESTING=ON # ON/OFF -SHUTDOWN_LOG=OFF # ON(disable logging)/OFF +WITH_LOG=ON # ON(disable logging)/OFF ON_TINY_PUBLISH=OFF # ON(tiny publish)/OFF(full publish) function print_usage { @@ -76,7 +76,7 @@ function build_npu { fi if [[ "${ON_TINY_PUBLISH}" == "ON" ]]; then WITH_TESTING=OFF - SHUTDOWN_LOG=ON + WITH_LOG=OFF publish_dir="tiny_publish" else publish_dir="full_publish" @@ -99,7 +99,7 @@ function build_npu { -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \ -DWITH_TESTING=${WITH_TESTING} \ -DLITE_WITH_JAVA=${WITH_JAVA} \ - -DLITE_SHUTDOWN_LOG=${SHUTDOWN_LOG} \ + -DLITE_WITH_LOG=${WITH_LOG} \ -DLITE_WITH_NPU=ON \ -DLITE_ON_TINY_PUBLISH=${ON_TINY_PUBLISH} \ -DANDROID_API_LEVEL=24 \ diff --git a/lite/tools/build_rknpu.sh b/lite/tools/build_rknpu.sh index aa2fb5a124..aed406db09 100755 --- a/lite/tools/build_rknpu.sh +++ b/lite/tools/build_rknpu.sh @@ -8,8 +8,8 @@ ARM_LANG="gcc" # gcc only yet DDK_ROOT="$(pwd)/rknpu" TARGET_NAME="test_subgraph_pass" # default target BUILD_EXTRA=OFF # ON(with sequence ops)/OFF -WITH_TESTING=ON # ON/OFF -SHUTDOWN_LOG=OFF # ON(disable logging)/OFF +WITH_TESTING=ON # ON/OFF +WITH_LOG=ON # ON(disable logging)/OFF ON_TINY_PUBLISH=OFF # ON(tiny publish)/OFF(full publish) function print_usage { @@ -65,7 +65,7 @@ function build_npu { local publish_dir if [[ "${ON_TINY_PUBLISH}" == "ON" ]]; then WITH_TESTING=OFF - SHUTDOWN_LOG=ON + WITH_LOG=OFF publish_dir="tiny_publish" else publish_dir="full_publish" @@ -89,7 +89,7 @@ function build_npu { -DWITH_ARM_DOTPROD=ON \ -DLITE_BUILD_EXTRA=${BUILD_EXTRA} \ -DWITH_TESTING=${WITH_TESTING} \ - -DLITE_SHUTDOWN_LOG=${SHUTDOWN_LOG} \ + -DLITE_WITH_LOG=${WITH_LOG} \ -DLITE_ON_TINY_PUBLISH=${ON_TINY_PUBLISH} \ -DARM_TARGET_OS=${ARM_OS} \ -DARM_TARGET_ARCH_ABI=${ARM_ABI} \ diff --git a/lite/tools/ci_build.sh b/lite/tools/ci_build.sh index a5dc2b741d..270c3cf79c 100755 --- a/lite/tools/ci_build.sh +++ b/lite/tools/ci_build.sh @@ -118,7 +118,7 @@ function cmake_opencl { -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \ -DWITH_TESTING=ON \ -DLITE_BUILD_EXTRA=ON \ - -DLITE_SHUTDOWN_LOG=OFF \ + -DLITE_WITH_LOG=ON \ -DLITE_WITH_CV=OFF \ -DARM_TARGET_OS=$1 -DARM_TARGET_ARCH_ABI=$2 -DARM_TARGET_LANG=$3 } @@ -653,7 +653,7 @@ function build_ios { -DLITE_WITH_ARM=ON \ -DWITH_TESTING=OFF \ -DLITE_WITH_JAVA=OFF \ - -DLITE_SHUTDOWN_LOG=ON \ + -DLITE_WITH_LOG=OFF \ -DLITE_ON_TINY_PUBLISH=ON \ -DLITE_WITH_OPENMP=OFF \ -DWITH_ARM_DOTPROD=OFF \ @@ -1000,7 +1000,7 @@ function mobile_publish { -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \ -DWITH_TESTING=OFF \ -DLITE_WITH_JAVA=ON \ - -DLITE_SHUTDOWN_LOG=ON \ + -DLITE_WITH_LOG=OFF \ -DLITE_ON_TINY_PUBLISH=ON \ -DARM_TARGET_OS=${os} -DARM_TARGET_ARCH_ABI=${abi} -DARM_TARGET_LANG=${lang} diff --git a/lite/utils/CMakeLists.txt b/lite/utils/CMakeLists.txt index 
ec58118829..573efcad9a 100644 --- a/lite/utils/CMakeLists.txt +++ b/lite/utils/CMakeLists.txt @@ -3,7 +3,7 @@ # else() # endif() -if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK OR LITE_ON_MODEL_OPTIMIZE_TOOL OR LITE_SHUTDOWN_LOG) +if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK OR LITE_ON_MODEL_OPTIMIZE_TOOL OR (NOT LITE_WITH_LOG)) lite_cc_library(logging SRCS logging.cc) set(utils_DEPS logging) lite_cc_test(test_logging SRCS logging_test.cc DEPS ${utils_DEPS}) diff --git a/lite/utils/cp_logging.h b/lite/utils/cp_logging.h index a9970c72b0..faaf25f656 100644 --- a/lite/utils/cp_logging.h +++ b/lite/utils/cp_logging.h @@ -14,7 +14,7 @@ #pragma once #if defined(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) || \ - defined(LITE_ON_MODEL_OPTIMIZE_TOOL) || defined(LITE_SHUTDOWN_LOG) + defined(LITE_ON_MODEL_OPTIMIZE_TOOL) || !defined(LITE_WITH_LOG) #include "lite/utils/logging.h" #else // LITE_WITH_LIGHT_WEIGHT_FRAMEWORK #include diff --git a/lite/utils/logging.cc b/lite/utils/logging.cc index e9ee5861ba..920aa58fe3 100644 --- a/lite/utils/logging.cc +++ b/lite/utils/logging.cc @@ -22,7 +22,7 @@ #if defined(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) || \ defined(LITE_ON_MODEL_OPTIMIZE_TOOL) -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG namespace paddle { namespace lite { @@ -60,5 +60,5 @@ void gen_log(STL::ostream& log_stream_, } // namespace lite } // namespace paddle -#endif // LITE_SHUTDOWN_LOG +#endif // LITE_WITH_LOG #endif // LITE_WITH_LIGHT_FRAMEWORK diff --git a/lite/utils/logging.h b/lite/utils/logging.h index 97eb916ff1..d05624d7c6 100644 --- a/lite/utils/logging.h +++ b/lite/utils/logging.h @@ -46,7 +46,7 @@ // NOLINTFILE() // LOG() -#ifdef LITE_SHUTDOWN_LOG +#ifndef LITE_WITH_LOG #define LOG(status) LOG_##status #define LOG_INFO paddle::lite::Voidify() #define LOG_ERROR LOG_INFO @@ -62,7 +62,7 @@ paddle::lite::LogMessageFatal(__FILE__, __FUNCTION__, __LINE__) #endif -#ifdef LITE_SHUTDOWN_LOG +#ifndef LITE_WITH_LOG #define VLOG(level) paddle::lite::Voidify() #else // VLOG() @@ -72,7 +72,7 @@ // CHECK() // clang-format off -#ifdef LITE_SHUTDOWN_LOG +#ifndef LITE_WITH_LOG #define CHECK(x) if (!(x)) paddle::lite::VoidifyFatal() #define _CHECK_BINARY(x, cmp, y) CHECK(x cmp y) #else @@ -91,7 +91,7 @@ namespace paddle { namespace lite { -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG void gen_log(STL::ostream& log_stream_, const char* file, const char* func, diff --git a/lite/utils/replace_stl/stream.cc b/lite/utils/replace_stl/stream.cc index aadee7e269..081006be67 100644 --- a/lite/utils/replace_stl/stream.cc +++ b/lite/utils/replace_stl/stream.cc @@ -37,7 +37,7 @@ void ostream::pad(const std::string& text) { } } -#ifdef LITE_SHUTDOWN_LOG +#ifndef LITE_WITH_LOG #define ADD_DATA_AS_STRING(data_, obj_) #else #define ADD_DATA_AS_STRING(data_, obj_) \ -- GitLab
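
For readers updating their own build commands after this rename, a minimal before/after sketch. The command is the one this patch touches in docs/demo_guides/rknpu.md; every other flag is unchanged, and the stated defaults come from the new lite/tools/build.sh help text and CMakeLists.txt in this patch:

```shell
# Before this patch: logging was controlled by --shutdown_log, so keeping
# log output meant passing --shutdown_log=OFF.
./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv8 --arm_lang=gcc \
  --build_extra=ON --shutdown_log=OFF --build_rknpu=ON --rknpu_ddk_root=./rknpu_ddk tiny_publish

# After this patch: the flag is --with_log and it defaults to ON, so it can
# be omitted when log output is wanted, or set to OFF to silence logging.
./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv8 --arm_lang=gcc \
  --build_extra=ON --with_log=ON --build_rknpu=ON --rknpu_ddk_root=./rknpu_ddk tiny_publish

# Direct CMake invocations change accordingly: -DLITE_WITH_LOG=OFF now
# replaces -DLITE_SHUTDOWN_LOG=ON for log-free (tiny publish) builds.
```

Note that the polarity flips together with the name: ON now means "keep logging", which lines up with the other LITE_WITH_* options and with the new default of ON.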