diff --git a/CMakeLists.txt b/CMakeLists.txt index 065bcbe3490d7d8ba92dbd17d115d7fefe5c1ec6..eab1fe0579635c58ae48dfb6302c2ef402f02373 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -97,7 +97,7 @@ lite_option(LITE_WITH_FPGA "Enable FPGA support in lite" OFF) lite_option(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK "Enable light-weight framework" OFF) lite_option(LITE_WITH_PROFILE "Enable profile mode in lite framework" OFF) lite_option(LITE_WITH_PRECISION_PROFILE "Enable precision profile in profile mode ON in lite" OFF) -lite_option(LITE_SHUTDOWN_LOG "Shutdown log system or not." OFF) +lite_option(LITE_WITH_LOG "Enable log printing or not." ON) lite_option(LITE_ON_TINY_PUBLISH "Publish tiny predictor lib." OFF) lite_option(LITE_ON_MODEL_OPTIMIZE_TOOL "Build the model optimize tool" OFF) # publish options diff --git a/cmake/configure.cmake b/cmake/configure.cmake index cf99645409436f24533005b9a74f2bdb1c89f662..1b0890e0dbf5e741176c293a059d809752c72a43 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -186,8 +186,8 @@ if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) add_definitions("-DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK") endif() -if (LITE_SHUTDOWN_LOG) - add_definitions("-DLITE_SHUTDOWN_LOG") +if (LITE_WITH_LOG) + add_definitions("-DLITE_WITH_LOG") endif() if (LITE_ON_TINY_PUBLISH) diff --git a/docs/demo_guides/rknpu.md b/docs/demo_guides/rknpu.md index 3290b2c2f8b882ed9a86358577abf783c22cbb0b..d1fdb6890377ffafd045a60e3eb1f4a3237bbf2b 100644 --- a/docs/demo_guides/rknpu.md +++ b/docs/demo_guides/rknpu.md @@ -131,8 +131,8 @@ $ git clone https://github.com/airockchip/rknpu_ddk.git ``` - 编译full_publish and tiny_publish for armv8(注意:RKNPU_DDK只支持armv8) ```shell -$ ./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv8 --arm_lang=gcc --build_extra=ON --shutdown_log=OFF --build_rknpu=ON --rknpu_ddk_root=./rknpu_ddk full_publish -$ ./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv8 --arm_lang=gcc --build_extra=ON --shutdown_log=OFF --build_rknpu=ON --rknpu_ddk_root=./rknpu_ddk tiny_publish +$ ./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv8 --arm_lang=gcc --build_extra=ON --with_log=ON --build_rknpu=ON --rknpu_ddk_root=./rknpu_ddk full_publish +$ ./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv8 --arm_lang=gcc --build_extra=ON --with_log=ON --build_rknpu=ON --rknpu_ddk_root=./rknpu_ddk tiny_publish ``` - 将编译生成的build.lite.armlinux.armv8.gcc/inference_lite_lib.armlinux.armv8.rknpu/cxx/include替换PaddleLite-armlinux-demo/Paddle-Lite/include目录; - 将编译生成的build.lite.armlinux.armv8.gcc/inference_lite_lib.armlinux.armv8.rknpu/cxx/lib/libpaddle_light_api_shared.so替换PaddleLite-armlinux-demo/Paddle-Lite/libs/armv8/libpaddle_light_api_shared.so文件。 diff --git a/lite/api/CMakeLists.txt b/lite/api/CMakeLists.txt index 506f2eab721807abcff64e16470edbc6bcd40842..c9855dd0a060db540c54703ab57ad035818a2fc0 100644 --- a/lite/api/CMakeLists.txt +++ b/lite/api/CMakeLists.txt @@ -1,4 +1,4 @@ -if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK OR LITE_SHUTDOWN_LOG) +if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK OR (NOT LITE_WITH_LOG)) lite_cc_library(place SRCS paddle_place.cc DEPS logging) else() lite_cc_library(place SRCS paddle_place.cc DEPS glog) diff --git a/lite/backends/opencl/cl_utility.h b/lite/backends/opencl/cl_utility.h index de01f896a6eb461eb24023a77935bba07de029e7..7ca12c1f808352936359f83b3049716c53806b2f 100644 --- a/lite/backends/opencl/cl_utility.h +++ b/lite/backends/opencl/cl_utility.h @@ -32,7 +32,7 @@ const char* opencl_error_to_str(cl_int error); __FILE__, \ __LINE__); \ } -#ifndef LITE_SHUTDOWN_LOG +#ifdef 
LITE_WITH_LOG #define CL_CHECK_FATAL(err_code__) \ if (err_code__ != CL_SUCCESS) { \ LOG(FATAL) << string_format( \ diff --git a/lite/demo/cxx/README.md b/lite/demo/cxx/README.md index c2bdb25f4e3b46265bcc4830b613b6d0d6d8232d..ff579a7f36927cd2af481d60c933d312bf5c035a 100644 --- a/lite/demo/cxx/README.md +++ b/lite/demo/cxx/README.md @@ -54,7 +54,7 @@ git checkout release/v2.3 --arm_lang=gcc \ --android_stl=c++_static \ --build_extra=ON \ - --shutdown_log=OFF \ + --with_log=ON \ full_publish ``` diff --git a/lite/demo/java/README.md b/lite/demo/java/README.md index 904726d744b7bda075cee05830903a470d52cf54..4cf651a829e6b43607fe12ab21454d52408528e8 100644 --- a/lite/demo/java/README.md +++ b/lite/demo/java/README.md @@ -24,7 +24,7 @@ cmake .. \ -DLITE_WITH_ARM=ON \ -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \ -DWITH_TESTING=OFF \ --DLITE_SHUTDOWN_LOG=ON \ +-DLITE_WITH_LOG=OFF \ -DLITE_ON_TINY_PUBLISH=ON \ -DARM_TARGET_OS=android -DARM_TARGET_ARCH_ABI=armv8 -DARM_TARGET_LANG=gcc diff --git a/lite/kernels/opencl/activation_image_compute.cc b/lite/kernels/opencl/activation_image_compute.cc index 944a59ce15eea34f1e2045dc1093c971adc8483a..da957d8bdec8a4689740fb996010968c14d95b16 100644 --- a/lite/kernels/opencl/activation_image_compute.cc +++ b/lite/kernels/opencl/activation_image_compute.cc @@ -39,7 +39,7 @@ class ActivationComputeImageDefault void PrepareForRun() override { act_param_ = param_.get_mutable(); int act_type = static_cast(act_param_->active_type); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(1) << "ActivationTypeToStr(act_param_->active_type):" << ActivationTypeToStr(act_param_->active_type); #endif @@ -72,7 +72,7 @@ class ActivationComputeImageDefault LOG(FATAL) << "This act type:" << act_type << " doesn't support."; return; } -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(1) << "kernel_func_name_:" << kernel_func_name_; #endif @@ -129,7 +129,7 @@ class ActivationComputeImageDefault status = kernel.setArg(3, scale_); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG const auto& x_dims = act_param_->X->dims(); const auto& y_dims = act_param_->Out->dims(); // useless: check dim only VLOG(4) << TargetToStr(act_param_->X->target()); diff --git a/lite/kernels/opencl/bilinear_interp_image_compute.cc b/lite/kernels/opencl/bilinear_interp_image_compute.cc index a078301883b9fc1de4f82e7d23570f2a108a87d4..84fd3312c3b965c2856780aaab6d9ecb9122ccfc 100644 --- a/lite/kernels/opencl/bilinear_interp_image_compute.cc +++ b/lite/kernels/opencl/bilinear_interp_image_compute.cc @@ -79,7 +79,7 @@ class BilinearInterpImageCompute int out_h = out_dims[2]; int out_w = out_dims[3]; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x->target():" << TargetToStr(x->target()); VLOG(4) << "out->target():" << TargetToStr(out->target()); VLOG(4) << "x->dims():" << in_dims; @@ -92,7 +92,7 @@ class BilinearInterpImageCompute auto* out_img = out->mutable_data( out_image_shape["width"], out_image_shape["height"]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG // VLOG(4) << "x_image: " << x_img; // VLOG(4) << "out_image: " << out_img; VLOG(4) << "out_image_shape[w,h]: " << out_image_shape["width"] << " " @@ -114,7 +114,7 @@ class BilinearInterpImageCompute DDim(std::vector{ static_cast(out_image_shape["width"]), static_cast(out_image_shape["height"])})); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "default_work_size: " << default_work_size[0] << ", " << default_work_size[1] << ", " << default_work_size[2]; #endif @@ -150,7 +150,7 @@ class 
BilinearInterpImageCompute nullptr, nullptr); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "global_work_size:[2D]:" << global_work_size[0] << " " << global_work_size[1] << " " << global_work_size[2]; #endif diff --git a/lite/kernels/opencl/box_coder_image_compute.cc b/lite/kernels/opencl/box_coder_image_compute.cc index 00509f5aacbcd531fe338729c8bb2c6664fba495..84298b29d4f8ce99a0bacc2dbb5acf545a49617c 100644 --- a/lite/kernels/opencl/box_coder_image_compute.cc +++ b/lite/kernels/opencl/box_coder_image_compute.cc @@ -61,7 +61,7 @@ class BoxCoderComputeImage : public KernelLiteproposals->mutable_data( image_shape["width"], image_shape["height"]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "boxcoder input shape: "; #endif @@ -93,7 +93,7 @@ class BoxCoderComputeImage : public KernelLiteproposals->target()); VLOG(4) << "output shape: " << out_dims[0] << ", " << out_dims[1] << ", " << out_dims[2] << ", " << out_dims[3]; @@ -130,7 +130,7 @@ class BoxCoderComputeImage : public KernelLitedims()[inputs[0]->dims().size() - 1]; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "concat input shape: "; for (size_t i = 0; i < inputs.size(); i++) { VLOG(4) << "inputs [" << i << "]" @@ -149,7 +149,7 @@ class ConcatComputeImage : public KernelLite(image_shape["height"])}; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << TargetToStr(param.output->target()); VLOG(4) << "image_shape(w,h):" << image_shape["width"] << " " << image_shape["height"]; @@ -204,7 +204,7 @@ class ConcatComputeImage : public KernelLitedata(); int in_w = in_dims[in_dims.size() - 1]; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "image_shape(w,h):" << image_shape["width"] << " " << image_shape["height"]; #endif diff --git a/lite/kernels/opencl/conv_image_compute.cc b/lite/kernels/opencl/conv_image_compute.cc index 9e5f365fdb5f8f678af4da189587d30b41bd0b41..3de4512cb1d9d06b95d14c51615d5ab87e0a7419 100644 --- a/lite/kernels/opencl/conv_image_compute.cc +++ b/lite/kernels/opencl/conv_image_compute.cc @@ -541,12 +541,12 @@ void ConvImageCompute::Conv2d1x1opt(bool is_turn) { int input_c = input_dims[1]; auto dilations = *param.dilations; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG // VLOG(4) << "out_image: " << out_image; VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << "," << global_work_size_[1] << "," << global_work_size_[2] << "}"; #endif -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "============ conv2d_1x1 params ============"; VLOG(4) << "input_image_shape: " << input_image_shape["width"] << "," << input_image_shape["height"]; @@ -846,7 +846,7 @@ void ConvImageCompute::Conv2d3x3opt(bool is_turn) { const bool is_element_wise_bias = has_bias && param.output->dims() == param.bias->dims(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "============ conv2d params ============"; // VLOG(4) << "input_image_shape: " << input_image_shape["width"] << "," // << input_image_shape["height"]; @@ -893,7 +893,7 @@ void ConvImageCompute::Conv2d3x3opt(bool is_turn) { status = kernel.setArg(++arg_idx, *filter_image); CL_CHECK_FATAL(status); if (has_bias) { -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "set bias_image: "; #endif status = kernel.setArg(++arg_idx, *bias_image); @@ -922,7 +922,7 @@ void ConvImageCompute::Conv2d3x3opt(bool is_turn) { status = kernel.setArg(++arg_idx, output_height); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG // VLOG(4) << "out_image: " << 
out_image; VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << "," << global_work_size_[1] << "," << global_work_size_[2] << "}"; @@ -975,7 +975,7 @@ void ConvImageCompute::Conv2d5x5(bool is_turn) { int input_c = input_dims[1]; auto dilations = *param.dilations; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "============ conv2d params ============"; VLOG(4) << "input_image_shape: " << input_image_shape["width"] << "," << input_image_shape["height"]; @@ -1025,7 +1025,7 @@ void ConvImageCompute::Conv2d5x5(bool is_turn) { status = kernel.setArg(++arg_idx, *filter_image); CL_CHECK_FATAL(status); if (has_bias) { -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "set bias_image: "; #endif status = kernel.setArg(++arg_idx, *bias_image); @@ -1052,7 +1052,7 @@ void ConvImageCompute::Conv2d5x5(bool is_turn) { status = kernel.setArg(++arg_idx, output_height); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG // VLOG(4) << "out_image: " << out_image; VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << "," << global_work_size_[1] << "," << global_work_size_[2] << "}"; @@ -1103,7 +1103,7 @@ void ConvImageCompute::Conv2d5x5opt(bool is_turn) { has_bias && param.output->dims() == param.bias->dims(); // default_work_size[2] = h_blk; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "============ conv2d params ============"; // VLOG(4) << "input_image_shape: " << input_image_shape["width"] << "," // << input_image_shape["height"]; @@ -1223,7 +1223,7 @@ void ConvImageCompute::Conv2d7x7(bool is_turn) { int input_c = input_dims[1]; auto dilations = *param.dilations; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "============ conv2d params ============"; VLOG(4) << "input_image_shape: " << input_image_shape["width"] << "," << input_image_shape["height"]; @@ -1273,7 +1273,7 @@ void ConvImageCompute::Conv2d7x7(bool is_turn) { status = kernel.setArg(++arg_idx, *filter_image); CL_CHECK_FATAL(status); if (has_bias) { -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "set bias_image: "; #endif status = kernel.setArg(++arg_idx, *bias_image); @@ -1300,7 +1300,7 @@ void ConvImageCompute::Conv2d7x7(bool is_turn) { status = kernel.setArg(++arg_idx, output_height); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG // VLOG(4) << "out_image: " << out_image; VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << "," << global_work_size_[1] << "," << global_work_size_[2] << "}"; @@ -1349,7 +1349,7 @@ void ConvImageCompute::Conv2d7x7opt(bool is_turn) { const bool is_element_wise_bias = has_bias && param.output->dims() == param.bias->dims(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "============ conv2d 7x7 params ============"; // VLOG(4) << "input_image_shape: " << input_image_shape["width"] << "," // << input_image_shape["height"]; @@ -1479,7 +1479,7 @@ void ConvImageCompute::DepthwiseConv2d3x3s1(bool is_turn) { const cl::Image2D* bias_image = nullptr; if (has_bias) { bias_image = bias_gpu_image_->data(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "set bias_image: "; #endif status = kernel.setArg(++arg_idx, *bias_image); @@ -1546,7 +1546,7 @@ void ConvImageCompute::DepthwiseConv2d3x3(bool is_turn) { auto kernel = kernel_; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "setArg"; VLOG(4) << "strides = " << strides[0]; VLOG(4) << "offset = " << offset; @@ -1576,7 +1576,7 @@ void ConvImageCompute::DepthwiseConv2d3x3(bool is_turn) { const 
cl::Image2D* bias_image = nullptr; if (has_bias) { bias_image = bias_gpu_image_->data(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "set bias_image: "; #endif status = kernel.setArg(++arg_idx, *bias_image); @@ -1649,7 +1649,7 @@ void ConvImageCompute::DepthwiseConv2d(bool is_turn) { int input_c = input_dims[1]; auto dilations = *param.dilations; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "============ depthwise conv2d params ============"; VLOG(4) << "input_image_shape: " << input_image_shape["width"] << "," << input_image_shape["height"]; @@ -1700,7 +1700,7 @@ void ConvImageCompute::DepthwiseConv2d(bool is_turn) { status = kernel.setArg(++arg_idx, *filter_image); CL_CHECK_FATAL(status); if (has_bias) { -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "set bias_image: "; #endif status = kernel.setArg(++arg_idx, *bias_image); @@ -1731,7 +1731,7 @@ void ConvImageCompute::DepthwiseConv2d(bool is_turn) { status = kernel.setArg(++arg_idx, filter_height); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << "," << global_work_size_[1] << "," << global_work_size_[2] << "}"; #endif diff --git a/lite/kernels/opencl/elementwise_add_buffer_compute.cc b/lite/kernels/opencl/elementwise_add_buffer_compute.cc index 237de7b6fad9dc2e03de37e15f7078c487635ce7..85fcac6b8524365a322e497fa632044693efa2a4 100644 --- a/lite/kernels/opencl/elementwise_add_buffer_compute.cc +++ b/lite/kernels/opencl/elementwise_add_buffer_compute.cc @@ -43,7 +43,7 @@ void ElementwiseAddCompute::Run() { STL::stringstream kernel_key; kernel_key << kernel_func_name_ << build_options_ << time_stamp_; auto kernel = context.cl_context()->GetKernel(kernel_key.str()); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << TargetToStr(ele_param_->X->target()); VLOG(4) << TargetToStr(ele_param_->Y->target()); VLOG(4) << TargetToStr(ele_param_->Out->target()); @@ -86,7 +86,7 @@ void ElementwiseAddCompute::UpdateParams() { for (int i = static_cast(y_dims.size() + axis); i < x_dims.size(); ++i) { num_ *= x_dims[i]; } -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "axis: " << axis; VLOG(4) << "batch: " << batch_; VLOG(4) << "channels: " << channels_; diff --git a/lite/kernels/opencl/elementwise_add_image_compute.cc b/lite/kernels/opencl/elementwise_add_image_compute.cc index c507dcb43da35f6912f98a89416a34e10012bdc0..4af02e8b7392fab80608a54838a69cc3eb754af0 100644 --- a/lite/kernels/opencl/elementwise_add_image_compute.cc +++ b/lite/kernels/opencl/elementwise_add_image_compute.cc @@ -83,7 +83,7 @@ void ElementwiseAddImageCompute::ReInitWhenNeeded() { void ElementwiseAddImageCompute::GetGlobalWorkSize() { global_work_size_ = cl::NDRange{static_cast(x_img_shape_[0]), static_cast(x_img_shape_[1])}; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "global_work_size:[2D]:" << x_img_shape_[0] << " " << x_img_shape_[1]; #endif @@ -102,7 +102,7 @@ void ElementwiseAddImageCompute::Run() { auto* out_img = out->mutable_data(out_img_shape_[0], out_img_shape_[1]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x->target():" << TargetToStr(x->target()); VLOG(4) << "y->target():" << TargetToStr(y->target()); VLOG(4) << "out->target():" << TargetToStr(out->target()); @@ -129,7 +129,7 @@ void ElementwiseAddImageCompute::Run() { } else if (y_dims.size() == 1) { if (axis == x_dims.size() - 1 || axis == x_dims.size() - 3) { const int tensor_w = x_dims[x_dims.size() - 1]; -#ifndef LITE_SHUTDOWN_LOG 
+#ifdef LITE_WITH_LOG VLOG(4) << "tensor_w:" << tensor_w; #endif status = kernel.setArg(0, *x_img); diff --git a/lite/kernels/opencl/elementwise_mul_image_compute.cc b/lite/kernels/opencl/elementwise_mul_image_compute.cc index 1f17d60097b95f67cd65b2745f7f0ce5623bdc50..dcedee86de08d6df46c9e71ec23eddebe4f32376 100644 --- a/lite/kernels/opencl/elementwise_mul_image_compute.cc +++ b/lite/kernels/opencl/elementwise_mul_image_compute.cc @@ -85,7 +85,7 @@ class ElementwiseMulImageCompute auto* y = ele_param_->Y; auto* out = ele_param_->Out; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x->target():" << TargetToStr(x->target()); VLOG(4) << "y->target():" << TargetToStr(y->target()); VLOG(4) << "out->target():" << TargetToStr(out->target()); @@ -108,7 +108,7 @@ class ElementwiseMulImageCompute auto* out_img = out->mutable_data(out_img_shape[0], out_img_shape[1]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x_img_shape[w,h]:" << x_img_width << " " << x_img_height; VLOG(4) << "y_img_shape[w,h]:" << y_img_shape[0] << " " << y_img_shape[1]; VLOG(4) << "out_img_shape[w,h]:" << out_img_shape[0] << " " @@ -194,7 +194,7 @@ class ElementwiseMulImageCompute nullptr, nullptr); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "global_work_size:[2D]:" << x_img_width << " " << x_img_height; #endif } diff --git a/lite/kernels/opencl/elementwise_sub_image_compute.cc b/lite/kernels/opencl/elementwise_sub_image_compute.cc index cae6338959fd93810fc885e59d2c574de489af7c..8a29cde6a4bbc1fe56b42e4541936b3ce56df264 100644 --- a/lite/kernels/opencl/elementwise_sub_image_compute.cc +++ b/lite/kernels/opencl/elementwise_sub_image_compute.cc @@ -64,7 +64,7 @@ void ElementwiseSubImageCompute::Run() { auto* out = ele_param_->Out; auto axis = ele_param_->axis; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x->target():" << TargetToStr(x->target()); VLOG(4) << "y->target():" << TargetToStr(y->target()); VLOG(4) << "out->target():" << TargetToStr(out->target()); @@ -87,7 +87,7 @@ void ElementwiseSubImageCompute::Run() { auto* out_img = out->mutable_data(out_img_shape[0], out_img_shape[1]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x_img_shape[w,h]:" << x_img_width << " " << x_img_height; VLOG(4) << "y_img_shape[w,h]:" << y_img_shape[0] << " " << y_img_shape[1]; VLOG(4) << "out_img_shape[w,h]:" << out_img_shape[0] << " " @@ -110,7 +110,7 @@ void ElementwiseSubImageCompute::Run() { } else if (y_dims.size() == 1) { if (axis == x->dims().size() - 1 || axis == x->dims().size() - 3) { int tensor_w = x->dims()[x->dims().size() - 1]; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "tensor_w:" << tensor_w; #endif cl_int status = kernel.setArg(arg_idx, *x_img); @@ -134,7 +134,7 @@ void ElementwiseSubImageCompute::Run() { auto global_work_size = cl::NDRange{static_cast(x_img_width), static_cast(x_img_height)}; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "global_work_size:[2D]:" << x_img_width << " " << x_img_height; #endif diff --git a/lite/kernels/opencl/fc_buffer_compute.cc b/lite/kernels/opencl/fc_buffer_compute.cc index 107575ac6d0cd21358d1ccbe4ba9d0834a445bcd..38ca4fb7968fb5d0820837077dd3236e588aa129 100644 --- a/lite/kernels/opencl/fc_buffer_compute.cc +++ b/lite/kernels/opencl/fc_buffer_compute.cc @@ -52,7 +52,7 @@ class FcCompute n_ = w_dims[1]; CHECK_EQ(k_, static_cast(w_dims[0])); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x_dims:" << x_dims[0] << " " << x_dims[1] << " " << x_dims[2] << " 
" << x_dims[3]; VLOG(4) << "w_dims:" << w_dims[0] << " " << w_dims[1] << " " << w_dims[2] @@ -66,7 +66,7 @@ class FcCompute } else { // gemm kernel_func_name_ = "fc_gemm_4x4"; } -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(1) << "kernel_func_name_:" << kernel_func_name_; #endif diff --git a/lite/kernels/opencl/grid_sampler_image_compute.cc b/lite/kernels/opencl/grid_sampler_image_compute.cc index c4daf6ae4222e498726f24e0ba10d12f6f4918af..e9151e18efb6ea24e965aaa81027259ac0beef90 100644 --- a/lite/kernels/opencl/grid_sampler_image_compute.cc +++ b/lite/kernels/opencl/grid_sampler_image_compute.cc @@ -80,7 +80,7 @@ class GridSamplerImageCompute : public KernelLite(default_work_size[0]), static_cast(default_work_size[1]), static_cast(default_work_size[2] / 4)}; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "default_work_size: " << default_work_size[0] << ", " << default_work_size[1] << ", " << default_work_size[2]; VLOG(4) << "global_work_size_:[2D]:" << global_work_size_[0] << " " @@ -102,7 +102,7 @@ class GridSamplerImageCompute : public KernelLitemutable_data(out_img_shape_[0], out_img_shape_[1]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG auto in_dims = x->dims(); VLOG(4) << "x->target():" << TargetToStr(x->target()); VLOG(4) << "out->target():" << TargetToStr(out->target()); diff --git a/lite/kernels/opencl/instance_norm_image_compute.cc b/lite/kernels/opencl/instance_norm_image_compute.cc index bf7c2aab35ebeae2f64960721f6b23d1c04c1ddc..d0145889419bb7b8d467d645024d56fe8f872976 100644 --- a/lite/kernels/opencl/instance_norm_image_compute.cc +++ b/lite/kernels/opencl/instance_norm_image_compute.cc @@ -96,7 +96,7 @@ class InstanceNormImageCompute : public KernelLite(lws1), static_cast(lws2)}; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "global_work_size:" << static_cast(global_work_size[0]) << " " << static_cast(global_work_size[1]) << " " << static_cast(global_work_size[2]); @@ -200,7 +200,7 @@ class InstanceNormImageCompute : public KernelLitetarget():" << TargetToStr(x->target()); VLOG(4) << "out->target():" << TargetToStr(out->target()); VLOG(4) << "x->dims():" << in_dims; @@ -211,7 +211,7 @@ class InstanceNormImageCompute : public KernelLitemutable_data( out_image_shape["width"], out_image_shape["height"]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "out_image_shape[w,h]: " << out_image_shape["width"] << " " << out_image_shape["height"]; @@ -229,7 +229,7 @@ class InstanceNormImageCompute : public KernelLite(group_size_y), static_cast(1)}; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "local_work_size:[2D]:" << local_work_size[0] << " " << local_work_size[1] << " " << local_work_size[2]; VLOG(4) << "global_work_size:[2D]:" << global_work_size[0] << " " diff --git a/lite/kernels/opencl/io_copy_buffer_compute.cc b/lite/kernels/opencl/io_copy_buffer_compute.cc index f981c5ca11a456ff649ba975a9ed63372f80f6ce..31fc563c95294aa5612899805aaf9ae8b11d2191 100644 --- a/lite/kernels/opencl/io_copy_buffer_compute.cc +++ b/lite/kernels/opencl/io_copy_buffer_compute.cc @@ -42,7 +42,7 @@ class IoCopyHostToOpenCLCompute CHECK(param.x->target() == TARGET(kHost) || param.x->target() == TARGET(kARM)); auto mem_size = param.x->memory_size(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(2) << "param.x->memory_size():" << mem_size; VLOG(2) << "param.x->dims().size():" << param.x->dims().size(); VLOG(2) << "param.x->dims():" << param.x->dims(); @@ -87,7 +87,7 @@ class IoCopykOpenCLToHostCompute CHECK(param.x->target() == 
TARGET(kOpenCL)); auto mem_size = param.x->memory_size(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(2) << "copy size " << mem_size; VLOG(2) << "param.x->dims().size():" << param.x->dims().size(); VLOG(2) << "param.x->dims():" << param.x->dims(); @@ -106,7 +106,7 @@ class IoCopykOpenCLToHostCompute auto& context = ctx_->As(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(2) << "--- Find the sync event for the target cl tensor. ---"; #endif CLRuntime::Global()->command_queue().finish(); diff --git a/lite/kernels/opencl/layout_image_compute.cc b/lite/kernels/opencl/layout_image_compute.cc index ce2242661144e11f1e042011919353f29e0440a8..3c7a6ae42f4d442ece152b13b37f80355c6cc6b7 100644 --- a/lite/kernels/opencl/layout_image_compute.cc +++ b/lite/kernels/opencl/layout_image_compute.cc @@ -76,7 +76,7 @@ class LayoutComputeBufferChwToImageDefault const int Stride1 = out_H * out_W; const int Stride0 = out_W; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(2) << "param.process_type:" << param.process_type; VLOG(2) << "x_dims:" << x_dims; VLOG(2) << "param.x->memory_size():" << param.x->memory_size(); @@ -118,7 +118,7 @@ class LayoutComputeBufferChwToImageDefault status = kernel.setArg(++arg_idx, static_cast(Stride2)); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(2) << "gws:[3D]" << ((new_dims[1] + 3) / 4) << " " << new_dims[3] << " " << (new_dims[0] * new_dims[2]); #endif @@ -186,7 +186,7 @@ class LayoutComputeImageDefaultToBufferChw new_dims[4 - x_dims.size() + j] = x_dims[j]; } -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(2) << "param.process_type:" << param.process_type; VLOG(2) << "x_dims:" << x_dims; VLOG(2) << "param.x->memory_size():" << param.x->memory_size(); @@ -228,7 +228,7 @@ class LayoutComputeImageDefaultToBufferChw CL_CHECK_FATAL(status); status = kernel.setArg(++arg_idx, static_cast(C)); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(2) << "gws:[3D]" << ((new_dims[1] + 3) / 4) << " " << new_dims[3] << " " << (new_dims[0] * new_dims[2]); #endif diff --git a/lite/kernels/opencl/lrn_image_compute.cc b/lite/kernels/opencl/lrn_image_compute.cc index 91e94fd4a508bee169f9030aa033136b13607382..8e70189b8842045b0e67a5d32b233e8746cf60a2 100644 --- a/lite/kernels/opencl/lrn_image_compute.cc +++ b/lite/kernels/opencl/lrn_image_compute.cc @@ -65,7 +65,7 @@ class LrnImageCompute : public KernelLitedims(); auto in_dims = x->dims(); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "x->target(): " << TargetToStr(x->target()); VLOG(4) << "out->target(): " << TargetToStr(out->target()); VLOG(4) << "x->dims(): " << in_dims; @@ -84,7 +84,7 @@ class LrnImageCompute : public KernelLitemutable_data( out_image_shape["width"], out_image_shape["height"]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG // VLOG(4) << "out_image" << out_img; VLOG(4) << "out_image_shape[w,h]:" << out_image_shape["width"] << " " << out_image_shape["height"]; @@ -102,7 +102,7 @@ class LrnImageCompute : public KernelLite{ static_cast(out_image_shape["width"]), static_cast(out_image_shape["height"])})); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "default_work_size: " << default_work_size[0] << ", " << default_work_size[1] << ", " << default_work_size[3]; #endif @@ -136,7 +136,7 @@ class LrnImageCompute : public KernelLite(out_dims_w)); CL_CHECK_FATAL(status); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << TargetToStr(param.X->target()); VLOG(4) << TargetToStr(param.Out->target()); VLOG(4) << 
"out_image_shape(w,h):" << out_image_shape["width"] << " " diff --git a/lite/kernels/opencl/pad2d_image_compute.cc b/lite/kernels/opencl/pad2d_image_compute.cc index 3318825f2ba5ebe60340a179f12f37a1b92fb5e6..49489ea3b40d99c00b89cdda6108b512a9f9b6b9 100644 --- a/lite/kernels/opencl/pad2d_image_compute.cc +++ b/lite/kernels/opencl/pad2d_image_compute.cc @@ -73,7 +73,7 @@ class Pad2dCompute : public KernelLitetarget():" << TargetToStr(x->target()); VLOG(4) << "out->target():" << TargetToStr(out->target()); VLOG(4) << "x->dims():" << in_dims; @@ -86,7 +86,7 @@ class Pad2dCompute : public KernelLitemutable_data( out_image_shape["width"], out_image_shape["height"]); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "out_image_shape[w,h]: " << out_image_shape["width"] << " " << out_image_shape["height"]; @@ -104,7 +104,7 @@ class Pad2dCompute : public KernelLite{ static_cast(out_image_shape["width"]), static_cast(out_image_shape["height"])})); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "default_work_size: " << default_work_size[0] << ", " << default_work_size[1] << ", " << default_work_size[2]; #endif @@ -150,7 +150,7 @@ class Pad2dCompute : public KernelLite strides = param.strides; std::vector ksize = param.ksize; -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "global_pooling: " << global_pooling; VLOG(4) << "pooling_type: " << pooling_type; VLOG(4) << "paddings : " << paddings[0] << " " << paddings[1] << " " @@ -75,7 +75,7 @@ class PoolComputeImage2D : public KernelLitemutable_data( out_image_shape.at("width"), out_image_shape.at("height")); -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG VLOG(4) << "out_dims= " << out_dims; #endif const std::vector& default_work_size = DefaultWorkSize( @@ -96,7 +96,7 @@ class ReshapeComputeFloatImage : public KernelLite --arm_abi= --arm_lang= test" echo echo -e "optional argument:" - echo -e "--shutdown_log: (OFF|ON); controls whether to shutdown log, default is ON" + echo -e "--with_log: (OFF|ON); controls whether to print log information, default is ON" echo -e "--build_extra: (OFF|ON); controls whether to publish extra operators and kernels for (sequence-related model such as OCR or NLP)" echo -e "--build_train: (OFF|ON); controls whether to publish training operators and kernels, build_train is only for full_publish library now" echo -e "--build_python: (OFF|ON); controls whether to publish python api lib (ANDROID and IOS is not supported)" @@ -493,8 +493,8 @@ function main { BUILD_TAILOR="${i#*=}" shift ;; - --shutdown_log=*) - SHUTDOWN_LOG="${i#*=}" + --with_log=*) + WITH_LOG="${i#*=}" shift ;; --build_npu=*) diff --git a/lite/tools/build_android.sh b/lite/tools/build_android.sh index b216b958fa6d3a77511d070a3af36a9116608844..2c90bad1ada081c65fdbd04464f2a12be2252e6f 100755 --- a/lite/tools/build_android.sh +++ b/lite/tools/build_android.sh @@ -16,7 +16,7 @@ WITH_JAVA=ON # controls whether to compile cv functions into lib, default is OFF. WITH_CV=OFF # controls whether to hide log information, default is ON. -SHUTDOWN_LOG=ON +WITH_LOG=ON # options of striping lib according to input model. 
OPTMODEL_DIR="" WITH_STRIP=OFF @@ -144,7 +144,7 @@ function make_tiny_publish_so { local cmake_mutable_options=" -DLITE_BUILD_EXTRA=$WITH_EXTRA \ - -DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \ + -DLITE_WITH_LOG=$WITH_LOG \ -DLITE_BUILD_TAILOR=$WITH_STRIP \ -DLITE_OPTMODEL_DIR=$OPTMODEL_DIR \ -DLITE_WITH_JAVA=$WITH_JAVA \ @@ -193,7 +193,7 @@ function make_full_publish_so { local cmake_mutable_options=" -DLITE_BUILD_EXTRA=$WITH_EXTRA \ - -DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \ + -DLITE_WITH_LOG=$WITH_LOG \ -DLITE_BUILD_TAILOR=$WITH_STRIP \ -DLITE_OPTMODEL_DIR=$OPTMODEL_DIR \ -DLITE_WITH_JAVA=$WITH_JAVA \ @@ -236,7 +236,7 @@ function print_usage { echo -e "| --android_stl: (c++_static|c++_shared|gnu_static|gnu_shared), default is c++_static |" echo -e "| --with_java: (OFF|ON); controls whether to publish java api lib, default is ON |" echo -e "| --with_cv: (OFF|ON); controls whether to compile cv functions into lib, default is OFF |" - echo -e "| --shutdown_log: (OFF|ON); controls whether to hide log information, default is ON |" + echo -e "| --with_log: (OFF|ON); controls whether to print log information, default is ON |" echo -e "| --with_extra: (OFF|ON); controls whether to publish extra operators and kernels for (sequence-related model such as OCR or NLP) |" echo -e "| |" echo -e "| arguments of striping lib according to input model:(armv8, gcc, c++_static) |" @@ -315,8 +315,8 @@ function main { shift ;; # ON or OFF, default ON - --shutdown_log=*) - SHUTDOWN_LOG="${i#*=}" + --with_log=*) + WITH_LOG="${i#*=}" shift ;; # compiling lib which can operate on opencl and cpu. diff --git a/lite/tools/build_ios.sh b/lite/tools/build_ios.sh index afb766ff1f60816d4e70efe929174f92eb5c3923..219e9bb70932e5cb63c47b661366253f63f6f3d3 100755 --- a/lite/tools/build_ios.sh +++ b/lite/tools/build_ios.sh @@ -11,7 +11,7 @@ WITH_EXTRA=OFF # controls whether to compile cv functions into lib, default is OFF. WITH_CV=OFF -# controls whether to hide log information, default is ON. -SHUTDOWN_LOG=ON +# controls whether to print log information, default is ON. +WITH_LOG=ON # absolute path of Paddle-Lite. workspace=$PWD/$(dirname $0)/../../ # options of striping lib according to input model.
@@ -67,7 +67,7 @@ function make_ios { -DLITE_WITH_OPENMP=OFF \ -DWITH_ARM_DOTPROD=OFF \ -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \ - -DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \ + -DLITE_WITH_LOG=$WITH_LOG \ -DLITE_BUILD_TAILOR=$WITH_STRIP \ -DLITE_OPTMODEL_DIR=$OPTMODEL_DIR \ -DARM_TARGET_ARCH_ABI=$abi \ @@ -94,7 +94,7 @@ function print_usage { echo -e "| optional argument: |" echo -e "| --arm_abi: (armv8|armv7), default is armv8 |" echo -e "| --with_cv: (OFF|ON); controls whether to compile cv functions into lib, default is OFF |" - echo -e "| --shutdown_log: (OFF|ON); controls whether to hide log information, default is ON |" + echo -e "| --with_log: (OFF|ON); controls whether to print log information, default is ON |" echo -e "| --with_extra: (OFF|ON); controls whether to publish extra operators and kernels for (sequence-related model such as OCR or NLP) |" echo -e "| |" echo -e "| arguments of striping lib according to input model:(armv8, gcc, c++_static) |" @@ -136,8 +136,8 @@ function main { WITH_STRIP="${i#*=}" shift ;; - --shutdown_log=*) - SHUTDOWN_LOG="${i#*=}" + --with_log=*) + WITH_LOG="${i#*=}" shift ;; help) diff --git a/lite/tools/build_linux.sh b/lite/tools/build_linux.sh index 202f8a8406402a6d86f4d9a82df88d2612eed374..d17760b9e40f7f91803f92d6b1f6b4547b963ff8 100755 --- a/lite/tools/build_linux.sh +++ b/lite/tools/build_linux.sh @@ -52,12 +52,13 @@ readonly CMAKE_COMMON_OPTIONS="-DWITH_LITE=ON \ -DWITH_TESTING=OFF" # mutable options for linux compiling. function init_cmake_mutable_options { + cmake_mutable_options="-DARM_TARGET_ARCH_ABI=$ARCH \ -DARM_TARGET_LANG=$TOOLCHAIN \ -DLITE_BUILD_EXTRA=$WITH_EXTRA \ -DLITE_WITH_PYTHON=$WITH_PYTHON \ -DLITE_WITH_CV=$WITH_CV \ - -DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \ + -DLITE_WITH_LOG=$WITH_LOG \ -DLITE_BUILD_TAILOR=$WITH_STRIP \ -DLITE_OPTMODEL_DIR=$OPTMODEL_DIR \ -DLITE_WITH_OPENCL=$WITH_OPENCL \ diff --git a/lite/tools/build_npu.sh b/lite/tools/build_npu.sh index 1515cfcdd3e69391b4d1a96688c7dc75f40e6dc2..bbfb71deebed23ac205ce3e4e8b23d2a5d312f5b 100755 --- a/lite/tools/build_npu.sh +++ b/lite/tools/build_npu.sh @@ -11,7 +11,7 @@ TARGET_NAME="test_subgraph_pass" # default target BUILD_EXTRA=OFF # ON(with sequence ops)/OFF WITH_JAVA=ON # ON(build jar and jni so)/OFF WITH_TESTING=ON # ON/OFF -SHUTDOWN_LOG=OFF # ON(disable logging)/OFF +WITH_LOG=ON # ON(enable logging)/OFF ON_TINY_PUBLISH=OFF # ON(tiny publish)/OFF(full publish) function print_usage { @@ -76,7 +76,7 @@ function build_npu { fi if [[ "${ON_TINY_PUBLISH}" == "ON" ]]; then WITH_TESTING=OFF - SHUTDOWN_LOG=ON + WITH_LOG=OFF publish_dir="tiny_publish" else publish_dir="full_publish" @@ -99,7 +99,7 @@ function build_npu { -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \ -DWITH_TESTING=${WITH_TESTING} \ -DLITE_WITH_JAVA=${WITH_JAVA} \ - -DLITE_SHUTDOWN_LOG=${SHUTDOWN_LOG} \ + -DLITE_WITH_LOG=${WITH_LOG} \ -DLITE_WITH_NPU=ON \ -DLITE_ON_TINY_PUBLISH=${ON_TINY_PUBLISH} \ -DANDROID_API_LEVEL=24 \ diff --git a/lite/tools/build_rknpu.sh b/lite/tools/build_rknpu.sh index aa2fb5a124077b43f65537ab12715602ab1fe6b8..aed406db0979ca945732364f5bdc93afb8dd3c1c 100755 --- a/lite/tools/build_rknpu.sh +++ b/lite/tools/build_rknpu.sh @@ -8,8 +8,8 @@ ARM_LANG="gcc" # gcc only yet DDK_ROOT="$(pwd)/rknpu" TARGET_NAME="test_subgraph_pass" # default target BUILD_EXTRA=OFF # ON(with sequence ops)/OFF -WITH_TESTING=ON # ON/OFF -SHUTDOWN_LOG=OFF # ON(disable logging)/OFF +WITH_TESTING=ON # ON/OFF +WITH_LOG=ON # ON(enable logging)/OFF ON_TINY_PUBLISH=OFF # ON(tiny publish)/OFF(full publish) function print_usage { @@ -65,7
+65,7 @@ function build_npu { local publish_dir if [[ "${ON_TINY_PUBLISH}" == "ON" ]]; then WITH_TESTING=OFF - SHUTDOWN_LOG=ON + WITH_LOG=OFF publish_dir="tiny_publish" else publish_dir="full_publish" @@ -89,7 +89,7 @@ function build_npu { -DWITH_ARM_DOTPROD=ON \ -DLITE_BUILD_EXTRA=${BUILD_EXTRA} \ -DWITH_TESTING=${WITH_TESTING} \ - -DLITE_SHUTDOWN_LOG=${SHUTDOWN_LOG} \ + -DLITE_WITH_LOG=${WITH_LOG} \ -DLITE_ON_TINY_PUBLISH=${ON_TINY_PUBLISH} \ -DARM_TARGET_OS=${ARM_OS} \ -DARM_TARGET_ARCH_ABI=${ARM_ABI} \ diff --git a/lite/tools/ci_build.sh b/lite/tools/ci_build.sh index a5dc2b741d2d3d5fdd2f08d13b7dc483a3065b0e..270c3cf79c0dc498f0f792a32442130822545635 100755 --- a/lite/tools/ci_build.sh +++ b/lite/tools/ci_build.sh @@ -118,7 +118,7 @@ function cmake_opencl { -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \ -DWITH_TESTING=ON \ -DLITE_BUILD_EXTRA=ON \ - -DLITE_SHUTDOWN_LOG=OFF \ + -DLITE_WITH_LOG=ON \ -DLITE_WITH_CV=OFF \ -DARM_TARGET_OS=$1 -DARM_TARGET_ARCH_ABI=$2 -DARM_TARGET_LANG=$3 } @@ -653,7 +653,7 @@ function build_ios { -DLITE_WITH_ARM=ON \ -DWITH_TESTING=OFF \ -DLITE_WITH_JAVA=OFF \ - -DLITE_SHUTDOWN_LOG=ON \ + -DLITE_WITH_LOG=OFF \ -DLITE_ON_TINY_PUBLISH=ON \ -DLITE_WITH_OPENMP=OFF \ -DWITH_ARM_DOTPROD=OFF \ @@ -1000,7 +1000,7 @@ function mobile_publish { -DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \ -DWITH_TESTING=OFF \ -DLITE_WITH_JAVA=ON \ - -DLITE_SHUTDOWN_LOG=ON \ + -DLITE_WITH_LOG=OFF \ -DLITE_ON_TINY_PUBLISH=ON \ -DARM_TARGET_OS=${os} -DARM_TARGET_ARCH_ABI=${abi} -DARM_TARGET_LANG=${lang} diff --git a/lite/utils/CMakeLists.txt b/lite/utils/CMakeLists.txt index ec5811882966cab828148760f7924cf33f25cf94..573efcad9a0f11c6b944663afd88be1d6042013f 100644 --- a/lite/utils/CMakeLists.txt +++ b/lite/utils/CMakeLists.txt @@ -3,7 +3,7 @@ # else() # endif() -if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK OR LITE_ON_MODEL_OPTIMIZE_TOOL OR LITE_SHUTDOWN_LOG) +if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK OR LITE_ON_MODEL_OPTIMIZE_TOOL OR (NOT LITE_WITH_LOG)) lite_cc_library(logging SRCS logging.cc) set(utils_DEPS logging) lite_cc_test(test_logging SRCS logging_test.cc DEPS ${utils_DEPS}) diff --git a/lite/utils/cp_logging.h b/lite/utils/cp_logging.h index a9970c72b033ff760e2237ce45e7b75d77d8835a..faaf25f6562cb1ecb408dbe8a9da806ed4dfdffa 100644 --- a/lite/utils/cp_logging.h +++ b/lite/utils/cp_logging.h @@ -14,7 +14,7 @@ #pragma once #if defined(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) || \ - defined(LITE_ON_MODEL_OPTIMIZE_TOOL) || defined(LITE_SHUTDOWN_LOG) + defined(LITE_ON_MODEL_OPTIMIZE_TOOL) || !defined(LITE_WITH_LOG) #include "lite/utils/logging.h" #else // LITE_WITH_LIGHT_WEIGHT_FRAMEWORK #include diff --git a/lite/utils/logging.cc b/lite/utils/logging.cc index e9ee5861baca85966ce53ac1570d7ebc23a002cb..920aa58fe3d2df92d9b4b0104d9fab5094611331 100644 --- a/lite/utils/logging.cc +++ b/lite/utils/logging.cc @@ -22,7 +22,7 @@ #if defined(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) || \ defined(LITE_ON_MODEL_OPTIMIZE_TOOL) -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG namespace paddle { namespace lite { @@ -60,5 +60,5 @@ void gen_log(STL::ostream& log_stream_, } // namespace lite } // namespace paddle -#endif // LITE_SHUTDOWN_LOG +#endif // LITE_WITH_LOG #endif // LITE_WITH_LIGHT_FRAMEWORK diff --git a/lite/utils/logging.h b/lite/utils/logging.h index 97eb916ff15db35c0cd3a7cd240483f83e1a5a27..d05624d7c6eba856ac76cf7a78e8c231fc7dfbeb 100644 --- a/lite/utils/logging.h +++ b/lite/utils/logging.h @@ -46,7 +46,7 @@ // NOLINTFILE() // LOG() -#ifdef LITE_SHUTDOWN_LOG +#ifndef LITE_WITH_LOG #define LOG(status) LOG_##status #define LOG_INFO 
paddle::lite::Voidify() #define LOG_ERROR LOG_INFO @@ -62,7 +62,7 @@ paddle::lite::LogMessageFatal(__FILE__, __FUNCTION__, __LINE__) #endif -#ifdef LITE_SHUTDOWN_LOG +#ifndef LITE_WITH_LOG #define VLOG(level) paddle::lite::Voidify() #else // VLOG() @@ -72,7 +72,7 @@ // CHECK() // clang-format off -#ifdef LITE_SHUTDOWN_LOG +#ifndef LITE_WITH_LOG #define CHECK(x) if (!(x)) paddle::lite::VoidifyFatal() #define _CHECK_BINARY(x, cmp, y) CHECK(x cmp y) #else @@ -91,7 +91,7 @@ namespace paddle { namespace lite { -#ifndef LITE_SHUTDOWN_LOG +#ifdef LITE_WITH_LOG void gen_log(STL::ostream& log_stream_, const char* file, const char* func, diff --git a/lite/utils/replace_stl/stream.cc b/lite/utils/replace_stl/stream.cc index aadee7e2695a61ca2141be0e375fb829f6d5663e..081006be6711d5d26c405181fd6d86e89c9e4e95 100644 --- a/lite/utils/replace_stl/stream.cc +++ b/lite/utils/replace_stl/stream.cc @@ -37,7 +37,7 @@ void ostream::pad(const std::string& text) { } } -#ifdef LITE_SHUTDOWN_LOG +#ifndef LITE_WITH_LOG #define ADD_DATA_AS_STRING(data_, obj_) #else #define ADD_DATA_AS_STRING(data_, obj_) \