Unverified commit 3a959784, authored by huzhiqiang, committed by GitHub

[Compile] Change compiling option `SHUTDOWN_LOG` to `WITH_LOG` #3514

Parent e47f1607
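This commit renames the build option `LITE_SHUTDOWN_LOG` to `LITE_WITH_LOG` (and the build-script flag `--shutdown_log` to `--with_log`) and inverts its meaning: logging is now enabled when the option is ON. CMake guards therefore change from `if (LITE_SHUTDOWN_LOG)` to `if (LITE_WITH_LOG)`, and preprocessor guards from `#ifndef LITE_SHUTDOWN_LOG` to `#ifdef LITE_WITH_LOG`. A before/after sketch of an equivalent "no logging" build (the flag combinations here are illustrative; the real call sites are in the hunks below):

```shell
# Before this change: logging was suppressed by switching the shutdown flag ON.
./lite/tools/build.sh --arm_os=android --arm_abi=armv8 --arm_lang=gcc --shutdown_log=ON tiny_publish
cmake .. -DLITE_SHUTDOWN_LOG=ON -DLITE_ON_TINY_PUBLISH=ON

# After this change: the same build suppresses logging with the inverted flag.
./lite/tools/build.sh --arm_os=android --arm_abi=armv8 --arm_lang=gcc --with_log=OFF tiny_publish
cmake .. -DLITE_WITH_LOG=OFF -DLITE_ON_TINY_PUBLISH=ON
```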
......@@ -97,7 +97,7 @@ lite_option(LITE_WITH_FPGA "Enable FPGA support in lite" OFF)
lite_option(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK "Enable light-weight framework" OFF)
lite_option(LITE_WITH_PROFILE "Enable profile mode in lite framework" OFF)
lite_option(LITE_WITH_PRECISION_PROFILE "Enable precision profile in profile mode ON in lite" OFF)
lite_option(LITE_SHUTDOWN_LOG "Shutdown log system or not." OFF)
lite_option(LITE_WITH_LOG "Enable log printing or not." ON)
lite_option(LITE_ON_TINY_PUBLISH "Publish tiny predictor lib." OFF)
lite_option(LITE_ON_MODEL_OPTIMIZE_TOOL "Build the model optimize tool" OFF)
# publish options
......
......@@ -186,8 +186,8 @@ if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK)
add_definitions("-DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK")
endif()
if (LITE_SHUTDOWN_LOG)
add_definitions("-DLITE_SHUTDOWN_LOG")
if (LITE_WITH_LOG)
add_definitions("-DLITE_WITH_LOG")
endif()
if (LITE_ON_TINY_PUBLISH)
......
......@@ -131,8 +131,8 @@ $ git clone https://github.com/airockchip/rknpu_ddk.git
```
- Build full_publish and tiny_publish for armv8 (note: RKNPU_DDK only supports armv8)
```shell
$ ./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv8 --arm_lang=gcc --build_extra=ON --shutdown_log=OFF --build_rknpu=ON --rknpu_ddk_root=./rknpu_ddk full_publish
$ ./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv8 --arm_lang=gcc --build_extra=ON --shutdown_log=OFF --build_rknpu=ON --rknpu_ddk_root=./rknpu_ddk tiny_publish
$ ./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv8 --arm_lang=gcc --build_extra=ON --with_log=ON --build_rknpu=ON --rknpu_ddk_root=./rknpu_ddk full_publish
$ ./lite/tools/build.sh --arm_os=armlinux --arm_abi=armv8 --arm_lang=gcc --build_extra=ON --with_log=ON --build_rknpu=ON --rknpu_ddk_root=./rknpu_ddk tiny_publish
```
- Replace the PaddleLite-armlinux-demo/Paddle-Lite/include directory with the generated build.lite.armlinux.armv8.gcc/inference_lite_lib.armlinux.armv8.rknpu/cxx/include directory;
- Replace the PaddleLite-armlinux-demo/Paddle-Lite/libs/armv8/libpaddle_light_api_shared.so file with the generated build.lite.armlinux.armv8.gcc/inference_lite_lib.armlinux.armv8.rknpu/cxx/lib/libpaddle_light_api_shared.so (as sketched below).
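The two replacement steps above written out as shell commands (a sketch; the paths are exactly the ones quoted in the bullets):

```shell
# Overwrite the demo's headers and light-api shared library with the freshly built ones.
cp -rf build.lite.armlinux.armv8.gcc/inference_lite_lib.armlinux.armv8.rknpu/cxx/include/. \
       PaddleLite-armlinux-demo/Paddle-Lite/include/
cp build.lite.armlinux.armv8.gcc/inference_lite_lib.armlinux.armv8.rknpu/cxx/lib/libpaddle_light_api_shared.so \
   PaddleLite-armlinux-demo/Paddle-Lite/libs/armv8/libpaddle_light_api_shared.so
```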
......
if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK OR LITE_SHUTDOWN_LOG)
if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK OR (NOT LITE_WITH_LOG))
lite_cc_library(place SRCS paddle_place.cc DEPS logging)
else()
lite_cc_library(place SRCS paddle_place.cc DEPS glog)
......
......@@ -32,7 +32,7 @@ const char* opencl_error_to_str(cl_int error);
__FILE__, \
__LINE__); \
}
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
#define CL_CHECK_FATAL(err_code__) \
if (err_code__ != CL_SUCCESS) { \
LOG(FATAL) << string_format( \
......
......@@ -54,7 +54,7 @@ git checkout release/v2.3
--arm_lang=gcc \
--android_stl=c++_static \
--build_extra=ON \
--shutdown_log=OFF \
--with_log=ON \
full_publish
```
......
......@@ -24,7 +24,7 @@ cmake .. \
-DLITE_WITH_ARM=ON \
-DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \
-DWITH_TESTING=OFF \
-DLITE_SHUTDOWN_LOG=ON \
-DLITE_WITH_LOG=OFF \
-DLITE_ON_TINY_PUBLISH=ON \
-DARM_TARGET_OS=android -DARM_TARGET_ARCH_ABI=armv8 -DARM_TARGET_LANG=gcc
......
......@@ -39,7 +39,7 @@ class ActivationComputeImageDefault
void PrepareForRun() override {
act_param_ = param_.get_mutable<param_t>();
int act_type = static_cast<int>(act_param_->active_type);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(1) << "ActivationTypeToStr(act_param_->active_type):"
<< ActivationTypeToStr(act_param_->active_type);
#endif
......@@ -72,7 +72,7 @@ class ActivationComputeImageDefault
LOG(FATAL) << "This act type:" << act_type << " doesn't support.";
return;
}
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(1) << "kernel_func_name_:" << kernel_func_name_;
#endif
......@@ -129,7 +129,7 @@ class ActivationComputeImageDefault
status = kernel.setArg(3, scale_);
CL_CHECK_FATAL(status);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
const auto& x_dims = act_param_->X->dims();
const auto& y_dims = act_param_->Out->dims(); // useless: check dim only
VLOG(4) << TargetToStr(act_param_->X->target());
......
......@@ -79,7 +79,7 @@ class BilinearInterpImageCompute
int out_h = out_dims[2];
int out_w = out_dims[3];
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "x->target():" << TargetToStr(x->target());
VLOG(4) << "out->target():" << TargetToStr(out->target());
VLOG(4) << "x->dims():" << in_dims;
......@@ -92,7 +92,7 @@ class BilinearInterpImageCompute
auto* out_img = out->mutable_data<half_t, cl::Image2D>(
out_image_shape["width"], out_image_shape["height"]);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
// VLOG(4) << "x_image: " << x_img;
// VLOG(4) << "out_image: " << out_img;
VLOG(4) << "out_image_shape[w,h]: " << out_image_shape["width"] << " "
......@@ -114,7 +114,7 @@ class BilinearInterpImageCompute
DDim(std::vector<DDim::value_type>{
static_cast<int64_t>(out_image_shape["width"]),
static_cast<int64_t>(out_image_shape["height"])}));
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "default_work_size: " << default_work_size[0] << ", "
<< default_work_size[1] << ", " << default_work_size[2];
#endif
......@@ -150,7 +150,7 @@ class BilinearInterpImageCompute
nullptr,
nullptr);
CL_CHECK_FATAL(status);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "global_work_size:[2D]:" << global_work_size[0] << " "
<< global_work_size[1] << " " << global_work_size[2];
#endif
......
......@@ -61,7 +61,7 @@ class BoxCoderComputeImage : public KernelLite<TARGET(kOpenCL),
boxcoder_param_->proposals->mutable_data<half_t, cl::Image2D>(
image_shape["width"], image_shape["height"]);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "boxcoder input shape: ";
#endif
......@@ -93,7 +93,7 @@ class BoxCoderComputeImage : public KernelLite<TARGET(kOpenCL),
int out_C = new_dims[1];
int out_H = new_dims[2];
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << TargetToStr(boxcoder_param_->proposals->target());
VLOG(4) << "output shape: " << out_dims[0] << ", " << out_dims[1] << ", "
<< out_dims[2] << ", " << out_dims[3];
......@@ -130,7 +130,7 @@ class BoxCoderComputeImage : public KernelLite<TARGET(kOpenCL),
nullptr);
CL_CHECK_FATAL(status);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "global_work_size:[2D]:" << global_work_size[0] << " "
<< global_work_size[1];
#endif
......
......@@ -125,7 +125,7 @@ class ConcatComputeImage : public KernelLite<TARGET(kOpenCL),
int arg_idx = 0;
int width = inputs[0]->dims()[inputs[0]->dims().size() - 1];
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "concat input shape: ";
for (size_t i = 0; i < inputs.size(); i++) {
VLOG(4) << "inputs [" << i << "]"
......@@ -149,7 +149,7 @@ class ConcatComputeImage : public KernelLite<TARGET(kOpenCL),
x_dims[x_dims.size() - 1]),
static_cast<cl::size_type>(image_shape["height"])};
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << TargetToStr(param.output->target());
VLOG(4) << "image_shape(w,h):" << image_shape["width"] << " "
<< image_shape["height"];
......@@ -204,7 +204,7 @@ class ConcatComputeImage : public KernelLite<TARGET(kOpenCL),
image_shape = InitImageDimInfoWith(in_dims);
auto* x_buf = inputs[i]->data<half_t, cl::Image2D>();
int in_w = in_dims[in_dims.size() - 1];
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "image_shape(w,h):" << image_shape["width"] << " "
<< image_shape["height"];
#endif
......
......@@ -541,12 +541,12 @@ void ConvImageCompute::Conv2d1x1opt(bool is_turn) {
int input_c = input_dims[1];
auto dilations = *param.dilations;
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
// VLOG(4) << "out_image: " << out_image;
VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << ","
<< global_work_size_[1] << "," << global_work_size_[2] << "}";
#endif
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "============ conv2d_1x1 params ============";
VLOG(4) << "input_image_shape: " << input_image_shape["width"] << ","
<< input_image_shape["height"];
......@@ -846,7 +846,7 @@ void ConvImageCompute::Conv2d3x3opt(bool is_turn) {
const bool is_element_wise_bias =
has_bias && param.output->dims() == param.bias->dims();
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "============ conv2d params ============";
// VLOG(4) << "input_image_shape: " << input_image_shape["width"] << ","
// << input_image_shape["height"];
......@@ -893,7 +893,7 @@ void ConvImageCompute::Conv2d3x3opt(bool is_turn) {
status = kernel.setArg(++arg_idx, *filter_image);
CL_CHECK_FATAL(status);
if (has_bias) {
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "set bias_image: ";
#endif
status = kernel.setArg(++arg_idx, *bias_image);
......@@ -922,7 +922,7 @@ void ConvImageCompute::Conv2d3x3opt(bool is_turn) {
status = kernel.setArg(++arg_idx, output_height);
CL_CHECK_FATAL(status);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
// VLOG(4) << "out_image: " << out_image;
VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << ","
<< global_work_size_[1] << "," << global_work_size_[2] << "}";
......@@ -975,7 +975,7 @@ void ConvImageCompute::Conv2d5x5(bool is_turn) {
int input_c = input_dims[1];
auto dilations = *param.dilations;
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "============ conv2d params ============";
VLOG(4) << "input_image_shape: " << input_image_shape["width"] << ","
<< input_image_shape["height"];
......@@ -1025,7 +1025,7 @@ void ConvImageCompute::Conv2d5x5(bool is_turn) {
status = kernel.setArg(++arg_idx, *filter_image);
CL_CHECK_FATAL(status);
if (has_bias) {
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "set bias_image: ";
#endif
status = kernel.setArg(++arg_idx, *bias_image);
......@@ -1052,7 +1052,7 @@ void ConvImageCompute::Conv2d5x5(bool is_turn) {
status = kernel.setArg(++arg_idx, output_height);
CL_CHECK_FATAL(status);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
// VLOG(4) << "out_image: " << out_image;
VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << ","
<< global_work_size_[1] << "," << global_work_size_[2] << "}";
......@@ -1103,7 +1103,7 @@ void ConvImageCompute::Conv2d5x5opt(bool is_turn) {
has_bias && param.output->dims() == param.bias->dims();
// default_work_size[2] = h_blk;
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "============ conv2d params ============";
// VLOG(4) << "input_image_shape: " << input_image_shape["width"] << ","
// << input_image_shape["height"];
......@@ -1223,7 +1223,7 @@ void ConvImageCompute::Conv2d7x7(bool is_turn) {
int input_c = input_dims[1];
auto dilations = *param.dilations;
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "============ conv2d params ============";
VLOG(4) << "input_image_shape: " << input_image_shape["width"] << ","
<< input_image_shape["height"];
......@@ -1273,7 +1273,7 @@ void ConvImageCompute::Conv2d7x7(bool is_turn) {
status = kernel.setArg(++arg_idx, *filter_image);
CL_CHECK_FATAL(status);
if (has_bias) {
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "set bias_image: ";
#endif
status = kernel.setArg(++arg_idx, *bias_image);
......@@ -1300,7 +1300,7 @@ void ConvImageCompute::Conv2d7x7(bool is_turn) {
status = kernel.setArg(++arg_idx, output_height);
CL_CHECK_FATAL(status);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
// VLOG(4) << "out_image: " << out_image;
VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << ","
<< global_work_size_[1] << "," << global_work_size_[2] << "}";
......@@ -1349,7 +1349,7 @@ void ConvImageCompute::Conv2d7x7opt(bool is_turn) {
const bool is_element_wise_bias =
has_bias && param.output->dims() == param.bias->dims();
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "============ conv2d 7x7 params ============";
// VLOG(4) << "input_image_shape: " << input_image_shape["width"] << ","
// << input_image_shape["height"];
......@@ -1479,7 +1479,7 @@ void ConvImageCompute::DepthwiseConv2d3x3s1(bool is_turn) {
const cl::Image2D* bias_image = nullptr;
if (has_bias) {
bias_image = bias_gpu_image_->data<half_t, cl::Image2D>();
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "set bias_image: ";
#endif
status = kernel.setArg(++arg_idx, *bias_image);
......@@ -1546,7 +1546,7 @@ void ConvImageCompute::DepthwiseConv2d3x3(bool is_turn) {
auto kernel = kernel_;
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "setArg";
VLOG(4) << "strides = " << strides[0];
VLOG(4) << "offset = " << offset;
......@@ -1576,7 +1576,7 @@ void ConvImageCompute::DepthwiseConv2d3x3(bool is_turn) {
const cl::Image2D* bias_image = nullptr;
if (has_bias) {
bias_image = bias_gpu_image_->data<half_t, cl::Image2D>();
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "set bias_image: ";
#endif
status = kernel.setArg(++arg_idx, *bias_image);
......@@ -1649,7 +1649,7 @@ void ConvImageCompute::DepthwiseConv2d(bool is_turn) {
int input_c = input_dims[1];
auto dilations = *param.dilations;
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "============ depthwise conv2d params ============";
VLOG(4) << "input_image_shape: " << input_image_shape["width"] << ","
<< input_image_shape["height"];
......@@ -1700,7 +1700,7 @@ void ConvImageCompute::DepthwiseConv2d(bool is_turn) {
status = kernel.setArg(++arg_idx, *filter_image);
CL_CHECK_FATAL(status);
if (has_bias) {
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "set bias_image: ";
#endif
status = kernel.setArg(++arg_idx, *bias_image);
......@@ -1731,7 +1731,7 @@ void ConvImageCompute::DepthwiseConv2d(bool is_turn) {
status = kernel.setArg(++arg_idx, filter_height);
CL_CHECK_FATAL(status);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "global_work_size_[3D]: {" << global_work_size_[0] << ","
<< global_work_size_[1] << "," << global_work_size_[2] << "}";
#endif
......
......@@ -43,7 +43,7 @@ void ElementwiseAddCompute::Run() {
STL::stringstream kernel_key;
kernel_key << kernel_func_name_ << build_options_ << time_stamp_;
auto kernel = context.cl_context()->GetKernel(kernel_key.str());
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << TargetToStr(ele_param_->X->target());
VLOG(4) << TargetToStr(ele_param_->Y->target());
VLOG(4) << TargetToStr(ele_param_->Out->target());
......@@ -86,7 +86,7 @@ void ElementwiseAddCompute::UpdateParams() {
for (int i = static_cast<int>(y_dims.size() + axis); i < x_dims.size(); ++i) {
num_ *= x_dims[i];
}
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "axis: " << axis;
VLOG(4) << "batch: " << batch_;
VLOG(4) << "channels: " << channels_;
......
......@@ -83,7 +83,7 @@ void ElementwiseAddImageCompute::ReInitWhenNeeded() {
void ElementwiseAddImageCompute::GetGlobalWorkSize() {
global_work_size_ = cl::NDRange{static_cast<cl::size_type>(x_img_shape_[0]),
static_cast<cl::size_type>(x_img_shape_[1])};
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "global_work_size:[2D]:" << x_img_shape_[0] << " "
<< x_img_shape_[1];
#endif
......@@ -102,7 +102,7 @@ void ElementwiseAddImageCompute::Run() {
auto* out_img = out->mutable_data<half_t, cl::Image2D>(out_img_shape_[0],
out_img_shape_[1]);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "x->target():" << TargetToStr(x->target());
VLOG(4) << "y->target():" << TargetToStr(y->target());
VLOG(4) << "out->target():" << TargetToStr(out->target());
......@@ -129,7 +129,7 @@ void ElementwiseAddImageCompute::Run() {
} else if (y_dims.size() == 1) {
if (axis == x_dims.size() - 1 || axis == x_dims.size() - 3) {
const int tensor_w = x_dims[x_dims.size() - 1];
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "tensor_w:" << tensor_w;
#endif
status = kernel.setArg(0, *x_img);
......
......@@ -85,7 +85,7 @@ class ElementwiseMulImageCompute
auto* y = ele_param_->Y;
auto* out = ele_param_->Out;
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "x->target():" << TargetToStr(x->target());
VLOG(4) << "y->target():" << TargetToStr(y->target());
VLOG(4) << "out->target():" << TargetToStr(out->target());
......@@ -108,7 +108,7 @@ class ElementwiseMulImageCompute
auto* out_img = out->mutable_data<half_t, cl::Image2D>(out_img_shape[0],
out_img_shape[1]);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "x_img_shape[w,h]:" << x_img_width << " " << x_img_height;
VLOG(4) << "y_img_shape[w,h]:" << y_img_shape[0] << " " << y_img_shape[1];
VLOG(4) << "out_img_shape[w,h]:" << out_img_shape[0] << " "
......@@ -194,7 +194,7 @@ class ElementwiseMulImageCompute
nullptr,
nullptr);
CL_CHECK_FATAL(status);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "global_work_size:[2D]:" << x_img_width << " " << x_img_height;
#endif
}
......
......@@ -64,7 +64,7 @@ void ElementwiseSubImageCompute::Run() {
auto* out = ele_param_->Out;
auto axis = ele_param_->axis;
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "x->target():" << TargetToStr(x->target());
VLOG(4) << "y->target():" << TargetToStr(y->target());
VLOG(4) << "out->target():" << TargetToStr(out->target());
......@@ -87,7 +87,7 @@ void ElementwiseSubImageCompute::Run() {
auto* out_img = out->mutable_data<half_t, cl::Image2D>(out_img_shape[0],
out_img_shape[1]);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "x_img_shape[w,h]:" << x_img_width << " " << x_img_height;
VLOG(4) << "y_img_shape[w,h]:" << y_img_shape[0] << " " << y_img_shape[1];
VLOG(4) << "out_img_shape[w,h]:" << out_img_shape[0] << " "
......@@ -110,7 +110,7 @@ void ElementwiseSubImageCompute::Run() {
} else if (y_dims.size() == 1) {
if (axis == x->dims().size() - 1 || axis == x->dims().size() - 3) {
int tensor_w = x->dims()[x->dims().size() - 1];
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "tensor_w:" << tensor_w;
#endif
cl_int status = kernel.setArg(arg_idx, *x_img);
......@@ -134,7 +134,7 @@ void ElementwiseSubImageCompute::Run() {
auto global_work_size = cl::NDRange{static_cast<cl::size_type>(x_img_width),
static_cast<cl::size_type>(x_img_height)};
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "global_work_size:[2D]:" << x_img_width << " " << x_img_height;
#endif
......
......@@ -52,7 +52,7 @@ class FcCompute
n_ = w_dims[1];
CHECK_EQ(k_, static_cast<int>(w_dims[0]));
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "x_dims:" << x_dims[0] << " " << x_dims[1] << " " << x_dims[2]
<< " " << x_dims[3];
VLOG(4) << "w_dims:" << w_dims[0] << " " << w_dims[1] << " " << w_dims[2]
......@@ -66,7 +66,7 @@ class FcCompute
} else { // gemm
kernel_func_name_ = "fc_gemm_4x4";
}
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(1) << "kernel_func_name_:" << kernel_func_name_;
#endif
......
......@@ -80,7 +80,7 @@ class GridSamplerImageCompute : public KernelLite<TARGET(kOpenCL),
cl::NDRange{static_cast<cl::size_type>(default_work_size[0]),
static_cast<cl::size_type>(default_work_size[1]),
static_cast<cl::size_type>(default_work_size[2] / 4)};
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "default_work_size: " << default_work_size[0] << ", "
<< default_work_size[1] << ", " << default_work_size[2];
VLOG(4) << "global_work_size_:[2D]:" << global_work_size_[0] << " "
......@@ -102,7 +102,7 @@ class GridSamplerImageCompute : public KernelLite<TARGET(kOpenCL),
auto* out_img = out->mutable_data<half_t, cl::Image2D>(out_img_shape_[0],
out_img_shape_[1]);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
auto in_dims = x->dims();
VLOG(4) << "x->target():" << TargetToStr(x->target());
VLOG(4) << "out->target():" << TargetToStr(out->target());
......
......@@ -96,7 +96,7 @@ class InstanceNormImageCompute : public KernelLite<TARGET(kOpenCL),
static_cast<cl::size_type>(lws1),
static_cast<cl::size_type>(lws2)};
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "global_work_size:" << static_cast<int>(global_work_size[0])
<< " " << static_cast<int>(global_work_size[1]) << " "
<< static_cast<int>(global_work_size[2]);
......@@ -200,7 +200,7 @@ class InstanceNormImageCompute : public KernelLite<TARGET(kOpenCL),
int in_h = in_dims[2];
int in_w = in_dims[3];
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "x->target():" << TargetToStr(x->target());
VLOG(4) << "out->target():" << TargetToStr(out->target());
VLOG(4) << "x->dims():" << in_dims;
......@@ -211,7 +211,7 @@ class InstanceNormImageCompute : public KernelLite<TARGET(kOpenCL),
auto* out_img = out->mutable_data<half_t, cl::Image2D>(
out_image_shape["width"], out_image_shape["height"]);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "out_image_shape[w,h]: " << out_image_shape["width"] << " "
<< out_image_shape["height"];
......@@ -229,7 +229,7 @@ class InstanceNormImageCompute : public KernelLite<TARGET(kOpenCL),
static_cast<cl::size_type>(group_size_y),
static_cast<cl::size_type>(1)};
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "local_work_size:[2D]:" << local_work_size[0] << " "
<< local_work_size[1] << " " << local_work_size[2];
VLOG(4) << "global_work_size:[2D]:" << global_work_size[0] << " "
......
......@@ -42,7 +42,7 @@ class IoCopyHostToOpenCLCompute
CHECK(param.x->target() == TARGET(kHost) ||
param.x->target() == TARGET(kARM));
auto mem_size = param.x->memory_size();
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(2) << "param.x->memory_size():" << mem_size;
VLOG(2) << "param.x->dims().size():" << param.x->dims().size();
VLOG(2) << "param.x->dims():" << param.x->dims();
......@@ -87,7 +87,7 @@ class IoCopykOpenCLToHostCompute
CHECK(param.x->target() == TARGET(kOpenCL));
auto mem_size = param.x->memory_size();
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(2) << "copy size " << mem_size;
VLOG(2) << "param.x->dims().size():" << param.x->dims().size();
VLOG(2) << "param.x->dims():" << param.x->dims();
......@@ -106,7 +106,7 @@ class IoCopykOpenCLToHostCompute
auto& context = ctx_->As<OpenCLContext>();
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(2) << "--- Find the sync event for the target cl tensor. ---";
#endif
CLRuntime::Global()->command_queue().finish();
......
......@@ -76,7 +76,7 @@ class LayoutComputeBufferChwToImageDefault
const int Stride1 = out_H * out_W;
const int Stride0 = out_W;
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(2) << "param.process_type:" << param.process_type;
VLOG(2) << "x_dims:" << x_dims;
VLOG(2) << "param.x->memory_size():" << param.x->memory_size();
......@@ -118,7 +118,7 @@ class LayoutComputeBufferChwToImageDefault
status = kernel.setArg(++arg_idx, static_cast<const int>(Stride2));
CL_CHECK_FATAL(status);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(2) << "gws:[3D]" << ((new_dims[1] + 3) / 4) << " " << new_dims[3]
<< " " << (new_dims[0] * new_dims[2]);
#endif
......@@ -186,7 +186,7 @@ class LayoutComputeImageDefaultToBufferChw
new_dims[4 - x_dims.size() + j] = x_dims[j];
}
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(2) << "param.process_type:" << param.process_type;
VLOG(2) << "x_dims:" << x_dims;
VLOG(2) << "param.x->memory_size():" << param.x->memory_size();
......@@ -228,7 +228,7 @@ class LayoutComputeImageDefaultToBufferChw
CL_CHECK_FATAL(status);
status = kernel.setArg(++arg_idx, static_cast<const int>(C));
CL_CHECK_FATAL(status);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(2) << "gws:[3D]" << ((new_dims[1] + 3) / 4) << " " << new_dims[3]
<< " " << (new_dims[0] * new_dims[2]);
#endif
......
......@@ -65,7 +65,7 @@ class LrnImageCompute : public KernelLite<TARGET(kOpenCL),
auto out_dims = out->dims();
auto in_dims = x->dims();
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "x->target(): " << TargetToStr(x->target());
VLOG(4) << "out->target(): " << TargetToStr(out->target());
VLOG(4) << "x->dims(): " << in_dims;
......@@ -84,7 +84,7 @@ class LrnImageCompute : public KernelLite<TARGET(kOpenCL),
auto* out_img = out->mutable_data<half_t, cl::Image2D>(
out_image_shape["width"], out_image_shape["height"]);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
// VLOG(4) << "out_image" << out_img;
VLOG(4) << "out_image_shape[w,h]:" << out_image_shape["width"] << " "
<< out_image_shape["height"];
......@@ -102,7 +102,7 @@ class LrnImageCompute : public KernelLite<TARGET(kOpenCL),
DDim(std::vector<DDim::value_type>{
static_cast<int64_t>(out_image_shape["width"]),
static_cast<int64_t>(out_image_shape["height"])}));
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "default_work_size: " << default_work_size[0] << ", "
<< default_work_size[1] << ", " << default_work_size[3];
#endif
......@@ -136,7 +136,7 @@ class LrnImageCompute : public KernelLite<TARGET(kOpenCL),
nullptr,
nullptr);
CL_CHECK_FATAL(status);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "global_work_size:[2D]:" << global_work_size[0] << " "
<< global_work_size[1] << " " << global_work_size[2];
#endif
......
......@@ -89,7 +89,7 @@ class NearestInterpComputeImageDefault
status = kernel.setArg(++arg_idx, static_cast<const int>(out_dims_w));
CL_CHECK_FATAL(status);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << TargetToStr(param.X->target());
VLOG(4) << TargetToStr(param.Out->target());
VLOG(4) << "out_image_shape(w,h):" << out_image_shape["width"] << " "
......
......@@ -73,7 +73,7 @@ class Pad2dCompute : public KernelLite<TARGET(kOpenCL),
int out_h = out_dims[2];
int out_w = out_dims[3];
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "x->target():" << TargetToStr(x->target());
VLOG(4) << "out->target():" << TargetToStr(out->target());
VLOG(4) << "x->dims():" << in_dims;
......@@ -86,7 +86,7 @@ class Pad2dCompute : public KernelLite<TARGET(kOpenCL),
auto* out_img = out->mutable_data<half_t, cl::Image2D>(
out_image_shape["width"], out_image_shape["height"]);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "out_image_shape[w,h]: " << out_image_shape["width"] << " "
<< out_image_shape["height"];
......@@ -104,7 +104,7 @@ class Pad2dCompute : public KernelLite<TARGET(kOpenCL),
DDim(std::vector<DDim::value_type>{
static_cast<int64_t>(out_image_shape["width"]),
static_cast<int64_t>(out_image_shape["height"])}));
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "default_work_size: " << default_work_size[0] << ", "
<< default_work_size[1] << ", " << default_work_size[2];
#endif
......@@ -150,7 +150,7 @@ class Pad2dCompute : public KernelLite<TARGET(kOpenCL),
nullptr,
nullptr);
CL_CHECK_FATAL(status);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "global_work_size:[2D]:" << global_work_size[0] << " "
<< global_work_size[1] << " " << global_work_size[2];
#endif
......
......@@ -60,7 +60,7 @@ class PoolComputeImage2D : public KernelLite<TARGET(kOpenCL),
std::vector<int> strides = param.strides;
std::vector<int> ksize = param.ksize;
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "global_pooling: " << global_pooling;
VLOG(4) << "pooling_type: " << pooling_type;
VLOG(4) << "paddings : " << paddings[0] << " " << paddings[1] << " "
......@@ -75,7 +75,7 @@ class PoolComputeImage2D : public KernelLite<TARGET(kOpenCL),
}
}
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "in_dims : [" << in_dims.size() << "]" << in_dims[0] << " "
<< in_dims[1] << " " << in_dims[2] << " " << in_dims[3];
VLOG(4) << "out_dims : [" << out_dims.size() << "]" << out_dims[0] << " "
......@@ -103,7 +103,7 @@ class PoolComputeImage2D : public KernelLite<TARGET(kOpenCL),
// VLOG(4) << "x_image" << x_img;
auto out_image_shape = InitImageDimInfoWith(out_dims);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "out_image_shape = " << out_image_shape["width"] << " "
<< out_image_shape["height"];
#endif
......@@ -119,7 +119,7 @@ class PoolComputeImage2D : public KernelLite<TARGET(kOpenCL),
int w = out_dims[3];
int nh = out_dims[0] * out_dims[2];
auto global_work_size = cl::NDRange(c_block, w, nh);
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "global_work_size : [" << 3 << "]" << c_block << " " << w
<< " " << nh << " ";
#endif
......
......@@ -64,7 +64,7 @@ class ReshapeComputeFloatImage : public KernelLite<TARGET(kOpenCL),
InitImageDimInfoWith(out_dims);
cl::Image2D* const out_image = output->mutable_data<half_t, cl::Image2D>(
out_image_shape.at("width"), out_image_shape.at("height"));
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "out_dims= " << out_dims;
#endif
const std::vector<size_t>& default_work_size = DefaultWorkSize(
......@@ -96,7 +96,7 @@ class ReshapeComputeFloatImage : public KernelLite<TARGET(kOpenCL),
int out_Stride1 = out_H * out_W;
int out_Stride2 = out_C * out_H * out_W;
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << "out_C=" << out_C;
VLOG(4) << "out_H=" << out_H;
VLOG(4) << "out_W=" << out_W;
......@@ -115,7 +115,7 @@ class ReshapeComputeFloatImage : public KernelLite<TARGET(kOpenCL),
kernel_key << kernel_func_name_ << build_options_ << time_stamp_;
auto kernel = context.cl_context()->GetKernel(kernel_key.str());
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
VLOG(4) << TargetToStr(x->target());
VLOG(4) << TargetToStr(param.output->target());
#endif
......
......@@ -11,7 +11,7 @@ set BUILD_DIR=%source_path%
set OPTMODEL_DIR=""
set BUILD_TAILOR=OFF
set BUILD_CV=OFF
set SHUTDOWN_LOG=ON
set WITH_LOG=ON
set THIRDPARTY_TAR=https://paddle-inference-dist.bj.bcebos.com/PaddleLite/third-party-05b862.tar.gz
......
......@@ -21,7 +21,7 @@ BUILD_DIR=$(pwd)
OPTMODEL_DIR=""
BUILD_TAILOR=OFF
BUILD_CV=OFF
SHUTDOWN_LOG=ON
WITH_LOG=ON
BUILD_NPU=OFF
NPU_DDK_ROOT="$(pwd)/ai_ddk_lib/" # Download HiAI DDK from https://developer.huawei.com/consumer/cn/hiai/
BUILD_XPU=OFF
......@@ -124,7 +124,7 @@ function make_tiny_publish_so {
-DWITH_TESTING=OFF \
-DLITE_WITH_JAVA=$BUILD_JAVA \
-DLITE_WITH_PYTHON=$BUILD_PYTHON \
-DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \
-DLITE_WITH_LOG=$WITH_LOG \
-DLITE_ON_TINY_PUBLISH=ON \
-DANDROID_STL_TYPE=$android_stl \
-DLITE_BUILD_EXTRA=$BUILD_EXTRA \
......@@ -179,7 +179,7 @@ function make_opencl {
-DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \
-DWITH_TESTING=OFF \
-DLITE_BUILD_EXTRA=$BUILD_EXTRA \
-DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \
-DLITE_WITH_LOG=$WITH_LOG \
-DLITE_WITH_CV=$BUILD_CV \
-DARM_TARGET_OS=$1 -DARM_TARGET_ARCH_ABI=$2 -DARM_TARGET_LANG=$3
......@@ -217,7 +217,7 @@ function make_full_publish_so {
-DWITH_TESTING=OFF \
-DLITE_WITH_JAVA=$BUILD_JAVA \
-DLITE_WITH_PYTHON=$BUILD_PYTHON \
-DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \
-DLITE_WITH_LOG=$WITH_LOG \
-DANDROID_STL_TYPE=$android_stl \
-DLITE_BUILD_EXTRA=$BUILD_EXTRA \
-DLITE_WITH_CV=$BUILD_CV \
......@@ -298,7 +298,7 @@ function make_ios {
-DLITE_WITH_ARM=ON \
-DWITH_TESTING=OFF \
-DLITE_WITH_JAVA=OFF \
-DLITE_SHUTDOWN_LOG=ON \
-DLITE_WITH_LOG=ON \
-DLITE_ON_TINY_PUBLISH=ON \
-DLITE_WITH_OPENMP=OFF \
-DWITH_ARM_DOTPROD=OFF \
......@@ -402,7 +402,7 @@ function print_usage {
echo -e " ./build.sh --arm_os=<os> --arm_abi=<abi> --arm_lang=<lang> test"
echo
echo -e "optional argument:"
echo -e "--shutdown_log: (OFF|ON); controls whether to shutdown log, default is ON"
echo -e "--with_log: (OFF|ON); controls whether to print log information, default is ON"
echo -e "--build_extra: (OFF|ON); controls whether to publish extra operators and kernels for (sequence-related model such as OCR or NLP)"
echo -e "--build_train: (OFF|ON); controls whether to publish training operators and kernels, build_train is only for full_publish library now"
echo -e "--build_python: (OFF|ON); controls whether to publish python api lib (ANDROID and IOS is not supported)"
......@@ -481,8 +481,8 @@ function main {
BUILD_TAILOR="${i#*=}"
shift
;;
--shutdown_log=*)
SHUTDOWN_LOG="${i#*=}"
--with_log=*)
WITH_LOG="${i#*=}"
shift
;;
--build_npu=*)
......
......@@ -16,7 +16,7 @@ WITH_JAVA=ON
# controls whether to compile cv functions into lib, default is OFF.
WITH_CV=OFF
# controls whether to print log information, default is ON.
SHUTDOWN_LOG=ON
WITH_LOG=ON
# options of striping lib according to input model.
OPTMODEL_DIR=""
WITH_STRIP=OFF
......@@ -144,7 +144,7 @@ function make_tiny_publish_so {
local cmake_mutable_options="
-DLITE_BUILD_EXTRA=$WITH_EXTRA \
-DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \
-DLITE_WITH_LOG=$WITH_LOG \
-DLITE_BUILD_TAILOR=$WITH_STRIP \
-DLITE_OPTMODEL_DIR=$OPTMODEL_DIR \
-DLITE_WITH_JAVA=$WITH_JAVA \
......@@ -193,7 +193,7 @@ function make_full_publish_so {
local cmake_mutable_options="
-DLITE_BUILD_EXTRA=$WITH_EXTRA \
-DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \
-DLITE_WITH_LOG=$WITH_LOG \
-DLITE_BUILD_TAILOR=$WITH_STRIP \
-DLITE_OPTMODEL_DIR=$OPTMODEL_DIR \
-DLITE_WITH_JAVA=$WITH_JAVA \
......@@ -236,7 +236,7 @@ function print_usage {
echo -e "| --android_stl: (c++_static|c++_shared|gnu_static|gnu_shared), default is c++_static |"
echo -e "| --with_java: (OFF|ON); controls whether to publish java api lib, default is ON |"
echo -e "| --with_cv: (OFF|ON); controls whether to compile cv functions into lib, default is OFF |"
echo -e "| --shutdown_log: (OFF|ON); controls whether to hide log information, default is ON |"
echo -e "| --with_log: (OFF|ON); controls whether to print log information, default is ON |"
echo -e "| --with_extra: (OFF|ON); controls whether to publish extra operators and kernels for (sequence-related model such as OCR or NLP) |"
echo -e "| |"
echo -e "| arguments of striping lib according to input model:(armv8, gcc, c++_static) |"
......@@ -315,8 +315,8 @@ function main {
shift
;;
# ON or OFF, default ON
--shutdown_log=*)
SHUTDOWN_LOG="${i#*=}"
--with_log=*)
WITH_LOG="${i#*=}"
shift
;;
# compiling lib which can operate on opencl and cpu.
......
......@@ -11,7 +11,7 @@ WITH_EXTRA=OFF
# controls whether to compile cv functions into lib, default is OFF.
WITH_CV=OFF
# controls whether to print log information, default is ON.
SHUTDOWN_LOG=ON
WITH_LOG=ON
# absolute path of Paddle-Lite.
workspace=$PWD/$(dirname $0)/../../
# options of striping lib according to input model.
......@@ -67,7 +67,7 @@ function make_ios {
-DLITE_WITH_OPENMP=OFF \
-DWITH_ARM_DOTPROD=OFF \
-DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \
-DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \
-DLITE_WITH_LOG=$WITH_LOG \
-DLITE_BUILD_TAILOR=$WITH_STRIP \
-DLITE_OPTMODEL_DIR=$OPTMODEL_DIR \
-DARM_TARGET_ARCH_ABI=$abi \
......@@ -94,7 +94,7 @@ function print_usage {
echo -e "| optional argument: |"
echo -e "| --arm_abi: (armv8|armv7), default is armv8 |"
echo -e "| --with_cv: (OFF|ON); controls whether to compile cv functions into lib, default is OFF |"
echo -e "| --shutdown_log: (OFF|ON); controls whether to hide log information, default is ON |"
echo -e "| --with_log: (OFF|ON); controls whether to print log information, default is ON |"
echo -e "| --with_extra: (OFF|ON); controls whether to publish extra operators and kernels for (sequence-related model such as OCR or NLP) |"
echo -e "| |"
echo -e "| arguments of striping lib according to input model:(armv8, gcc, c++_static) |"
......@@ -136,8 +136,8 @@ function main {
WITH_STRIP="${i#*=}"
shift
;;
--shutdown_log=*)
SHUTDOWN_LOG="${i#*=}"
--with_log=*)
WITH_LOG="${i#*=}"
shift
;;
help)
......
......@@ -52,17 +52,13 @@ readonly CMAKE_COMMON_OPTIONS="-DWITH_LITE=ON \
-DWITH_TESTING=OFF"
# mutable options for linux compiling.
function init_cmake_mutable_options {
SHUTDOWN_LOG=ON
if [ "$WITH_LOG" = "ON"]; then
SHUTDOWN_LOG=OFF
fi
cmake_mutable_options="-DARM_TARGET_ARCH_ABI=$ARCH \
-DARM_TARGET_LANG=$TOOLCHAIN \
-DLITE_BUILD_EXTRA=$WITH_EXTRA \
-DLITE_WITH_PYTHON=$WITH_PYTHON \
-DLITE_WITH_CV=$WITH_CV \
-DLITE_SHUTDOWN_LOG=$SHUTDOWN_LOG \
-DLITE_WITH_LOG=$WITH_LOG \
-DLITE_BUILD_TAILOR=$WITH_STRIP \
-DLITE_OPTMODEL_DIR=$OPTMODEL_DIR \
-DLITE_WITH_OPENCL=$WITH_OPENCL \
......
......@@ -11,7 +11,7 @@ TARGET_NAME="test_subgraph_pass" # default target
BUILD_EXTRA=OFF # ON(with sequence ops)/OFF
WITH_JAVA=ON # ON(build jar and jni so)/OFF
WITH_TESTING=ON # ON/OFF
SHUTDOWN_LOG=OFF # ON(disable logging)/OFF
WITH_LOG=ON # ON(enable logging)/OFF
ON_TINY_PUBLISH=OFF # ON(tiny publish)/OFF(full publish)
function print_usage {
......@@ -76,7 +76,7 @@ function build_npu {
fi
if [[ "${ON_TINY_PUBLISH}" == "ON" ]]; then
WITH_TESTING=OFF
SHUTDOWN_LOG=ON
WITH_LOG=OFF
publish_dir="tiny_publish"
else
publish_dir="full_publish"
......@@ -99,7 +99,7 @@ function build_npu {
-DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \
-DWITH_TESTING=${WITH_TESTING} \
-DLITE_WITH_JAVA=${WITH_JAVA} \
-DLITE_SHUTDOWN_LOG=${SHUTDOWN_LOG} \
-DLITE_WITH_LOG=${WITH_LOG} \
-DLITE_WITH_NPU=ON \
-DLITE_ON_TINY_PUBLISH=${ON_TINY_PUBLISH} \
-DANDROID_API_LEVEL=24 \
......
......@@ -8,8 +8,8 @@ ARM_LANG="gcc" # gcc only yet
DDK_ROOT="$(pwd)/rknpu"
TARGET_NAME="test_subgraph_pass" # default target
BUILD_EXTRA=OFF # ON(with sequence ops)/OFF
WITH_TESTING=ON # ON/OFF
SHUTDOWN_LOG=OFF # ON(disable logging)/OFF
WITH_TESTING=ON # ON/OFF
WITH_LOG=ON # ON(enable logging)/OFF
ON_TINY_PUBLISH=OFF # ON(tiny publish)/OFF(full publish)
function print_usage {
......@@ -65,7 +65,7 @@ function build_npu {
local publish_dir
if [[ "${ON_TINY_PUBLISH}" == "ON" ]]; then
WITH_TESTING=OFF
SHUTDOWN_LOG=ON
WITH_LOG=OFF
publish_dir="tiny_publish"
else
publish_dir="full_publish"
......@@ -89,7 +89,7 @@ function build_npu {
-DWITH_ARM_DOTPROD=ON \
-DLITE_BUILD_EXTRA=${BUILD_EXTRA} \
-DWITH_TESTING=${WITH_TESTING} \
-DLITE_SHUTDOWN_LOG=${SHUTDOWN_LOG} \
-DLITE_WITH_LOG=${WITH_LOG} \
-DLITE_ON_TINY_PUBLISH=${ON_TINY_PUBLISH} \
-DARM_TARGET_OS=${ARM_OS} \
-DARM_TARGET_ARCH_ABI=${ARM_ABI} \
......
......@@ -118,7 +118,7 @@ function cmake_opencl {
-DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \
-DWITH_TESTING=ON \
-DLITE_BUILD_EXTRA=ON \
-DLITE_SHUTDOWN_LOG=OFF \
-DLITE_WITH_LOG=ON \
-DLITE_WITH_CV=OFF \
-DARM_TARGET_OS=$1 -DARM_TARGET_ARCH_ABI=$2 -DARM_TARGET_LANG=$3
}
......@@ -653,7 +653,7 @@ function build_ios {
-DLITE_WITH_ARM=ON \
-DWITH_TESTING=OFF \
-DLITE_WITH_JAVA=OFF \
-DLITE_SHUTDOWN_LOG=ON \
-DLITE_WITH_LOG=OFF \
-DLITE_ON_TINY_PUBLISH=ON \
-DLITE_WITH_OPENMP=OFF \
-DWITH_ARM_DOTPROD=OFF \
......@@ -1000,7 +1000,7 @@ function mobile_publish {
-DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=ON \
-DWITH_TESTING=OFF \
-DLITE_WITH_JAVA=ON \
-DLITE_SHUTDOWN_LOG=ON \
-DLITE_WITH_LOG=OFF \
-DLITE_ON_TINY_PUBLISH=ON \
-DARM_TARGET_OS=${os} -DARM_TARGET_ARCH_ABI=${abi} -DARM_TARGET_LANG=${lang}
......
......@@ -3,7 +3,7 @@
# else()
# endif()
if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK OR LITE_ON_MODEL_OPTIMIZE_TOOL OR LITE_SHUTDOWN_LOG)
if(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK OR LITE_ON_MODEL_OPTIMIZE_TOOL OR (NOT LITE_WITH_LOG))
lite_cc_library(logging SRCS logging.cc)
set(utils_DEPS logging)
lite_cc_test(test_logging SRCS logging_test.cc DEPS ${utils_DEPS})
......
......@@ -14,7 +14,7 @@
#pragma once
#if defined(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) || \
defined(LITE_ON_MODEL_OPTIMIZE_TOOL) || defined(LITE_SHUTDOWN_LOG)
defined(LITE_ON_MODEL_OPTIMIZE_TOOL) || !defined(LITE_WITH_LOG)
#include "lite/utils/logging.h"
#else // LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
#include <glog/logging.h>
......
......@@ -22,7 +22,7 @@
#if defined(LITE_WITH_LIGHT_WEIGHT_FRAMEWORK) || \
defined(LITE_ON_MODEL_OPTIMIZE_TOOL)
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
namespace paddle {
namespace lite {
......@@ -60,5 +60,5 @@ void gen_log(STL::ostream& log_stream_,
} // namespace lite
} // namespace paddle
#endif // LITE_SHUTDOWN_LOG
#endif // LITE_WITH_LOG
#endif // LITE_WITH_LIGHT_FRAMEWORK
......@@ -46,7 +46,7 @@
// NOLINTFILE()
// LOG()
#ifdef LITE_SHUTDOWN_LOG
#ifndef LITE_WITH_LOG
#define LOG(status) LOG_##status
#define LOG_INFO paddle::lite::Voidify()
#define LOG_ERROR LOG_INFO
......@@ -62,7 +62,7 @@
paddle::lite::LogMessageFatal(__FILE__, __FUNCTION__, __LINE__)
#endif
#ifdef LITE_SHUTDOWN_LOG
#ifndef LITE_WITH_LOG
#define VLOG(level) paddle::lite::Voidify()
#else
// VLOG()
......@@ -72,7 +72,7 @@
// CHECK()
// clang-format off
#ifdef LITE_SHUTDOWN_LOG
#ifndef LITE_WITH_LOG
#define CHECK(x) if (!(x)) paddle::lite::VoidifyFatal()
#define _CHECK_BINARY(x, cmp, y) CHECK(x cmp y)
#else
......@@ -91,7 +91,7 @@
namespace paddle {
namespace lite {
#ifndef LITE_SHUTDOWN_LOG
#ifdef LITE_WITH_LOG
void gen_log(STL::ostream& log_stream_,
const char* file,
const char* func,
......
......@@ -37,7 +37,7 @@ void ostream::pad(const std::string& text) {
}
}
#ifdef LITE_SHUTDOWN_LOG
#ifndef LITE_WITH_LOG
#define ADD_DATA_AS_STRING(data_, obj_)
#else
#define ADD_DATA_AS_STRING(data_, obj_) \
......