diff --git a/src/operators/kernel/arm/dequantize_kernel.cpp b/src/operators/kernel/arm/dequantize_kernel.cpp
index c9dfd6f93667cf283e83e21c0a1f3725bb847956..7c0d1cea18c90145c5dbc06de7cb97fa5ed289b6 100644
--- a/src/operators/kernel/arm/dequantize_kernel.cpp
+++ b/src/operators/kernel/arm/dequantize_kernel.cpp
@@ -34,7 +34,7 @@ void DequantizeKernel<CPU, float>::Compute(const DequantizeParam<CPU> &param) {
   LoDTensor *output = param.output_;
   float activation_scale = param.activation_scale_->data<float>()[0];
   float weight_scale = param.weight_scale_;
-  const int32_t *x = input->data<int32_t>();
+  const int32_t *x = input->data<const int32_t>();
   float *y = output->mutable_data<float>();
   size_t size = output->numel();
   // float scale = 1.f / (activation_scale * weight_scale);
diff --git a/src/operators/kernel/arm/quantize_kernel.cpp b/src/operators/kernel/arm/quantize_kernel.cpp
index 79186be79e5b4207e9f6b2b221bc1a4160e4e67e..515e9cf40dad4eedd307bc59309177910330a3bf 100644
--- a/src/operators/kernel/arm/quantize_kernel.cpp
+++ b/src/operators/kernel/arm/quantize_kernel.cpp
@@ -36,7 +36,7 @@ inline float32_t vmaxvq_f32(float32x4_t r) {
 template <RoundType R>
 inline void QuantizeOffline(const Tensor *input, const float scale,
                             const float max_abs, Tensor *output) {
-  const float *x = input->data<float>();
+  const float *x = input->data<const float>();
   int8_t *y = output->mutable_data<int8_t>();
   size_t remain = input->numel();
 #if defined(__ARM_NEON__) || defined(__ARM_NEON)
@@ -88,7 +88,7 @@ inline void QuantizeOffline(const Tensor *input, const float scale,
 template <RoundType R>
 inline void QuantizeOnline(const Tensor *input, const float scale,
                            Tensor *output) {
-  const float *x = input->data<float>();
+  const float *x = input->data<const float>();
   int8_t *y = output->mutable_data<int8_t>();
   size_t remain = input->numel();
 #if defined(__ARM_NEON__) || defined(__ARM_NEON)
@@ -143,7 +143,7 @@ static void Quantize(const Tensor *input, const float max_abs,
 
 float find_abs_max(const Tensor *input) {
   float max_abs = 0.f;
-  const float *x = input->data<float>();
+  const float *x = input->data<const float>();
   size_t remain = input->numel();
 #if defined(__ARM_NEON__) || defined(__ARM_NEON)
   size_t loop = remain >> 4;
diff --git a/test/operators/test_gru_op.cpp b/test/operators/test_gru_op.cpp
index 704a4df0294e46cd661d5b010398baa6fa40a740..b14c4642fd7638aed0473790ac9ca3bc18476160 100644
--- a/test/operators/test_gru_op.cpp
+++ b/test/operators/test_gru_op.cpp
@@ -20,8 +20,8 @@ namespace paddle_mobile {
 
 template <typename T>
 int TestGruOp(int in_channels, int out_channels, std::string opname) {
-  int input_c = in_channels;
-  int output_c = out_channels;
+  size_t input_c = in_channels;
+  size_t output_c = out_channels;
   paddle_mobile::framework::LoD lod{{0, input_c}};
   int batch_size = lod.size();
   framework::DDim input_shape = framework::make_ddim({input_c, output_c * 3});
diff --git a/test/operators/test_quantize_op.cpp b/test/operators/test_quantize_op.cpp
index 2f8e08806be426e3cb23804514a01b0ca44a0fe1..d8e72e9b1472d0de48143b37ee2a7fe48ad4e174 100644
--- a/test/operators/test_quantize_op.cpp
+++ b/test/operators/test_quantize_op.cpp
@@ -65,7 +65,7 @@ static void quantize(const Tensor *input, const float scale, Tensor *output) {
   int output_w = output->dims()[3];
   size_t input_spatial = input_h * input_w;
   size_t output_spatial = output_h * output_w;
-  const float *x = input->data<float>();
+  const float *x = input->data<const float>();
   int8_t *y = output->mutable_data<int8_t>();
 
   for (int nc = 0; nc < batch_size * channels; ++nc) {
@@ -81,7 +81,7 @@ static void quantize(const Tensor *input, const float scale, Tensor *output) {
 
 static float find_abs_max(const Tensor *input) {
   float max_abs = 0.f;
-  const float *x = input->data<float>();
+  const float *x = input->data<const float>();
   size_t size = input->numel();
   for (size_t i = 0; i < size; ++i) {
     float value = std::abs(x[i]);
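
Note: the recurring change in this diff is that read-only tensor accesses move from data<T>() to data<const T>(). The following is a minimal, self-contained sketch (not paddle-mobile's actual Tensor class) of a type-tagged accessor in that style; the class, its members, and the exact type-checking behavior are illustrative assumptions, not the library's API.

#include <cassert>
#include <cstdint>
#include <type_traits>
#include <typeindex>
#include <vector>

// Toy tensor: mutable_data<T>() allocates storage and tags the element
// type; data<T>() checks the tag modulo constness, so a const-qualified
// read such as data<const float>() on a float tensor stays legal while a
// mistyped read such as data<int32_t>() would trip the assert.
class Tensor {
 public:
  template <typename T>
  T *mutable_data(size_t n) {
    buf_.assign(n * sizeof(T), 0);
    type_ = std::type_index(typeid(typename std::remove_const<T>::type));
    size_ = n;
    return reinterpret_cast<T *>(buf_.data());
  }

  template <typename T>
  const T *data() const {
    using U = typename std::remove_const<T>::type;
    assert(type_ == std::type_index(typeid(U)) && "element type mismatch");
    return reinterpret_cast<const T *>(buf_.data());
  }

  size_t numel() const { return size_; }

 private:
  std::vector<uint8_t> buf_;
  std::type_index type_{typeid(void)};
  size_t size_ = 0;
};

int main() {
  Tensor t;
  float *y = t.mutable_data<float>(16);
  y[0] = 1.5f;
  // Read back through the const-qualified element type, mirroring the
  // data<const float>() calls introduced in the diff above.
  const float *x = t.data<const float>();
  assert(x[0] == 1.5f);
  return 0;
}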