Commit 65a8b291 authored by H hjchen2

Fix unit test

Parent 3efc0a95
@@ -34,7 +34,7 @@ void DequantizeKernel<CPU, float>::Compute(const DequantizeParam<CPU> &param) {
   LoDTensor *output = param.output_;
   float activation_scale = param.activation_scale_->data<float>()[0];
   float weight_scale = param.weight_scale_;
-  const int32_t *x = input->data<const int32_t>();
+  const int32_t *x = input->data<int32_t>();
   float *y = output->mutable_data<float>();
   size_t size = output->numel();
   // float scale = 1.f / (activation_scale * weight_scale);
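The fixed call site follows one pattern: `data<T>()` is instantiated with the unqualified element type, and constness comes from calling it on a `const Tensor *`. A standalone sketch of the dequantize loop in that style; the function name is illustrative and the scale formula is taken from the commented-out line above, so both are assumptions rather than the kernel's actual structure:

#include <cstddef>
#include <cstdint>

// y[i] = x[i] * 1/(activation_scale * weight_scale), per the commented-out
// formula in the hunk above. The pointers correspond to
// input->data<int32_t>() and output->mutable_data<float>().
void dequantize_loop(const int32_t *x, float *y, size_t size,
                     float activation_scale, float weight_scale) {
  const float scale = 1.f / (activation_scale * weight_scale);
  for (size_t i = 0; i < size; ++i) {
    y[i] = x[i] * scale;
  }
}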
@@ -36,7 +36,7 @@ inline float32_t vmaxvq_f32(float32x4_t r) {
 template <RoundType R>
 inline void QuantizeOffline(const Tensor *input, const float scale,
                             const float max_abs, Tensor *output) {
-  const float *x = input->data<const float>();
+  const float *x = input->data<float>();
   int8_t *y = output->mutable_data<int8_t>();
   size_t remain = input->numel();
 #if defined(__ARM_NEON__) || defined(__ARM_NEON)
@@ -88,7 +88,7 @@ inline void QuantizeOffline(const Tensor *input, const float scale,
 template <RoundType R>
 inline void QuantizeOnline(const Tensor *input, const float scale,
                            Tensor *output) {
-  const float *x = input->data<const float>();
+  const float *x = input->data<float>();
   int8_t *y = output->mutable_data<int8_t>();
   size_t remain = input->numel();
 #if defined(__ARM_NEON__) || defined(__ARM_NEON)
@@ -143,7 +143,7 @@ static void Quantize(const Tensor *input, const float max_abs,
 float find_abs_max(const Tensor *input) {
   float max_abs = 0.f;
-  const float *x = input->data<const float>();
+  const float *x = input->data<float>();
   size_t remain = input->numel();
 #if defined(__ARM_NEON__) || defined(__ARM_NEON)
   size_t loop = remain >> 4;
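All three quantize-side fixes touch the same read pattern. For reference, a scalar sketch of what code like `QuantizeOnline` does with that pointer; the function name, clamp bounds, and rounding mode here are assumptions, and the real kernels dispatch on the `RoundType` template parameter and take a NEON fast path under `__ARM_NEON__`/`__ARM_NEON`:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>

// Scalar fallback: saturate x[i] * scale into the int8 range.
// Rounding mode (round-to-nearest-even) and clamp bounds (+/-127)
// are illustrative assumptions, not the kernel's confirmed behavior.
void quantize_scalar(const float *x, int8_t *y, size_t n, float scale) {
  for (size_t i = 0; i < n; ++i) {
    float value = std::nearbyint(x[i] * scale);
    value = std::min(std::max(value, -127.f), 127.f);
    y[i] = static_cast<int8_t>(value);
  }
}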
@@ -20,8 +20,8 @@ namespace paddle_mobile {
 template <typename Itype, typename Otype>
 int TestGruOp(int in_channels, int out_channels, std::string opname) {
-  int input_c = in_channels;
-  int output_c = out_channels;
+  size_t input_c = in_channels;
+  size_t output_c = out_channels;
   paddle_mobile::framework::LoD lod{{0, input_c}};
   int batch_size = lod.size();
   framework::DDim input_shape = framework::make_ddim({input_c, output_c * 3});
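The `int` to `size_t` change matters because of the braced initializer on the following line: `framework::LoD` is a nested container of `size_t` in Paddle, and list-initialization rejects the narrowing `int` to `size_t` conversion for a non-constant value. A minimal sketch, assuming `LoD` is `std::vector<std::vector<size_t>>`:

#include <cstddef>
#include <vector>

// Stand-in for framework::LoD (assumed nested vector of size_t).
using LoD = std::vector<std::vector<size_t>>;

int main() {
  int in_channels = 8;
  // LoD lod{{0, in_channels}};  // ill-formed: narrowing int -> size_t
  size_t input_c = in_channels;  // widen first, as the fixed test does
  LoD lod{{0, input_c}};
  return static_cast<int>(lod.size());
}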
@@ -65,7 +65,7 @@ static void quantize(const Tensor *input, const float scale, Tensor *output) {
   int output_w = output->dims()[3];
   size_t input_spatial = input_h * input_w;
   size_t output_spatial = output_h * output_w;
-  const float *x = input->data<const float>();
+  const float *x = input->data<float>();
   int8_t *y = output->mutable_data<int8_t>();
   for (int nc = 0; nc < batch_size * channels; ++nc) {
@@ -81,7 +81,7 @@ static void quantize(const Tensor *input, const float scale, Tensor *output) {
 static float find_abs_max(const Tensor *input) {
   float max_abs = 0.f;
-  const float *x = input->data<const float>();
+  const float *x = input->data<float>();
   size_t size = input->numel();
   for (size_t i = 0; i < size; ++i) {
     float value = std::abs(x[i]);
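Across every hunk the change is the same: drop the `const` from the template argument of `data<T>()`. A hypothetical minimal Tensor makes the reasoning concrete; this is not the paddle_mobile implementation, just the shape of the API the fixed call sites assume:

#include <cstdint>

// Hypothetical minimal Tensor -- not the real paddle_mobile class.
class Tensor {
 public:
  template <typename T>
  const T *data() const {  // constness comes from this overload, not from T
    return static_cast<const T *>(ptr_);
  }
  template <typename T>
  T *mutable_data() {      // non-const access for output tensors
    return static_cast<T *>(ptr_);
  }

 private:
  void *ptr_ = nullptr;
};

// Usage matching the fixed call sites:
//   const int32_t *x = input->data<int32_t>();  // not data<const int32_t>()
//   float *y = output->mutable_data<float>();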