diff --git a/src/operators/kernel/central-arm-func/fusion_fc_arm_func.h b/src/operators/kernel/central-arm-func/fusion_fc_arm_func.h
index 42c01d2825e052a52e7021a1b2a97997fb9c915b..45d5dc76d1e95668638706a252cc24d7ff2dec40 100644
--- a/src/operators/kernel/central-arm-func/fusion_fc_arm_func.h
+++ b/src/operators/kernel/central-arm-func/fusion_fc_arm_func.h
@@ -29,10 +29,9 @@ void FusionFcCompute(const FusionFcParam<CPU> &param) {
   auto *input_z_data = input_z->data<float>();
   int axis = param.Axis();
   Tensor *out = param.Out();
-  auto *out_data = out->mutable_data<float>();
   //  int m = out->dims()[0];
   //  int n = out->dims()[1];
-
+  auto *out_data = out->mutable_data<float>();
   const Tensor x_matrix =
       input_x->dims().size() > 2
           ? framework::ReshapeToMatrix(*input_x, param.XNumColDims())
diff --git a/src/operators/kernel/central-arm-func/pool_arm_func.h b/src/operators/kernel/central-arm-func/pool_arm_func.h
index 1c22a2646ea4efd91e6c73333c21e0d0c56fcb17..f1065d4571ab512913412f7fb05b059ebc62ec40 100644
--- a/src/operators/kernel/central-arm-func/pool_arm_func.h
+++ b/src/operators/kernel/central-arm-func/pool_arm_func.h
@@ -83,6 +83,7 @@ void PoolCompute(const PoolParam<CPU> &param) {
 #if __aarch64__
       PoolBasic(pooling_type, ksize, strides, paddings, in_x, out);
 #else
+      /// todo: fix bug in Pool2x2
       if (pooling_type == "max") {
         math::Pool2x2Maxs2p0(strides, paddings, in_x, out);
       } else if (pooling_type == "avg") {
diff --git a/src/operators/kernel/central-arm-func/softmax_arm_func.h b/src/operators/kernel/central-arm-func/softmax_arm_func.h
index d311d97984a7207df9075befe71a9806092966e1..a94c8299c514bc9e2937daf57b1a845d7be56b16 100644
--- a/src/operators/kernel/central-arm-func/softmax_arm_func.h
+++ b/src/operators/kernel/central-arm-func/softmax_arm_func.h
@@ -24,6 +24,7 @@ void SoftmaxCompute(const SoftmaxParam<CPU> &param) {
   Tensor *out = param.Out();
   auto x_dims = in_x->dims();
   out->Resize(x_dims);
+  out->mutable_data<float>();
   math::SoftmaxFuntor<CPU, float>()(in_x, out);
 }
 }  // namespace operators
diff --git a/src/operators/op_param.h b/src/operators/op_param.h
index c60014094b582036ef2038b04edf7be3313e571e..2c0075271a92cb66ef95603965dd18d0dd3c5faf 100644
--- a/src/operators/op_param.h
+++ b/src/operators/op_param.h
@@ -2147,9 +2147,9 @@
     paddings_ = GetAttr<vector<int>>("paddings", attrs);
   }
 
-  const RType *Input() const { return input_x_; }
+  const GType *Input() const { return input_x_; }
 
-  RType *Output() const { return out_; }
+  GType *Output() const { return out_; }
 
   const vector<int> &Kernels() const { return kernels_; }
 
@@ -2158,8 +2158,8 @@
   const vector<int> &Paddings() const { return paddings_; }
 
  private:
-  RType *input_x_;
-  RType *out_;
+  GType *input_x_;
+  GType *out_;
   vector<int> kernels_;
   vector<int> strides_;
   vector<int> paddings_;
diff --git a/test/net/test_eng.cpp b/test/net/test_eng.cpp
index 4a78af8310cf7f1db976fbc344a21dd0bb4b25a6..b99a6c927a44ca4032b352731b3971b63cf26b4f 100644
--- a/test/net/test_eng.cpp
+++ b/test/net/test_eng.cpp
@@ -23,13 +23,13 @@ int main() {
   //  paddle_mobile.SetThreadNum(4);
   auto time1 = time();
   if (paddle_mobile.Load(std::string(g_eng) + "/model",
-                         std::string(g_eng) + "/params", false, false, 1,
+                         std::string(g_eng) + "/params", true, false, 1,
                          true)) {
     auto time2 = time();
     std::cout << "load cost :" << time_diff(time1, time1) << "ms" << std::endl;
-    std::vector<int64_t> dims{1, 1, 48, 512};
+    std::vector<int64_t> dims{1, 1, 48, 400};
     LoDTensor input_tensor;
-    SetupTensor<float>(&input_tensor, {1, 1, 48, 512}, static_cast<float>(0),
+    SetupTensor<float>(&input_tensor, {1, 1, 48, 400}, static_cast<float>(0),
                        static_cast<float>(1));
     std::vector<float> input(input_tensor.data<float>(),