diff --git a/src/operators/kernel/central-arm-func/fusion_fc_arm_func.h b/src/operators/kernel/central-arm-func/fusion_fc_arm_func.h
index 42c01d2825e052a52e7021a1b2a97997fb9c915b..45d5dc76d1e95668638706a252cc24d7ff2dec40 100644
--- a/src/operators/kernel/central-arm-func/fusion_fc_arm_func.h
+++ b/src/operators/kernel/central-arm-func/fusion_fc_arm_func.h
@@ -29,10 +29,9 @@ void FusionFcCompute(const FusionFcParam &param) {
   auto *input_z_data = input_z->data();
   int axis = param.Axis();
   Tensor *out = param.Out();
-  auto *out_data = out->mutable_data();
   // int m = out->dims()[0];
   // int n = out->dims()[1];
-
+  auto *out_data = out->mutable_data();
   const Tensor x_matrix =
       input_x->dims().size() > 2
           ? framework::ReshapeToMatrix(*input_x, param.XNumColDims())
diff --git a/src/operators/kernel/central-arm-func/softmax_arm_func.h b/src/operators/kernel/central-arm-func/softmax_arm_func.h
index d311d97984a7207df9075befe71a9806092966e1..a94c8299c514bc9e2937daf57b1a845d7be56b16 100644
--- a/src/operators/kernel/central-arm-func/softmax_arm_func.h
+++ b/src/operators/kernel/central-arm-func/softmax_arm_func.h
@@ -24,6 +24,7 @@ void SoftmaxCompute(const SoftmaxParam &param) {
   Tensor *out = param.Out();
   auto x_dims = in_x->dims();
   out->Resize(x_dims);
+  out->mutable_data();
   math::SoftmaxFuntor()(in_x, out);
 }
 }  // namespace operators
diff --git a/src/operators/op_param.h b/src/operators/op_param.h
index 568cf77b8e4e81732cd9a783c1a9ea64d347102b..106f5c43c1762afa7f24a8c3e3e86beac8517834 100644
--- a/src/operators/op_param.h
+++ b/src/operators/op_param.h
@@ -2067,9 +2067,9 @@ class Im2SequenceParam : public OpParam {
     paddings_ = GetAttr>("paddings", attrs);
   }
 
-  const RType *Input() const { return input_x_; }
+  const GType *Input() const { return input_x_; }
 
-  RType *Output() const { return out_; }
+  GType *Output() const { return out_; }
 
   const vector &Kernels() const { return kernels_; }
 
@@ -2078,8 +2078,8 @@
   const vector &Paddings() const { return paddings_; }
 
  private:
-  RType *input_x_;
-  RType *out_;
+  GType *input_x_;
+  GType *out_;
   vector kernels_;
   vector strides_;
   vector paddings_;
diff --git a/test/net/test_eng.cpp b/test/net/test_eng.cpp
index 4a78af8310cf7f1db976fbc344a21dd0bb4b25a6..b99a6c927a44ca4032b352731b3971b63cf26b4f 100644
--- a/test/net/test_eng.cpp
+++ b/test/net/test_eng.cpp
@@ -23,13 +23,13 @@ int main() {
   // paddle_mobile.SetThreadNum(4);
   auto time1 = time();
   if (paddle_mobile.Load(std::string(g_eng) + "/model",
-                         std::string(g_eng) + "/params", false, false, 1,
+                         std::string(g_eng) + "/params", true, false, 1,
                          true)) {
     auto time2 = time();
     std::cout << "load cost :" << time_diff(time1, time1) << "ms" << std::endl;
-    std::vector dims{1, 1, 48, 512};
+    std::vector dims{1, 1, 48, 400};
     LoDTensor input_tensor;
-    SetupTensor(&input_tensor, {1, 1, 48, 512}, static_cast(0),
+    SetupTensor(&input_tensor, {1, 1, 48, 400}, static_cast(0),
                 static_cast(1));
     std::vector input(input_tensor.data(),