diff --git a/src/framework/executor.cpp b/src/framework/executor.cpp
index d511ad48ca3cf794ea473ab5bc0d791d4c72eded..97957f02ede60036c828c849eb779cfd14a6468e 100644
--- a/src/framework/executor.cpp
+++ b/src/framework/executor.cpp
@@ -474,12 +474,14 @@ PMStatus Executor::Predict() {
   struct timespec ts;
   int op_index = 0;
 #endif
-  for (auto &op_handler : ops_of_block0_) {
+  for (int i = 0; i < ops_of_block0_.size(); ++i) {
+    auto &op_handler = ops_of_block0_[i];
 #ifdef PADDLE_MOBILE_PROFILE
     clock_gettime(CLOCK_MONOTONIC, &ts);
     profile[op_index].runBegin = (uint64_t)ts.tv_sec * 1e9 + ts.tv_nsec;
 #endif
-    DLOG << "run op: " << op_handler->Type();
+    DLOG << i << "th, "
+         << "run op: " << op_handler->Type();
     if (lod_mode_ && input_dim_has_changed_) {
       op_handler->InferShape();
     }
diff --git a/src/framework/program/program_desc.cpp b/src/framework/program/program_desc.cpp
index 88cac11d283cd0c5b72a6809deb34df8206c3e25..7ec74874b678d564529c3d6d86e4db439a94cba9 100644
--- a/src/framework/program/program_desc.cpp
+++ b/src/framework/program/program_desc.cpp
@@ -58,7 +58,7 @@ void ProgramDesc::Description(std::string header) const {
     LOG(kLOG_INFO) << "block ops size: " << block->Ops().size();
     for (int j = 0; j < block->Ops().size(); ++j) {
       auto op = block->Ops()[j];
-      LOG(kLOG_DEBUG1) << "op: " << op->Type();
+      LOG(kLOG_DEBUG1) << j << "th, op: " << op->Type();
       for (auto &input : op->GetInputs()) {
         LOG(kLOG_DEBUG2) << "input parameter: " << input.first;
         for (auto &n : input.second) {
diff --git a/src/operators/kernel/arm/slice_kernel.cpp b/src/operators/kernel/arm/slice_kernel.cpp
index e373b569870c81587377ac02e578397518513a85..db3718612be94e78a38d412b52579c7abc1d98c8 100644
--- a/src/operators/kernel/arm/slice_kernel.cpp
+++ b/src/operators/kernel/arm/slice_kernel.cpp
@@ -19,12 +19,54 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace operators {
 
+void SliceCompute(const SliceParam<CPU>& param) {
+  auto input = param.input_;
+  auto output = param.output_;
+  auto* input_ptr = input->data<float>();
+  auto* output_ptr = output->mutable_data<float>();
+  auto out_dims = output->dims();
+  auto in_dims = input->dims();
+  auto starts = param.starts_;
+  auto ends = param.ends_;
+  int axes = param.axes_[0];
+  int HW = input->dims()[axes + 1] * input->dims()[axes + 2];
+  int batch_size = out_dims[axes - 1];
+  int input_channel = in_dims[axes];
+  int output_channel = out_dims[axes];
+
+  for (int c1 = 0; c1 < batch_size; ++c1) {
+    for (int c2 = starts[0], c3 = 0; c2 < ends[0]; ++c2, ++c3) {
+      size_t out_offset = c1 * output_channel * HW + c3 * HW;
+      size_t in_offset = c1 * input_channel * HW + c2 * HW;
+      memcpy(output_ptr + out_offset, input_ptr + in_offset,
+             HW * sizeof(float));
+    }
+  }
+}
+
 template <>
 bool SliceKernel<CPU, float>::Init(SliceParam<CPU>* param) {
   return true;
 }
+
 template <>
-void SliceKernel<CPU, float>::Compute(const SliceParam<CPU>& param) {}
+void SliceKernel<CPU, float>::Compute(const SliceParam<CPU>& param) {
+  int rank = param.input_->dims().size();
+  switch (rank) {
+    case 4:
+      SliceCompute(param);
+      break;
+    case 5:
+      if (param.input_->dims()[0] == 1) {
+        SliceCompute(param);
+      }
+      break;
+    default:
+      PADDLE_MOBILE_ENFORCE(0, "input dims not support now");
+      break;
+  }
+}
+
 }  // namespace operators
 }  // namespace paddle_mobile
 #endif
diff --git a/src/operators/slice_op.cpp b/src/operators/slice_op.cpp
index 85b2fea07f8bbe1ce3452566abb0c41111a03d88..14316d37eb3ea69557e47f47f3d8563523184490 100644
--- a/src/operators/slice_op.cpp
+++ b/src/operators/slice_op.cpp
@@ -21,7 +21,14 @@ namespace operators {
 
 template <typename Dtype, typename T>
 void SliceOp<Dtype, T>::InferShape() const {
-  /// todo: add InputShape() detection.
+  auto axes = this->param_.axes_;
+  auto input = this->param_.input_;
+  auto output = this->param_.output_;
+  PADDLE_MOBILE_ENFORCE(axes.size() == 1, "axes size should equals 1");
+  PADDLE_MOBILE_ENFORCE(input->dims().size() == output->dims().size(),
+                        "input dim size should equals output dim size");
+  PADDLE_MOBILE_ENFORCE(input->dims().size() - axes[0] == 3,
+                        "op only support slice channel now");
 }
 
 }  // namespace operators
diff --git a/tools/python/fluidtools/run.py b/tools/python/fluidtools/run.py
index f519f97208b0c6b49cce3910e602d62c3c8f7a9f..03efa209e2482bbc11cbbfdbe91b5fd1a9f4b159 100644
--- a/tools/python/fluidtools/run.py
+++ b/tools/python/fluidtools/run.py
@@ -368,10 +368,11 @@ def check_mobile_results(args, fuse, mem_opt):
         error_values1 = np.array(error_values1)
         error_values2 = np.array(error_values2)
         # pp_red("mobile op is not correct, error occurs at {}th op, op's type is {}")
-        pp_red("corresponding fluid op is {}th op, op's type is {}".format(error_index, op_cache[error_index][1].type), 1)
+        pp_red("corresponding fluid op is {}th op, op's type is {}, wrong var name is {}".format(
+            error_index, op_cache[error_index][1].type, op_output_var_name), 1)
         pp_red("fluid results are : ", 1)
         pp_red(str(error_values1).replace("\n", "\n" + "\t" * 1), 1)
-        pp_red("paddle mobile results are : ", 1)
+        pp_yellow("paddle mobile results are : ", 1)
         pp_red(str(error_values2).replace("\n", "\n" + "\t" * 1), 1)
         # print(output_var_cache)
         # print(mobile_var_cache)
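
For reference, the copy pattern the new SliceCompute relies on is sketched below: slicing a contiguous NCHW float tensor along the channel axis by memcpy-ing whole H*W planes. This is a minimal standalone sketch and not part of the patch; SliceChannels and every name in it are illustrative stand-ins, not paddle-mobile APIs.

// Standalone sketch of channel slicing on an NCHW float tensor.
// All names here are hypothetical; only the offset arithmetic mirrors the kernel.
#include <cstdio>
#include <cstring>
#include <vector>

// Copy channels [start, end) of an N x C x H x W tensor, one H*W plane per memcpy.
void SliceChannels(const float* in, float* out, int n, int c_in, int h, int w,
                   int start, int end) {
  const int hw = h * w;           // elements in one H x W plane
  const int c_out = end - start;  // channels kept in the output
  for (int b = 0; b < n; ++b) {
    for (int c = start, o = 0; c < end; ++c, ++o) {
      const size_t in_offset = static_cast<size_t>(b) * c_in * hw + static_cast<size_t>(c) * hw;
      const size_t out_offset = static_cast<size_t>(b) * c_out * hw + static_cast<size_t>(o) * hw;
      std::memcpy(out + out_offset, in + in_offset, hw * sizeof(float));
    }
  }
}

int main() {
  // 1 x 4 x 2 x 2 input holding values 0..15; keep channels [1, 3).
  std::vector<float> in(16);
  for (size_t i = 0; i < in.size(); ++i) in[i] = static_cast<float>(i);
  std::vector<float> out(8);
  SliceChannels(in.data(), out.data(), 1, 4, 2, 2, 1, 3);
  for (float v : out) std::printf("%.0f ", v);  // prints: 4 5 6 7 8 9 10 11
  std::printf("\n");
  return 0;
}

Because channels are contiguous in NCHW layout, each kept channel can be moved with a single memcpy of H*W floats, which is consistent with the InferShape check that the op only supports slicing along the channel axis for now.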