diff --git a/src/framework/cl/cl_image.cpp b/src/framework/cl/cl_image.cpp
index d6cc52d69cef1249fe9ed62a344d069af0305bc0..7c716f370bfb1cc26d36146a41aefab53a99da2e 100644
--- a/src/framework/cl/cl_image.cpp
+++ b/src/framework/cl/cl_image.cpp
@@ -122,6 +122,8 @@ Print &operator<<(Print &printer, const CLImage &cl_image) {
   CL_CHECK_ERRORS(err);
+  PADDLE_MOBILE_ENFORCE(cl_image.numel() != 0,
+                        "cl_image numel should not be 0 ");
   float *tensor_data = new float[cl_image.numel()];
   auto converter = cl_image.Converter();
   converter->ImageToNCHW(image_data, tensor_data, cl_image.ImageDims(),
diff --git a/src/framework/cl/cl_image_converter.cpp b/src/framework/cl/cl_image_converter.cpp
index 5770cf38656b7566b4a993dccca21c797010a013..f1993bba9d5ac59a93045cd3f578ba85de6b6b64 100644
--- a/src/framework/cl/cl_image_converter.cpp
+++ b/src/framework/cl/cl_image_converter.cpp
@@ -391,6 +391,8 @@ void CLImageConverterDWBlock::ImageToNCHW(half_t *image, float *tensor,
 
 const DDim &CLImageConverterNormal::InitImageDimInfoWith(
     const DDim &tensor_dim) {
+  PADDLE_MOBILE_ENFORCE(tensor_dim.size() <= 4 && tensor_dim.size() > 0,
+                        "tensor dim is not support ");
   size_t new_dims[] = {1, 1, 1, 1};
   for (int j = 0; j < tensor_dim.size(); ++j) {
     new_dims[4 - tensor_dim.size() + j] = tensor_dim[j];
diff --git a/src/framework/executor.cpp b/src/framework/executor.cpp
index 557f91addeb53202e05144a79739875f2270f800..ecbbe925af228ad15d343ce783422b021ff609c5 100644
--- a/src/framework/executor.cpp
+++ b/src/framework/executor.cpp
@@ -1027,7 +1027,7 @@ void Executor::InitCombineMemory() {
       bool shouldResize = true;
       if (ddim.size() > 4) {
         for (int i = 0; i < ddim.size() - 4; ++i) {
-          if (ddim[i] != 0) {
+          if (ddim[i] != 0 && ddim[i] != 1) {
             shouldResize = false;
             break;
           }
diff --git a/src/operators/reshape2_op.cpp b/src/operators/reshape2_op.cpp
index 4ac8f3458efd6fc19f885f3c55533c039bcf4b35..fd95cad44a3475b59b7ed9c280ac02d3c061cd94 100644
--- a/src/operators/reshape2_op.cpp
+++ b/src/operators/reshape2_op.cpp
@@ -75,6 +75,9 @@ void Reshape2Op::InferShape() const {
     xshape_dims[i + 1] = input_x_dims[i];
   }
   this->param_.OutputXShape()->Resize(framework::make_ddim(xshape_dims));
+#ifdef PADDLE_MOBILE_CL
+  this->param_.OutputXShape()->Resize(input_x_dims);
+#endif
 }
 
 }  // namespace operators
diff --git a/src/operators/transpose2_op.cpp b/src/operators/transpose2_op.cpp
index 945e019f672cd47a009bd1ad1b4083798b97366d..0552a82ca832fd53b6e10f9dc99b1f4f65bfdef1 100644
--- a/src/operators/transpose2_op.cpp
+++ b/src/operators/transpose2_op.cpp
@@ -100,6 +100,9 @@ void Transpose2Op::InferShape() const {
     xshape_dims[i + 1] = input_x_dims[i];
  }
   this->param_.OutputXShape()->Resize(framework::make_ddim(xshape_dims));
+#ifdef PADDLE_MOBILE_CL
+  this->param_.OutputXShape()->Resize(input_x_dims);
+#endif
 }
 
 }  // namespace operators
diff --git a/tools/python/fluidtools/run.py b/tools/python/fluidtools/run.py
index 3503f633e0a4c6ed1a78d8afb887c80886ab3bd7..19507d361168a43195a6059e2dd4de0fdad0eb50 100644
--- a/tools/python/fluidtools/run.py
+++ b/tools/python/fluidtools/run.py
@@ -155,7 +155,7 @@ def load_feed_kv():
         expected_len = 1
         for dim in feed_shape:
             expected_len *= dim
-        if len(data) != expected_len:
+        if len(np.atleast_1d(data)) != expected_len:
             return None
         data = data.reshape(feed_shape).astype("float32")