diff --git a/mobile/src/io/api.cc b/mobile/src/io/api.cc index 0e254aa15ac06083038773d89c23d40242847782..b9e7421b54bc4f0e092a6c743d39a81def48b09c 100644 --- a/mobile/src/io/api.cc +++ b/mobile/src/io/api.cc @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "common/type_define.h" #include "cstring" #include "io/paddle_inference_api.h" diff --git a/mobile/src/io/api_paddle_mobile.cc b/mobile/src/io/api_paddle_mobile.cc index fd77941823d55347b6a86a545cc703fd2dfaf787..57e9218030d44dbf6f36feced2aab4b32dfec7c1 100644 --- a/mobile/src/io/api_paddle_mobile.cc +++ b/mobile/src/io/api_paddle_mobile.cc @@ -18,6 +18,7 @@ #include #include #include "common/enforce.h" +#include "common/type_define.h" #include "framework/tensor.h" #ifdef PADDLE_MOBILE_FPGA #include @@ -35,7 +36,9 @@ PaddleMobilePredictor<Device, T>::PaddleMobilePredictor( template <typename Device, typename T> bool PaddleMobilePredictor<Device, T>::Init(const PaddleMobileConfig &config) { - paddle_mobile_.reset(new PaddleMobile<Device, T>()); + PaddleMobileConfigInternal configInternal; + configInternal.load_when_predict = config.load_when_predict; + paddle_mobile_.reset(new PaddleMobile<Device, T>(configInternal)); #ifdef PADDLE_MOBILE_CL paddle_mobile_->SetCLPath(config.cl_path); #endif @@ -135,14 +138,14 @@ bool PaddleMobilePredictor<Device, T>::Run( void ConvertPaddleTensors(const PaddleTensor &src, framework::Tensor *des) { des->Resize(framework::make_ddim(src.shape)); des->external_data = src.data.data(); - des->set_type(src.dtypeid); + des->set_type(static_cast<kTypeId_t>(static_cast<int>(src.dtypeid))); des->layout = src.layout == LAYOUT_HWC ? framework::LAYOUT_HWC : framework::LAYOUT_CHW; } void ConvertTensors(const framework::Tensor &src, PaddleTensor *des) { des->shape = framework::vectorize2int(src.dims()); - des->dtypeid = src.type(); + des->dtypeid = static_cast<PaddlekTypeId_t>(static_cast<int>(src.type())); des->layout = src.layout == framework::LAYOUT_HWC ? 
LAYOUT_HWC : LAYOUT_CHW; auto num = src.numel(); @@ -164,7 +167,8 @@ void PaddleMobilePredictor<Device, T>::FeedPaddleTensors( auto num = inputs.size(); std::vector<framework::Tensor> tensors(num, framework::Tensor()); for (int i = 0; i < num; i++) { - if (inputs[i].dtypeid == type_id<float>().hash_code()) { + if (static_cast<kTypeId_t>(static_cast<int>(inputs[i].dtypeid)) == + type_id<float>().hash_code()) { tensors[i].init(type_id<float>().hash_code()); } else { tensors[i].init(type_id<int8_t>().hash_code()); diff --git a/mobile/src/io/paddle_inference_api.h b/mobile/src/io/paddle_inference_api.h index ae7d34bd51dd59de9359a471964647c020e18649..4d2998d955bbbaf61b903da7eb2eef93a120436c 100644 --- a/mobile/src/io/paddle_inference_api.h +++ b/mobile/src/io/paddle_inference_api.h @@ -25,7 +25,6 @@ limitations under the License. */ #include #include #include -#include "common/type_define.h" namespace paddle_mobile { @@ -86,6 +85,56 @@ class PaddleBuf { bool memory_owned_{true}; }; +typedef enum { + paddle_void = 0, + paddle_float, + paddle_int, + paddle_uint16_t, + paddle_double, + paddle_int64_t, + paddle_size_t, + paddle_int16_t, + paddle_int8_t, + paddle_uint8_t, + paddle_bool, + paddle_string, + paddle_floats = 100, + paddle_ints, + paddle_int64_ts, + paddle_size_ts, + paddle_bools, + paddle_strings, + paddle_const_float = 200, + paddle_const_int, + paddle_block = 300, + paddle_tensor, + paddle_lod_tensor, + paddle_blocks, + paddle_tensors, + paddle_lod_tensors, + paddle_p_block = 400, + paddle_p_tensor, + paddle_p_lod_tensor, + paddle_p_blocks, + paddle_p_tensors, + paddle_p_lod_tensors, + paddle_scopes = 500, + paddle_selected_rows, + paddle_dim0 = 600, + paddle_dim1, + paddle_dim2, + paddle_dim3, + paddle_dim4, + paddle_dim5, + paddle_dim6, + paddle_dim7, + paddle_dim8, + paddle_dim9, +#ifdef PADDLE_MOBILE_CL + paddle_cl_image, +#endif +} PaddlekTypeId_t; + struct PaddleTensor { PaddleTensor() = default; std::string name; // variable name. std::vector<std::vector<size_t>> lod; PaddleBuf data; // blob of data. 
PaddleDType dtype; - kTypeId_t dtypeid; + PaddlekTypeId_t dtypeid; LayoutType layout; }; @@ -166,6 +215,7 @@ struct PaddleMobileConfig : public PaddlePredictor::Config { bool quantification = false; bool lod_mode = false; int thread_num = 1; + bool load_when_predict = false; std::string cl_path; struct PaddleModelMemoryPack memory_pack; }; diff --git a/mobile/tools/build.sh b/mobile/tools/build.sh index 877791ff7bdb4fc64f2d62210ff974c0cd6bced0..3606f66e755b72e4baa84e587bf29b8e3994c8b7 100755 --- a/mobile/tools/build.sh +++ b/mobile/tools/build.sh @@ -61,7 +61,7 @@ build_for_android() { elif [ "${PLATFORM}" = "arm-v8a" ]; then ABI="arm64-v8a" ARM_PLATFORM="V8" - CXX_FLAGS="-march=armv8-a -pie -fPIE -w -Wno-error=format-security -llog" + CXX_FLAGS="-march=armv8-a -pie -fPIE -w -Wno-error=format-security -llog -fuse-ld=gold" else echo "unknown platform!" exit -1