From 5957475cab2828a722b4242c270fb0e8459cf646 Mon Sep 17 00:00:00 2001
From: liuruilong
Date: Tue, 19 Feb 2019 15:50:05 +0800
Subject: [PATCH] add oc interface dim info and multi-output

---
 src/io/ios_io/PaddleMobileCPU.h  |  34 ++++++++--
 src/io/ios_io/PaddleMobileCPU.mm | 104 ++++++++++++++++++++++++++-----
 2 files changed, 116 insertions(+), 22 deletions(-)

diff --git a/src/io/ios_io/PaddleMobileCPU.h b/src/io/ios_io/PaddleMobileCPU.h
index 69e8b894d7..0536f513aa 100644
--- a/src/io/ios_io/PaddleMobileCPU.h
+++ b/src/io/ios_io/PaddleMobileCPU.h
@@ -19,10 +19,21 @@
 @interface PaddleMobileCPUResult: NSObject
 
+/**
+ @b pointer to the output data
+ */
 @property (assign, nonatomic, readonly) float *output;
 
+/**
+ @b number of floats in the output
+ */
 @property (assign, nonatomic, readonly) int outputSize;
 
+/**
+ @b dimension info; elements are NSNumbers, read them with longLongValue
+ */
+@property (strong, nonatomic, readonly) NSArray *dim;
+
 -(void)releaseOutput;
 
 @end
@@ -92,11 +103,6 @@
        andModelParamsLen:(size_t)combinedParamsLen
     andCombinedParamsBuf:(const uint8_t *)combinedParamsBuf;
 
-/*
- *
- * */
-
-
 /**
  @b Preprocess an image; the caller must allocate and release the output memory. Each pixel is preprocessed as (x + means) * scale, where x is the pixel value
@@ -134,7 +140,7 @@
 - (PaddleMobileCPUResult *)predict:(CGImageRef)image dim:(NSArray *)dim means:(NSArray *)means scale:(float)scale;
 
 /**
- Run a prediction; the preprocessing means is 0 and the scale is 1
+ @b Run a prediction; the preprocessing means is 0 and the scale is 1
 
  @param image input image
  @param dim input dimensions
@@ -142,6 +148,22 @@
  @return prediction result
  */
 - (PaddleMobileCPUResult *)predict:(CGImageRef)image dim:(NSArray *)dim;
 
+
+/**
+ @b Fetch the output whose key in the model description is "fetch"
+
+ @return prediction result
+ */
+- (PaddleMobileCPUResult *)fetchOutput;
+
+/**
+ @b When the model has multiple outputs, use this method to fetch the output for a given key
+
+ @param key key of the output in the model
+ @return prediction result
+ */
+- (PaddleMobileCPUResult *)fetchOutputWithKey:(NSString *)key;
+
 /**
  @b Clean up memory
  */
diff --git a/src/io/ios_io/PaddleMobileCPU.mm b/src/io/ios_io/PaddleMobileCPU.mm
index 7103dce16b..cec28d6381 100644
--- a/src/io/ios_io/PaddleMobileCPU.mm
+++ b/src/io/ios_io/PaddleMobileCPU.mm
@@ -43,6 +43,10 @@
   _outputSize = outputSize;
 }
 
+-(void)toSetDim:(NSArray *)dim {
+  _dim = dim;
+}
+
 @end
 
 @implementation PaddleMobileCPUConfig
@@ -241,17 +245,22 @@ static std::mutex shared_mutex;
   }
 
   paddle_mobile::framework::Tensor input_tensor;
-  paddle_mobile::framework::DDim dims = paddle_mobile::framework::make_ddim(dim_vec);
-  float *input_ptr = input_tensor.mutable_data<float>(dims);
-  memcpy(input_ptr, input, numel * sizeof(float));
 
   pam_->Predict(input_tensor);
 
   std::shared_ptr<paddle_mobile::framework::Tensor> output = pam_->Fetch();
 
+  auto output_dims = output->dims();
+  std::vector<int64_t> output_dim_vec = vectorize(output_dims);
+  NSMutableArray *ocDim = [NSMutableArray array];
+  for (int i = 0; i < output_dim_vec.size(); ++i) {
+    NSNumber *num = [NSNumber numberWithLongLong:output_dim_vec[i]];
+    [ocDim addObject:num];
+  }
+
   float *output_pointer = new float[output->numel()];
 
   memcpy(output_pointer, output->data<float>(),
@@ -259,6 +268,7 @@ static std::mutex shared_mutex;
 
   PaddleMobileCPUResult *cpuResult = [[PaddleMobileCPUResult alloc] init];
   [cpuResult toSetOutput: output_pointer];
+  [cpuResult toSetDim: ocDim];
   [cpuResult toSetOutputSize: output->numel()];
 
   return cpuResult;
@@ -304,23 +314,31 @@ static std::mutex shared_mutex;
     return nil;
   }
 
-  // input
-  std::vector<float> predict_input;
-  for (int j = 0; j < numel; ++j) {
-    predict_input.push_back(dataPointer[j]);
-  }
+  paddle_mobile::framework::Tensor input_tensor;
+  paddle_mobile::framework::DDim dims = paddle_mobile::framework::make_ddim(dim_vec);
+  float *input_ptr = input_tensor.mutable_data<float>(dims);
+  memcpy(input_ptr, dataPointer,
+         numel * sizeof(float));
 
-  // predict
-  std::vector<float> cpp_result = pam_->Predict(predict_input, dim_vec);
+  pam_->Predict(input_tensor);
+  std::shared_ptr<paddle_mobile::framework::Tensor> output_tensor = pam_->Fetch();
+
+  auto output_dims = output_tensor->dims();
+  std::vector<int64_t> output_dim_vec = vectorize(output_dims);
+  NSMutableArray *ocDim = [NSMutableArray array];
+  for (int i = 0; i < output_dim_vec.size(); ++i) {
+    NSNumber *num = [NSNumber numberWithLongLong:output_dim_vec[i]];
+    [ocDim addObject:num];
+  }
 
-  float *output_pointer = new float[cpp_result.size()];
-  memcpy(output_pointer, cpp_result.data(),
-         cpp_result.size() * sizeof(float));
+  float *output_pointer = new float[output_tensor->numel()];
+  memcpy(output_pointer, output_tensor->data<float>(),
+         output_tensor->numel() * sizeof(float));
 
   PaddleMobileCPUResult *cpuResult = [[PaddleMobileCPUResult alloc] init];
   [cpuResult toSetOutput: output_pointer];
-  [cpuResult toSetOutputSize: cpp_result.size()];
+  [cpuResult toSetDim: ocDim];
+  [cpuResult toSetOutputSize: output_tensor->numel()];
 
-  free(output);
   CFRelease(cfData);
   cfData = NULL;
@@ -331,8 +349,62 @@ static std::mutex shared_mutex;
   return [self predict:image dim:dim means:nil scale:1];
 }
 
+- (PaddleMobileCPUResult *)fetchOutput{
+  if (pam_ && loaded_) {
+    auto tensorPtr = pam_->Fetch();
+    float *output_pointer = new float[tensorPtr->numel()];
+    memcpy(output_pointer, tensorPtr->data<float>(),
+           tensorPtr->numel() * sizeof(float));
+    auto dims = tensorPtr->dims();
+    std::vector<int64_t> dim_vec = vectorize(dims);
+
+    NSMutableArray *ocDim = [NSMutableArray array];
+    for (int i = 0; i < dim_vec.size(); ++i) {
+      NSNumber *num = [NSNumber numberWithLongLong:dim_vec[i]];
+      [ocDim addObject:num];
+    }
+
+    PaddleMobileCPUResult *cpuResult = [[PaddleMobileCPUResult alloc] init];
+    [cpuResult toSetOutput: output_pointer];
+    [cpuResult toSetDim: ocDim];
+    [cpuResult toSetOutputSize: tensorPtr->numel()];
+
+    return cpuResult;
+  }
+  return nil;
+}
+
+- (PaddleMobileCPUResult *)fetchOutputWithKey:(NSString *)key{
+  if (pam_ && loaded_ && key.length) {
+    auto tensorPtr = pam_->Fetch(std::string([key cStringUsingEncoding:NSUTF8StringEncoding]));
+    float *output_pointer = new float[tensorPtr->numel()];
+    memcpy(output_pointer, tensorPtr->data<float>(),
+           tensorPtr->numel() * sizeof(float));
+
+    auto dims = tensorPtr->dims();
+    std::vector<int64_t> dim_vec = vectorize(dims);
+
+    NSMutableArray *ocDim = [NSMutableArray array];
+    for (int i = 0; i < dim_vec.size(); ++i) {
+      NSNumber *num = [NSNumber numberWithLongLong:dim_vec[i]];
+      [ocDim addObject:num];
+    }
+
+    PaddleMobileCPUResult *cpuResult = [[PaddleMobileCPUResult alloc] init];
+    [cpuResult toSetOutput: output_pointer];
+    [cpuResult toSetDim: ocDim];
+    [cpuResult toSetOutputSize: tensorPtr->numel()];
+
+    return cpuResult;
+  }
+  return nil;
+}
+
 - (void)clear{
-  pam_->Clear();
+  if (pam_) {
+    pam_->Clear();
+  }
 }
 
 @end
--
GitLab
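Usage sketch (not part of the patch): the snippet below shows how the new dim info and the
multi-output fetch added above could be consumed from Objective-C. The helper name RunExample,
the instance `pam`, the input `image`, the input shape @[@1, @3, @224, @224], and the output
key @"fetch_1" are illustrative assumptions, not values defined by this change.

#import <CoreGraphics/CoreGraphics.h>
#import "PaddleMobileCPU.h"

static void RunExample(PaddleMobileCPU *pam, CGImageRef image) {
  // Predict with default preprocessing (means = 0, scale = 1); the input shape
  // @[@1, @3, @224, @224] is only an example and depends on the loaded model.
  PaddleMobileCPUResult *result = [pam predict:image dim:@[@1, @3, @224, @224]];

  // New in this patch: the result carries its shape as NSNumbers (longLongValue).
  for (NSNumber *d in result.dim) {
    NSLog(@"output dim: %lld", d.longLongValue);
  }
  for (int i = 0; i < result.outputSize; ++i) {
    NSLog(@"output[%d] = %f", i, result.output[i]);
  }
  [result releaseOutput];

  // For a model with several outputs, each tensor can be read by its fetch key;
  // @"fetch_1" is a hypothetical key taken from the model description.
  PaddleMobileCPUResult *second = [pam fetchOutputWithKey:@"fetch_1"];
  if (second) {
    NSLog(@"second output: %d floats", second.outputSize);
    [second releaseOutput];
  }
}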