提交 9e2dc93d 编写于 作者: R Ray Liu 提交者: GitHub

Merge pull request #1442 from codeWorm2015/develop

fix #1441 add oc interface dim info and multi-output
...@@ -19,10 +19,21 @@ ...@@ -19,10 +19,21 @@
@interface PaddleMobileCPUResult: NSObject @interface PaddleMobileCPUResult: NSObject
/**
@b 输出指针
*/
@property (assign, nonatomic, readonly) float *output; @property (assign, nonatomic, readonly) float *output;
/**
@b 输出的 float 数
* */
@property (assign, nonatomic, readonly) int outputSize; @property (assign, nonatomic, readonly) int outputSize;
/**
 @b Dimension info of the output tensor; read each entry via -longLongValue
 */
@property (copy, nonatomic, readonly) NSArray <NSNumber *> *dim;
-(void)releaseOutput; -(void)releaseOutput;
@end @end
...@@ -92,11 +103,6 @@ ...@@ -92,11 +103,6 @@
andModelParamsLen:(size_t)combinedParamsLen andModelParamsLen:(size_t)combinedParamsLen
andCombinedParamsBuf:(const uint8_t *)combinedParamsBuf; andCombinedParamsBuf:(const uint8_t *)combinedParamsBuf;
/*
*
* */
/** /**
@b 对图像进行预处理, 需要外部开辟 output 内存, 外部释放 output 内存, 每一个像素经过这样的预处理 (x + means) * scale, 其中 x 为像素值 @b 对图像进行预处理, 需要外部开辟 output 内存, 外部释放 output 内存, 每一个像素经过这样的预处理 (x + means) * scale, 其中 x 为像素值
...@@ -134,7 +140,7 @@ ...@@ -134,7 +140,7 @@
- (PaddleMobileCPUResult *)predict:(CGImageRef)image dim:(NSArray<NSNumber *> *)dim means:(NSArray<NSNumber *> *)means scale:(float)scale; - (PaddleMobileCPUResult *)predict:(CGImageRef)image dim:(NSArray<NSNumber *> *)dim means:(NSArray<NSNumber *> *)means scale:(float)scale;
/** /**
进行预测, 预处理 means 值为 0, scale 值为 1 @b 进行预测, 预处理 means 值为 0, scale 值为 1
@param image 输入图像 @param image 输入图像
@param dim 输入维度 @param dim 输入维度
...@@ -142,6 +148,22 @@ ...@@ -142,6 +148,22 @@
*/ */
- (PaddleMobileCPUResult *)predict:(CGImageRef)image dim:(NSArray<NSNumber *> *)dim; - (PaddleMobileCPUResult *)predict:(CGImageRef)image dim:(NSArray<NSNumber *> *)dim;
/**
 @b Fetches the output whose key in the model description is "fetch"
 @return The prediction result
 */
- (PaddleMobileCPUResult *)fetchOutput;
/**
 @b When the model has multiple outputs, use this to fetch the output for a given key
 @param key The output's key in the model
 @return The prediction result
 */
- (PaddleMobileCPUResult *)fetchOutputWithKey:(NSString *)key;
/** /**
@b 清理内存 @b 清理内存
*/ */
......
...@@ -43,6 +43,10 @@ ...@@ -43,6 +43,10 @@
_outputSize = outputSize; _outputSize = outputSize;
} }
/**
 @b Internal setter backing the readonly `dim` property.
 @param dim The output tensor's dimensions.
 */
-(void)toSetDim:(NSArray <NSNumber *> *)dim {
  // Defensive copy: call sites pass an NSMutableArray, which would otherwise
  // remain mutable behind the readonly NSArray property.
  _dim = [dim copy];
}
@end @end
@implementation PaddleMobileCPUConfig @implementation PaddleMobileCPUConfig
...@@ -241,17 +245,22 @@ static std::mutex shared_mutex; ...@@ -241,17 +245,22 @@ static std::mutex shared_mutex;
} }
paddle_mobile::framework::Tensor input_tensor; paddle_mobile::framework::Tensor input_tensor;
paddle_mobile::framework::DDim dims = paddle_mobile::framework::make_ddim(dim_vec); paddle_mobile::framework::DDim dims = paddle_mobile::framework::make_ddim(dim_vec);
float *input_ptr = input_tensor.mutable_data<float>(dims); float *input_ptr = input_tensor.mutable_data<float>(dims);
memcpy(input_ptr, input, memcpy(input_ptr, input,
numel * sizeof(float)); numel * sizeof(float));
pam_->Predict(input_tensor); pam_->Predict(input_tensor);
std::shared_ptr<paddle_mobile::framework::Tensor> output = pam_->Fetch(); std::shared_ptr<paddle_mobile::framework::Tensor> output = pam_->Fetch();
auto output_dims = output->dims();
std::vector<int64_t> output_dim_vec = vectorize(output_dims);
NSMutableArray <NSNumber *> *ocDim = [NSMutableArray array];
for (int i = 0; i < output_dim_vec.size(); ++i) {
NSNumber *num = [NSNumber numberWithLongLong:output_dim_vec[i]];
[ocDim addObject:num];
}
float *output_pointer = new float[output->numel()]; float *output_pointer = new float[output->numel()];
memcpy(output_pointer, output->data<float>(), memcpy(output_pointer, output->data<float>(),
...@@ -259,6 +268,7 @@ static std::mutex shared_mutex; ...@@ -259,6 +268,7 @@ static std::mutex shared_mutex;
PaddleMobileCPUResult *cpuResult = [[PaddleMobileCPUResult alloc] init]; PaddleMobileCPUResult *cpuResult = [[PaddleMobileCPUResult alloc] init];
[cpuResult toSetOutput: output_pointer]; [cpuResult toSetOutput: output_pointer];
[cpuResult toSetDim: ocDim];
[cpuResult toSetOutputSize: output->numel()]; [cpuResult toSetOutputSize: output->numel()];
return cpuResult; return cpuResult;
...@@ -304,23 +314,31 @@ static std::mutex shared_mutex; ...@@ -304,23 +314,31 @@ static std::mutex shared_mutex;
return nil; return nil;
} }
// input paddle_mobile::framework::Tensor input_tensor;
std::vector<float> predict_input; paddle_mobile::framework::DDim dims = paddle_mobile::framework::make_ddim(dim_vec);
for (int j = 0; j < numel; ++j) { float *input_ptr = input_tensor.mutable_data<float>(dims);
predict_input.push_back(dataPointer[j]); memcpy(input_ptr, dataPointer,
} numel * sizeof(float));
// predict pam_->Predict(input_tensor);
std::vector<float> cpp_result = pam_->Predict(predict_input, dim_vec); std::shared_ptr<paddle_mobile::framework::Tensor> output_tensor = pam_->Fetch();
auto output_dims = output_tensor->dims();
std::vector<int64_t> output_dim_vec = vectorize(output_dims);
NSMutableArray <NSNumber *> *ocDim = [NSMutableArray array];
for (int i = 0; i < output_dim_vec.size(); ++i) {
NSNumber *num = [NSNumber numberWithLongLong:output_dim_vec[i]];
[ocDim addObject:num];
}
float *output_pointer = new float[cpp_result.size()]; float *output_pointer = new float[output_tensor->numel()];
memcpy(output_pointer, cpp_result.data(), memcpy(output_pointer, output_tensor->data<float>(),
cpp_result.size() * sizeof(float)); output_tensor->numel() * sizeof(float));
PaddleMobileCPUResult *cpuResult = [[PaddleMobileCPUResult alloc] init]; PaddleMobileCPUResult *cpuResult = [[PaddleMobileCPUResult alloc] init];
[cpuResult toSetOutput: output_pointer]; [cpuResult toSetOutput: output_pointer];
[cpuResult toSetOutputSize: cpp_result.size()]; [cpuResult toSetDim: ocDim];
[cpuResult toSetOutputSize: output_tensor->numel()];
free(output);
CFRelease(cfData); CFRelease(cfData);
cfData = NULL; cfData = NULL;
...@@ -331,8 +349,62 @@ static std::mutex shared_mutex; ...@@ -331,8 +349,62 @@ static std::mutex shared_mutex;
return [self predict:image dim:dim means:nil scale:1]; return [self predict:image dim:dim means:nil scale:1];
} }
/**
 @b Fetches the default ("fetch") output of the loaded model.
 @return The prediction result, or nil when no model has been loaded.
 */
- (PaddleMobileCPUResult *)fetchOutput{
  if (!pam_ || !loaded_) {
    return nil;
  }
  auto outputTensor = pam_->Fetch();
  int64_t elementCount = outputTensor->numel();
  // Buffer ownership passes to the result object; presumably freed by
  // -releaseOutput — confirm against PaddleMobileCPUResult's implementation.
  float *resultBuffer = new float[elementCount];
  memcpy(resultBuffer, outputTensor->data<float>(),
         elementCount * sizeof(float));
  std::vector<int64_t> shape = vectorize(outputTensor->dims());
  NSMutableArray <NSNumber *> *shapeNumbers = [NSMutableArray array];
  for (int64_t extent : shape) {
    [shapeNumbers addObject:[NSNumber numberWithLongLong:extent]];
  }
  PaddleMobileCPUResult *result = [[PaddleMobileCPUResult alloc] init];
  [result toSetOutput: resultBuffer];
  [result toSetDim: shapeNumbers];
  [result toSetOutputSize: elementCount];
  return result;
}
/**
 @b Fetches the model output registered under the given key; use when the
 model produces multiple outputs.
 @param key The output's key in the model description.
 @return The prediction result, or nil when no model is loaded or key is empty.
 */
- (PaddleMobileCPUResult *)fetchOutputWithKey:(NSString *)key{
  if (!pam_ || !loaded_ || key.length == 0) {
    return nil;
  }
  auto outputTensor = pam_->Fetch(std::string([key cStringUsingEncoding:NSUTF8StringEncoding]));
  int64_t elementCount = outputTensor->numel();
  // Buffer ownership passes to the result object; presumably freed by
  // -releaseOutput — confirm against PaddleMobileCPUResult's implementation.
  float *resultBuffer = new float[elementCount];
  memcpy(resultBuffer, outputTensor->data<float>(),
         elementCount * sizeof(float));
  std::vector<int64_t> shape = vectorize(outputTensor->dims());
  NSMutableArray <NSNumber *> *shapeNumbers = [NSMutableArray array];
  for (int64_t extent : shape) {
    [shapeNumbers addObject:[NSNumber numberWithLongLong:extent]];
  }
  PaddleMobileCPUResult *result = [[PaddleMobileCPUResult alloc] init];
  [result toSetOutput: resultBuffer];
  [result toSetDim: shapeNumbers];
  [result toSetOutputSize: elementCount];
  return result;
}
/**
 @b Releases the native engine's memory.  No-op when no engine exists
 (e.g. called before load), so the call is always safe.
 */
- (void)clear{
  if (pam_) {
    pam_->Clear();
  }
}
@end @end
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册