Commit 24a86e2e authored by Hao Han

performance improvement: fold InitAclLayer() into Bypass_acl() and run the bypass check once in Init() instead of on every Compute() call, so each MALI GPU kernel (batchnorm, concat, conv_add, conv, pool, relu, softmax) builds its ACL layer once rather than on every forward pass

Parent 7251a92c
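
All seven kernels below get the same treatment: InitAclLayer() is folded into Bypass_acl(), the bypass check moves from Compute() into Init(), and RunAcl() is called on the raw args pointers without the intermediate float-pointer casts. The following is a minimal compilable sketch of the before/after control flow, not the actual paddle-mobile code; AclOp, Param, and the free functions are hypothetical stand-ins for the real AclBatchNormOp/BatchNormParam (etc.) classes and kernel methods.

#include <iostream>

// Hypothetical stand-ins for the real paddle-mobile types (AclBatchNormOp,
// BatchNormParam, and so on); a sketch of the pattern, not the actual code.
struct Param {};

struct AclOp {
  bool force_bypass_acl_path_ = false;
  bool layer_ready_ = false;

  // Builds the ACL layer from the parsed parameters (one-time cost).
  void InitAclLayer(const Param& /*param*/) { layer_ready_ = true; }

  // After this commit, Bypass_acl() also performs the layer setup, so both
  // the check and the initialization happen exactly once, in Init().
  bool Bypass_acl(const Param& param) {
    InitAclLayer(param);
    return force_bypass_acl_path_;  // true => fall back to the non-ACL path
  }

  // Enqueues the already-configured ACL layer on the given buffers.
  void RunAcl(const void* /*input*/, void* /*output*/) {}
};

// Before: Compute() called Bypass_acl() and InitAclLayer() on every forward
// pass, rebuilding the ACL layer each time. After: Init() pays that cost
// once, and Compute() only launches the layer.
bool Init(AclOp* acl_op, const Param& param) {
  if (acl_op->Bypass_acl(param)) {
    std::cout << "init acl failed" << std::endl;
    return false;  // caller falls back to the CPU kernel
  }
  return true;
}

void Compute(AclOp* acl_op, const void* input, void* output) {
  if (acl_op == nullptr) {
    return;
  }
  acl_op->RunAcl(input, output);  // no per-call re-initialization
}

int main() {
  AclOp op;
  Param param;
  float in[4] = {0}, out[4] = {0};
  if (Init(&op, param)) {
    Compute(&op, in, out);
  }
  return 0;
}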
@@ -14,8 +14,6 @@ limitations under the License. */
#ifdef BATCHNORM_OP
#pragma once
#include "operators/kernel/batchnorm_kernel.h"
#ifdef PADDLE_MOBILE_MALI_GPU
#include "acl_operator.h"
@@ -73,6 +71,7 @@ class AclBatchNormOp : public acl::ACLOperator {
bool Bypass_acl(const BatchNormParam& param) {
bool bypass_acl = false;
AclParametersByContext(param);
InitAclLayer(param);
// for performance, more groups impact GPU performance
if (this->force_bypass_acl_path_) {
bypass_acl = true;
@@ -137,6 +136,10 @@ bool BatchNormKernel<GPU_MALI, float>::Init(const BatchNormParam& param) const {
acl_op = new AclBatchNormOp<GPU_MALI, float>();
this->SetAclOp((void*)acl_op, (void*)this);
}
if (acl_op->Bypass_acl(param)) {
std::cout << "init acl failed" << std::endl;
return false;
}
return true;
}
@@ -149,15 +152,8 @@ void BatchNormKernel<GPU_MALI, float>::Compute(
if (acl_op == nullptr) {
return;
}
if (acl_op->Bypass_acl(param)) {
std::cout << "init acl failed" << std::endl;
return;
}
acl::AclParameters& args = acl_op->getargs();
const float* input_data = (const float*)args.input_data;
const float* output_data = (const float*)args.output_data;
acl_op->InitAclLayer(param);
acl_op->RunAcl((void*)input_data, (void*)output_data);
acl_op->RunAcl(args.input_data, args.output_data);
}
template class BatchNormKernel<GPU_MALI, float>;
......
@@ -50,8 +50,6 @@ class AclConcatOp : public acl::ACLOperator {
T type;
for (int i = 0; i < input_data->size(); i++) {
const T* idata = (*input_data)[i]->data<T>();
const T* pdata = (*input_data)[i]->data<T>();
int in_batch = (*input_data)[i]->dims()[0];
int in_channels = (*input_data)[i]->dims()[1];
int in_width = (*input_data)[i]->dims()[2];
@@ -75,6 +73,7 @@ class AclConcatOp : public acl::ACLOperator {
bool Bypass_acl(const ConcatParam& param) {
bool bypass_acl = false;
AclParametersByContext(param);
InitAclLayer(param);
// for performance, more groups impact GPU performance
if (this->force_bypass_acl_path_ || !args.is_channel_concat) {
bypass_acl = true;
@@ -110,6 +109,10 @@ bool ConcatKernel<GPU_MALI, float>::Init(const ConcatParam& param) const {
acl_op = new AclConcatOp<GPU_MALI, float>();
this->SetAclOp((void*)acl_op, (void*)this);
}
if (acl_op->Bypass_acl(param)) {
std::cout << "init acl failed" << std::endl;
return false;
}
return true;
}
@@ -121,15 +124,8 @@ void ConcatKernel<GPU_MALI, float>::Compute(const ConcatParam& param) const {
if (acl_op == nullptr) {
return;
}
if (acl_op->Bypass_acl(param)) {
std::cout << "init acl failed" << std::endl;
return;
}
acl::AclParameters& args = acl_op->getargs();
std::vector<framework::LoDTensor*> temp_data = args.in_tensor;
const float* output_data = (const float*)args.output_data;
acl_op->InitAclLayer(param);
acl_op->RunAcl(temp_data, (void*)output_data);
acl_op->RunAcl(args.in_tensor, args.output_data);
}
template class ConcatKernel<GPU_MALI, float>;
......
@@ -55,7 +55,8 @@ class AclConvAddOp : public acl::ACLOperator {
set_operator_init_done();
this->force_bypass_acl_path_ = false;
check_direct_conv();
// check_direct_conv();
group() = args.num_group;
//[kernel_x, kernel_y, IFM, OFM]
new_tensor(weights(), weights_shape, args.weight_data);
//[OFM]
@@ -63,8 +64,6 @@ class AclConvAddOp : public acl::ACLOperator {
new_tensor(biases(), biases_shape, args.biases_data);
}
group() = args.num_group;
//[width, height, IFM]
new_tensor(input(), input_shape, args.input_data);
//[width, height, OFM]
@@ -79,6 +78,7 @@ class AclConvAddOp : public acl::ACLOperator {
bool Bypass_acl(const FusionConvAddParam& param) {
bool bypass_acl = false;
AclParametersByContext(param);
InitAclLayer(param);
// for performance, more groups impact GPU performance
if (this->force_bypass_acl_path_ || args.num_group >= 5) {
bypass_acl = true;
@@ -204,6 +204,10 @@ bool ConvAddKernel<GPU_MALI, float>::Init(
acl_op = new AclConvAddOp<GPU_MALI, float>();
this->SetAclOp((void*)acl_op, (void*)this);
}
if (acl_op->Bypass_acl(param)) {
std::cout << "init acl failed" << std::endl;
return false;
}
return true;
}
@@ -216,15 +220,9 @@ void ConvAddKernel<GPU_MALI, float>::Compute(
if (acl_op == nullptr) {
return;
}
if (acl_op->Bypass_acl(param)) {
std::cout << "init acl failed" << std::endl;
return;
}
acl::AclParameters& args = acl_op->getargs();
const float* input_data = (const float*)args.input_data;
const float* output_data = (const float*)args.output_data;
acl_op->InitAclLayer(param);
acl_op->RunAcl((void*)input_data, (void*)output_data);
acl_op->RunAcl(args.input_data, args.output_data);
}
template class ConvAddKernel<GPU_MALI, float>;
......
@@ -79,6 +79,7 @@ class AclConvOp : public acl::ACLOperator {
bool Bypass_acl(const ConvParam& param) {
bool bypass_acl = false;
AclParametersByContext(param);
InitAclLayer(param);
// for performance, more groups impact GPU performance
if (this->force_bypass_acl_path_ || args.num_group >= 5) {
bypass_acl = true;
@@ -202,6 +203,10 @@ bool ConvKernel<GPU_MALI, float>::Init(const ConvParam& param) const {
acl_op = new AclConvOp<GPU_MALI, float>();
this->SetAclOp((void*)acl_op, (void*)this);
}
if (acl_op->Bypass_acl(param)) {
std::cout << "init acl failed" << std::endl;
return false;
}
return true;
}
@@ -213,15 +218,8 @@ void ConvKernel<GPU_MALI, float>::Compute(const ConvParam& param) const {
if (acl_op == nullptr) {
return;
}
if (acl_op->Bypass_acl(param)) {
std::cout << "init acl failed" << std::endl;
return;
}
acl::AclParameters& args = acl_op->getargs();
const float* input_data = (const float*)args.input_data;
const float* output_data = (const float*)args.output_data;
acl_op->InitAclLayer(param);
acl_op->RunAcl((void*)input_data, (void*)output_data);
acl_op->RunAcl(args.input_data, args.output_data);
}
template class ConvKernel<GPU_MALI, float>;
......
@@ -82,6 +82,7 @@ class AclPoolOp : public acl::ACLOperator {
bool Bypass_acl(const PoolParam& param) {
bool bypass_acl = false;
AclParametersByContext(param);
InitAclLayer(param);
// for performance, more groups impact GPU performance
if (this->force_bypass_acl_path_) {
bypass_acl = true;
@@ -186,6 +187,10 @@ bool PoolKernel<GPU_MALI, float>::Init(const PoolParam& param) const {
acl_op = new AclPoolOp<GPU_MALI, float>();
this->SetAclOp((void*)acl_op, (void*)this);
}
if (acl_op->Bypass_acl(param)) {
std::cout << "init acl failed" << std::endl;
return false;
}
return true;
}
@@ -197,14 +202,9 @@ void PoolKernel<GPU_MALI, float>::Compute(const PoolParam& param) const {
if (acl_op == nullptr) {
return;
}
if (acl_op->Bypass_acl(param)) {
std::cout << "init acl failed" << std::endl;
return;
}
acl::AclParameters& args = acl_op->getargs();
const float* input_data = (const float*)args.input_data;
const float* output_data = (const float*)args.output_data;
acl_op->InitAclLayer(param);
for (int n = 0; n < args.batch; ++n) {
acl_op->RunAcl((void*)input_data, (void*)output_data);
input_data += args.in_depth * args.in_cols * args.in_rows;
......
@@ -41,10 +41,10 @@ class AclReluOp : public acl::ACLOperator {
acl::AclParameters& getargs() { return args; }
void InitAclLayer(const ReluParam& param) {
setTargetHint(acl::TargetHint::OPENCL);
arm_compute::TensorShape input_shape(args.in_cols * args.in_rows *
args.in_depth * args.batch);
arm_compute::TensorShape output_shape(args.in_cols * args.in_rows *
args.in_depth * args.out_num);
arm_compute::TensorShape input_shape(args.in_cols, args.in_rows,
args.in_depth, args.batch);
arm_compute::TensorShape output_shape(args.in_cols, args.in_rows,
args.in_depth, args.out_num);
// arm_compute::TensorShape weights_shape(
// args.filter_cols, args.filter_rows, args.in_depth, args.out_depth);
// arm_compute::TensorShape biases_shape(args.out_depth);
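
A relu-specific fix rides along in the hunk above: the input and output shapes were previously built as a single flattened 1-D extent (cols * rows * depth * batch), hiding the real tensor layout from ACL, and are now passed as four separate dimensions. A small illustrative sketch, assuming ACL's variadic arm_compute::TensorShape constructor and made-up dimension values standing in for args.in_cols / in_rows / in_depth / batch:

#include "arm_compute/core/TensorShape.h"

void relu_shape_example() {
  // Made-up dimensions; the kernel reads these from its AclParameters.
  const size_t cols = 7, rows = 5, depth = 3, batch = 2;

  // Old: everything collapsed into one flattened dimension.
  arm_compute::TensorShape flat(cols * rows * depth * batch);

  // New: the four logical [width, height, channels, batch] dimensions
  // are kept separate.
  arm_compute::TensorShape shaped(cols, rows, depth, batch);
}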
@@ -71,6 +71,7 @@ class AclReluOp : public acl::ACLOperator {
bool Bypass_acl(const ReluParam& param) {
bool bypass_acl = false;
AclParametersByContext(param);
InitAclLayer(param);
// for performance, more groups impact GPU performance
if (this->force_bypass_acl_path_) {
bypass_acl = true;
@@ -106,6 +107,10 @@ bool ReluKernel<GPU_MALI, float>::Init(const ReluParam& param) const {
acl_op = new AclReluOp<GPU_MALI, float>();
this->SetAclOp((void*)acl_op, (void*)this);
}
if (acl_op->Bypass_acl(param)) {
std::cout << "init acl failed" << std::endl;
return false;
}
return true;
}
@@ -117,15 +122,8 @@ void ReluKernel<GPU_MALI, float>::Compute(const ReluParam& param) const {
if (acl_op == nullptr) {
return;
}
if (acl_op->Bypass_acl(param)) {
std::cout << "init acl failed" << std::endl;
return;
}
acl::AclParameters& args = acl_op->getargs();
const float* input_data = (const float*)args.input_data;
const float* output_data = (const float*)args.output_data;
acl_op->InitAclLayer(param);
acl_op->RunAcl((void*)input_data, (void*)output_data);
acl_op->RunAcl(args.input_data, args.output_data);
}
template class ReluKernel<GPU_MALI, float>;
......
@@ -61,6 +61,7 @@ class AclSoftmaxOp : public acl::ACLOperator {
bool Bypass_acl(const SoftmaxParam& param) {
bool bypass_acl = false;
AclParametersByContext(param);
InitAclLayer(param);
// for performance, more groups impact GPU performance
if (this->force_bypass_acl_path_) {
bypass_acl = true;
@@ -103,6 +104,10 @@ bool SoftmaxKernel<GPU_MALI, float>::Init(const SoftmaxParam& param) const {
acl_op = new AclSoftmaxOp<GPU_MALI, float>();
this->SetAclOp((void*)acl_op, (void*)this);
}
if (acl_op->Bypass_acl(param)) {
std::cout << "init acl failed" << std::endl;
return false;
}
return true;
}
@@ -114,14 +119,10 @@ void SoftmaxKernel<GPU_MALI, float>::Compute(const SoftmaxParam& param) const {
if (acl_op == nullptr) {
return;
}
if (acl_op->Bypass_acl(param)) {
std::cout << "init acl failed" << std::endl;
return;
}
acl::AclParameters& args = acl_op->getargs();
const float* input_data = (const float*)args.input_data;
const float* output_data = (const float*)args.output_data;
acl_op->InitAclLayer(param);
for (int n = 0; n < args.out_num; ++n) {
acl_op->RunAcl((void*)input_data, (void*)output_data);
input_data += args.in_depth;
......