Commit 7432e34a authored by qnqinan

update format of added fpga ops and kernels

Parent 30ca60d7
@@ -17,26 +17,29 @@ limitations under the License. */
 #include "fusion_elementwise_add_relu_op.h"
 namespace paddle_mobile {
 namespace operators {
 template <typename Dtype, typename T>
 void FusionElementwiseAddReluOp<Dtype, T>::InferShape() const {
   auto x_dim = this->param_.InputX()->dims();
   this->param_.Out()->Resize(x_dim);
 }
 }  // namespace operators
 }  // namespace paddle_mobile
 namespace ops = paddle_mobile::operators;
 #ifdef PADDLE_MOBILE_CPU
-REGISTER_OPERATOR_CPU(fusion_elementwise_add_relu, ops::FusionElementwiseAddReluOp);
+REGISTER_OPERATOR_CPU(fusion_elementwise_add_relu,
+                      ops::FusionElementwiseAddReluOp);
 #endif
 #ifdef PADDLE_MOBILE_MALI_GPU
-REGISTER_OPERATOR_MALI_GPU(fusion_elementwise_add_relu, ops::FusionElementwiseAddReluOp);
+REGISTER_OPERATOR_MALI_GPU(fusion_elementwise_add_relu,
+                           ops::FusionElementwiseAddReluOp);
 #endif
 #ifdef PADDLE_MOBILE_FPGA
-REGISTER_OPERATOR_FPGA(fusion_elementwise_add_relu, ops::FusionElementwiseAddReluOp);
+REGISTER_OPERATOR_FPGA(fusion_elementwise_add_relu,
+                       ops::FusionElementwiseAddReluOp);
 #endif
 #endif
@@ -22,10 +22,11 @@ limitations under the License. */
 #include "operators/op_param.h"
 namespace paddle_mobile {
 namespace operators {
 using std::string;
 template <typename DeviceType, typename T>
-class FusionElementwiseAddReluOp : public framework::OperatorWithKernel<
+class FusionElementwiseAddReluOp
+    : public framework::OperatorWithKernel<
           DeviceType, ElementwiseAddReluParam,
           operators::ElementwiseAddReluKernel<DeviceType, T>> {
  public:
@@ -44,8 +45,8 @@ namespace paddle_mobile {
   void InferShape() const override;
  protected:
 };
 }  // namespace operators
 }  // namespace paddle_mobile
 #ifdef PADDLE_MOBILE_CPU
......
@@ -21,18 +21,18 @@ limitations under the License. */
 #include "operators/op_param.h"
 namespace paddle_mobile {
 namespace operators {
 using namespace framework;
 template <typename DeviceType, typename T>
 class ElementwiseAddReluKernel
     : public framework::OpKernelBase<DeviceType, ElementwiseAddReluParam> {
  public:
   void Compute(const ElementwiseAddReluParam &param) const;
   bool Init(ElementwiseAddReluParam *param);
 };
 }  // namespace operators
 }  // namespace paddle_mobile
 #endif
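The header above fixes the two-phase contract that every FPGA kernel in this commit follows: Init runs once to validate inputs and fill the device argument struct, and Compute runs per inference to launch the configured op. A minimal standalone mock of that lifecycle, with hypothetical names that only stand in for the framework-managed objects:

#include <iostream>

struct MockParam {   // stands in for ElementwiseAddReluParam
  bool args_ready = false;
};

struct MockKernel {  // stands in for an OpKernelBase subclass
  bool Init(MockParam *param) {  // one-time setup of device args
    param->args_ready = true;
    return true;
  }
  void Compute(const MockParam &param) const {  // per-run launch
    if (param.args_ready) std::cout << "launch fpga op\n";
  }
};

int main() {
  MockParam p;
  MockKernel k;
  if (k.Init(&p)) k.Compute(p);  // Init once, then Compute per inference
}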
@@ -17,24 +17,24 @@ limitations under the License. */
 #include "operators/kernel/dropout_kernel.h"
 namespace paddle_mobile {
 namespace operators {
 template <>
 bool DropoutKernel<FPGA, float>::Init(DropoutParam *param) {
   param->Out()->ShareDataWith(*param->InputX());
   return true;
 }
 template <>
 void DropoutKernel<FPGA, float>::Compute(const DropoutParam &param) const {
-  //auto *input_x = param.InputX();
-  //auto *out = param.Out();
-  //auto input_x_ptr = input_x->data<float>();
-  //auto out_ptr = out->mutable_data<float>();
-  //out_ptr = const_cast<float *>(input_x_ptr);
+  // auto *input_x = param.InputX();
+  // auto *out = param.Out();
+  // auto input_x_ptr = input_x->data<float>();
+  // auto out_ptr = out->mutable_data<float>();
+  // out_ptr = const_cast<float *>(input_x_ptr);
 }
 }  // namespace operators
 }  // namespace paddle_mobile
 #endif
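At inference time dropout reduces to the identity function (assuming the usual inverted-dropout convention, where the keep-probability scaling happens during training), which is why Init aliases the output tensor to the input via ShareDataWith and Compute is left empty. A CPU reference of the same semantics, as a hypothetical standalone sketch rather than paddle-mobile code:

#include <cstddef>

// Inference-time dropout: a pure pass-through. If the buffers alias
// (the ShareDataWith case above), there is nothing to do at all.
void dropout_inference(const float *x, float *out, std::size_t n) {
  if (out == x) return;                               // aliased: no-op
  for (std::size_t i = 0; i < n; ++i) out[i] = x[i];  // else plain copy
}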
@@ -17,10 +17,11 @@ limitations under the License. */
 #include "fpga/api/fpga_api.h"
 namespace paddle_mobile {
 namespace operators {
 template <>
-bool ElementwiseAddReluKernel<FPGA, float>::Init(ElementwiseAddReluParam *param) {
+bool ElementwiseAddReluKernel<FPGA, float>::Init(
+    ElementwiseAddReluParam *param) {
   bool relu_enabled = true;
   const Tensor *input_x = param->InputX();
   const Tensor *input_y = param->InputY();
@@ -32,31 +33,32 @@ namespace paddle_mobile {
   ewaddArgs.relu_enabled = relu_enabled;
   ewaddArgs.const0 = 1;
   ewaddArgs.const1 = 1;
-  ewaddArgs.image0.address = (void*)input_x_ptr;
+  ewaddArgs.image0.address = (void *)input_x_ptr;
   ewaddArgs.image0.channels = input_x->dims()[1];
-  ewaddArgs.image0.scale_address = nullptr;//ew has scale attribute??
+  ewaddArgs.image0.scale_address = nullptr;  // ew has scale attribute??
   ewaddArgs.image0.height = input_x->dims()[2];
   ewaddArgs.image0.width = input_x->dims()[3];
   ewaddArgs.image0.pad_height = 1;
   ewaddArgs.image0.pad_width = 1;
-  ewaddArgs.image1.address = (void*)input_y_ptr;
+  ewaddArgs.image1.address = (void *)input_y_ptr;
   ewaddArgs.image1.channels = input_y->dims()[1];
-  ewaddArgs.image1.scale_address = nullptr;//ew has scale attribute??
+  ewaddArgs.image1.scale_address = nullptr;  // ew has scale attribute??
   ewaddArgs.image1.height = input_y->dims()[2];
   ewaddArgs.image1.width = input_y->dims()[3];
   ewaddArgs.image1.pad_height = 1;
   ewaddArgs.image1.pad_width = 1;
   ewaddArgs.output.scale_address = nullptr;
-  ewaddArgs.output.address = (void*)out_ptr;
+  ewaddArgs.output.address = (void *)out_ptr;
   param->SetFpgaArgs(ewaddArgs);
   return true;
 }
 template <>
-void ElementwiseAddReluKernel<FPGA, float>::Compute(const ElementwiseAddReluParam &param) const {
+void ElementwiseAddReluKernel<FPGA, float>::Compute(
+    const ElementwiseAddReluParam &param) const {
   fpga::ComputeFpgaEWAdd(param.FpgaArgs());
 }
 }  // namespace operators
 }  // namespace paddle_mobile
 #endif
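For reference, with relu_enabled = true and both constants set to 1 (const0/const1 are presumably per-input scale factors), the computation ComputeFpgaEWAdd is configured to perform is a fused elementwise add plus ReLU. A hypothetical CPU sketch of that math, not the paddle-mobile implementation:

#include <algorithm>
#include <cstddef>

// out[i] = max(const0 * x[i] + const1 * y[i], 0), with const0 = const1 = 1.
void ewadd_relu(const float *x, const float *y, float *out, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) {
    out[i] = std::max(x[i] + y[i], 0.0f);  // fused add + ReLU
  }
}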
@@ -16,10 +16,10 @@ limitations under the License. */
 #include "fpga/api/fpga_api.h"
 namespace paddle_mobile {
 namespace operators {
 template <>
 bool FusionFcReluKernel<FPGA, float>::Init(FusionFcReluParam *param) {
   bool relu_enabled = true;
   bool bn_enabled = false;
   const Tensor *input_x = param->InputX();
@@ -31,11 +31,10 @@ namespace paddle_mobile {
   Tensor *out = param->Out();
   auto out_ptr = out->mutable_data<float>();
   fpga::ConvArgs convArgs;
   convArgs.relu_enabled = relu_enabled;
-  convArgs.bias_address = (void*)input_z_ptr;
-  convArgs.filter_address = (void*)input_y_ptr;
+  convArgs.bias_address = (void *)input_z_ptr;
+  convArgs.filter_address = (void *)input_y_ptr;
   convArgs.filter_num = out->dims()[1];
   convArgs.group_num = 1;
   convArgs.bn.enabled = bn_enabled;
@@ -43,24 +42,25 @@ namespace paddle_mobile {
   convArgs.kernel.stride_h = 1;
   convArgs.kernel.height = input_x->dims()[2];
   convArgs.kernel.width = input_x->dims()[3];
-  convArgs.image.address = (void*)input_x_ptr;
+  convArgs.image.address = (void *)input_x_ptr;
   convArgs.image.channels = input_x->dims()[1];
   convArgs.image.height = input_x->dims()[2];
   convArgs.image.width = input_x->dims()[3];
   convArgs.image.pad_height = 1;
   convArgs.image.pad_width = 1;
-  convArgs.image.scale_address = nullptr;//fc input has scale attribute??
-  convArgs.output.address = (void*)out_ptr;
-  convArgs.output.scale_address = nullptr;//fc output has scale attribute??
+  convArgs.image.scale_address = nullptr;  // fc input has scale attribute??
+  convArgs.output.address = (void *)out_ptr;
+  convArgs.output.scale_address = nullptr;  // fc output has scale attribute??
   param->SetFpgaArgs(convArgs);
   return true;
 }
 template <>
-void FusionFcReluKernel<FPGA, float>::Compute(const FusionFcReluParam &param) const {
+void FusionFcReluKernel<FPGA, float>::Compute(
+    const FusionFcReluParam &param) const {
   fpga::ComputeFpgaConv(param.FpgaArgs());
 };
 }  // namespace operators
 }  // namespace paddle_mobile
 #endif
@@ -16,10 +16,10 @@ limitations under the License. */
 #include "operators/kernel/fusion_fc_kernel.h"
 namespace paddle_mobile {
 namespace operators {
 template <>
 bool FusionFcKernel<FPGA, float>::Init(FusionFcParam *param) {
   bool relu_enabled = false;
   bool bn_enabled = false;
   const Tensor *input_x = param->InputX();
@@ -31,11 +31,10 @@ namespace paddle_mobile {
   Tensor *out = param->Out();
   auto out_ptr = out->mutable_data<float>();
   fpga::ConvArgs convArgs;
   convArgs.relu_enabled = relu_enabled;
-  convArgs.bias_address = (void*)input_z_ptr;
-  convArgs.filter_address = (void*)input_y_ptr;
+  convArgs.bias_address = (void *)input_z_ptr;
+  convArgs.filter_address = (void *)input_y_ptr;
   convArgs.filter_num = out->dims()[1];
   convArgs.group_num = 1;
   convArgs.bn.enabled = bn_enabled;
@@ -43,24 +42,24 @@ namespace paddle_mobile {
   convArgs.kernel.stride_h = 1;
   convArgs.kernel.height = input_x->dims()[2];
   convArgs.kernel.width = input_x->dims()[3];
-  convArgs.image.address = (void*)input_x_ptr;
+  convArgs.image.address = (void *)input_x_ptr;
   convArgs.image.channels = input_x->dims()[1];
   convArgs.image.height = input_x->dims()[2];
   convArgs.image.width = input_x->dims()[3];
   convArgs.image.pad_height = 1;
   convArgs.image.pad_width = 1;
-  convArgs.image.scale_address = nullptr;//fc input has scale attribute??
-  convArgs.output.address = (void*)out_ptr;
-  convArgs.output.scale_address = nullptr;//fc output has scale attribute??
+  convArgs.image.scale_address = nullptr;  // fc input has scale attribute??
+  convArgs.output.address = (void *)out_ptr;
+  convArgs.output.scale_address = nullptr;  // fc output has scale attribute??
   param->SetFpgaArgs(convArgs);
   return true;
 }
 template <>
 void FusionFcKernel<FPGA, float>::Compute(const FusionFcParam &param) const {
   fpga::ComputeFpgaConv(param.FpgaArgs());
 }
 }  // namespace operators
 }  // namespace paddle_mobile
 #endif
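Both FC kernels above reuse the FPGA convolution engine (ComputeFpgaConv) by describing the fully connected layer as a convolution whose kernel covers the entire input map (kernel.height/width equal the input's height/width), with the weights as filters and InputZ as the bias. The math being requested, as a hypothetical CPU sketch rather than the paddle-mobile implementation:

#include <algorithm>

// out[n][k] = sum_i x[n][i] * w[k][i] + bias[k], optionally through ReLU;
// relu_enabled mirrors FusionFcReluKernel (true) vs. FusionFcKernel (false).
void fc_forward(const float *x, const float *w, const float *bias, float *out,
                int batch, int in_dim, int out_dim, bool relu_enabled) {
  for (int n = 0; n < batch; ++n) {
    for (int k = 0; k < out_dim; ++k) {
      float acc = bias[k];
      for (int i = 0; i < in_dim; ++i) {
        acc += x[n * in_dim + i] * w[k * in_dim + i];
      }
      out[n * out_dim + k] = relu_enabled ? std::max(acc, 0.0f) : acc;
    }
  }
}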
@@ -18,10 +18,10 @@ limitations under the License. */
 class PoolingArgs;
 namespace paddle_mobile {
 namespace operators {
 template <>
 bool PoolKernel<FPGA, float>::Init(PoolParam *param) {
   const Tensor *input = param->Input();
   auto input_ptr = input->data<float>();
   Tensor *output = param->Output();
@@ -30,7 +30,7 @@ namespace paddle_mobile {
   vector<int> strides = param->Strides();
   vector<int> paddings = param->Paddings();
   fpga::PoolingArgs poolArgs;
-  poolArgs.image.address = (void*)input_ptr;
+  poolArgs.image.address = (void *)input_ptr;
   poolArgs.image.channels = input->dims()[1];
   poolArgs.image.height = input->dims()[2];
   poolArgs.image.width = input->dims()[3];
@@ -43,15 +43,15 @@ namespace paddle_mobile {
   poolArgs.kernel.stride_w = strides[1];
   param->SetFpgaArgs(poolArgs);
   return true;
 }
 template <>
 void PoolKernel<FPGA, float>::Compute(const PoolParam &param) const {
 #ifdef PADDLE_MOBILE_FPGA
   fpga::ComputeFpgaPool(param.FpgaArgs());
 #endif
 }
 }  // namespace operators
 }  // namespace paddle_mobile
 #endif
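For reference, the pooling the FPGA is configured for above is a strided window reduction per channel; the visible hunks set image dimensions, paddings, and strides but do not show the pooling mode, so max pooling is assumed in this hypothetical CPU sketch (padding omitted for brevity):

#include <algorithm>

// Max pooling over a CHW tensor with kernel (kh, kw) and strides (sh, sw).
void max_pool2d(const float *in, float *out, int channels, int h, int w,
                int kh, int kw, int sh, int sw) {
  const int oh = (h - kh) / sh + 1;  // output height
  const int ow = (w - kw) / sw + 1;  // output width
  for (int c = 0; c < channels; ++c) {
    for (int y = 0; y < oh; ++y) {
      for (int x = 0; x < ow; ++x) {
        float m = in[(c * h + y * sh) * w + x * sw];
        for (int dy = 0; dy < kh; ++dy) {
          for (int dx = 0; dx < kw; ++dx) {
            m = std::max(m, in[(c * h + y * sh + dy) * w + (x * sw + dx)]);
          }
        }
        out[(c * oh + y) * ow + x] = m;
      }
    }
  }
}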
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_FC_RELU_OP
#pragma once
#include "framework/operator.h"
#include "operators/math/math_function.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
template <typename DeviceType, typename T>
class FusionFcReluKernel
    : public framework::OpKernelBase<DeviceType, FusionFcReluParam> {
 public:
  void Compute(const FusionFcReluParam& param) const;
  bool Init(FusionFcReluParam* param);
};
} // namespace operators
} // namespace paddle_mobile
#endif