Unverified commit 8eeace69, authored by zhangyang0701, committed by GitHub

Merge pull request #694 from qnqinan/develop

Add FPGA ops and kernels, and update types.cpp. Closes #693.
@@ -81,8 +81,8 @@ std::unordered_map<
     {G_OP_TYPE_FUSION_CONV_ADD_RELU, {{"Input"}, {"Out"}}},
     {G_OP_TYPE_IM2SEQUENCE, {{"X"}, {"Out"}}},
     {G_OP_TYPE_DROPOUT, {{"X"}, {"Out"}}},
-    {G_OP_TYPE_FUSION_CONV_ADD_BN, {{"Input"}, {"Out"}}},
-    {G_OP_TYPE_FUSION_POOL_BN, {{"X"}, {"Out"}}},
+    {G_OP_TYPE_FUSION_CONV_ADD_BN, {{"Input"}, {"Y"}}},
+    {G_OP_TYPE_FUSION_POOL_BN, {{"X"}, {"Y"}}},
     {G_OP_TYPE_FUSION_ELEMENTWISE_ADD_RELU, {{"X", "Y"}, {"Out"}}},
     {G_OP_TYPE_FUSION_FC_RELU, {{"X", "Y", "Z"}, {"Out"}}},
     {G_OP_TYPE_REGION, {{"X"}, {"Out"}}}};
...
@@ -33,6 +33,7 @@ REGISTER_OPERATOR_CPU(dropout, ops::DropoutOp);
 #ifdef PADDLE_MOBILE_MALI_GPU
 #endif
 #ifdef PADDLE_MOBILE_FPGA
+REGISTER_OPERATOR_FPGA(dropout, ops::DropoutOp);
 #endif
 #endif
@@ -56,6 +56,7 @@ USE_OP_CPU(dropout);
 #ifdef PADDLE_MOBILE_MALI_GPU
 #endif
 #ifdef PADDLE_MOBILE_FPGA
+USE_OP_FPGA(dropout);
 #endif
 #endif
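A note on the REGISTER/USE macro pair seen in these hunks: each op needs both a registration in its .cpp and a USE_OP_* in the consuming translation unit. The sketch below is our guess at the mechanics, not paddle-mobile's actual macro bodies; the point is that registration happens via a static object, and USE_OP_* references a symbol from the op's .cpp so the linker cannot discard that registration in a static build.

// Hypothetical expansion sketch (names are illustrative):
#define REGISTER_OPERATOR_SKETCH(op_name) \
  int TouchOpRegistrar_##op_name() { return 0; }  // plus a static registrar object
#define USE_OP_SKETCH(op_name)             \
  extern int TouchOpRegistrar_##op_name(); \
  static int use_op_itself_##op_name = TouchOpRegistrar_##op_name()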
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_ELEMENTWISEADDRELU_OP
#include "fusion_elementwise_add_relu_op.h"
namespace paddle_mobile {
namespace operators {
template <typename Dtype, typename T>
void FusionElementwiseAddReluOp<Dtype, T>::InferShape() const {
auto x_dim = this->param_.InputX()->dims();
this->param_.Out()->Resize(x_dim);
}
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
// REGISTER_OPERATOR_CPU(fusion_elementwise_add_relu,
// ops::FusionElementwiseAddReluOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
// REGISTER_OPERATOR_MALI_GPU(fusion_elementwise_add_relu,
// ops::FusionElementwiseAddReluOp);
#endif
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA(fusion_elementwise_add_relu,
ops::FusionElementwiseAddReluOp);
#endif
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_ELEMENTWISEADDRELU_OP
#pragma once
#include <string>
#include "framework/operator.h"
#include "framework/program/program-optimize/fusion_op_register.h"
#include "operators/kernel/elementwise_add_relu_kernel.h"
namespace paddle_mobile {
namespace operators {
using std::string;
using std::vector;
class FusioneElementwiseAddReluMatcher : public framework::FusionOpMatcher {
public:
FusioneElementwiseAddReluMatcher() {
    node_ = framework::Node(G_OP_TYPE_ELEMENTWISE_ADD);
    // operator> chains the pattern to match: elementwise_add followed by relu.
    node_ > std::make_shared<framework::Node>(G_OP_TYPE_RELU);
}
void FolderNodes(
framework::Node *node,
std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
node->Folder(node_.Depth(), Type(), {}, removed_nodes);
}
std::string Type() { return G_OP_TYPE_FUSION_ELEMENTWISE_ADD_RELU; }
};
template <typename DeviceType, typename T>
class FusionElementwiseAddReluOp
: public framework::OperatorWithKernel<
DeviceType, ElementwiseAddReluParam,
operators::ElementwiseAddReluKernel<DeviceType, T>> {
public:
FusionElementwiseAddReluOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<
DeviceType, ElementwiseAddReluParam,
operators::ElementwiseAddReluKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
void InferShape() const override;
};
#ifdef PADDLE_MOBILE_CPU
/*
#ifndef FUSION_ELEMENTWISE_ADD_RELU_REGISTER
static framework::FusionOpRegistrar fusion_elementwise_relu_registrar(
new FusioneElementwiseAddReluMatcher());
#define FUSION_ELEMENTWISE_ADD_RELU_REGISTER
#endif
*/
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
/*
#ifndef FUSION_ELEMENTWISE_ADD_RELU_REGISTER
static framework::FusionOpRegistrar fusion_elementwise_relu_registrar(
new FusioneElementwiseAddReluMatcher());
#define FUSION_ELEMENTWISE_ADD_RELU_REGISTER
#endif
*/
#endif
#ifdef PADDLE_MOBILE_FPGA
#ifndef FUSION_ELEMENTWISE_ADD_RELU_REGISTER
static framework::FusionOpRegistrar fusion_elementwise_relu_registrar(
new FusioneElementwiseAddReluMatcher());
#define FUSION_ELEMENTWISE_ADD_RELU_REGISTER
#endif
} // namespace operators
} // namespace paddle_mobile
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(fusion_elementwise_add_relu);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#endif
#ifdef PADDLE_MOBILE_FPGA
USE_OP_FPGA(fusion_elementwise_add_relu);
#endif
#endif
#endif
@@ -61,6 +61,7 @@ REGISTER_OPERATOR_CPU(fusion_fc, ops::FusionFcOp);
 REGISTER_OPERATOR_MALI_GPU(fusion_fc, ops::FusionFcOp);
 #endif
 #ifdef PADDLE_MOBILE_FPGA
+REGISTER_OPERATOR_FPGA(fusion_fc, ops::FusionFcOp);
 #endif
 #endif
@@ -67,8 +67,8 @@ class FusionFcOp
 #ifdef PADDLE_MOBILE_CPU
-#ifndef CONV_CPU_REGISTER
-#define CONV_CPU_REGISTER
+#ifndef FUSION_FC_CPU_REGISTER
+#define FUSION_FC_CPU_REGISTER
 static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
 #endif
@@ -84,6 +84,10 @@ static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
 #endif
 #ifdef PADDLE_MOBILE_FPGA
+#ifndef FUSION_FC_CPU_REGISTER
+#define FUSION_FC_CPU_REGISTER
+static framework::FusionOpRegistrar fc_registrar(new FusionFcMatcher());
+#endif
 #endif
 }  // namespace operators
@@ -96,6 +100,7 @@ USE_OP_CPU(fusion_fc);
 USE_OP_MALI_GPU(fusion_fc);
 #endif
 #ifdef PADDLE_MOBILE_FPGA
+USE_OP_FPGA(fusion_fc);
 #endif
 #endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_FCRELU_OP
#include "operators/fusion_fc_relu_op.h"
namespace paddle_mobile {
namespace operators {
template <typename Dtype, typename T>
void FusionFcReluOp<Dtype, T>::InferShape() const {
auto x_dims = this->param_.InputX()->dims();
auto y_dims = this->param_.InputY()->dims();
int x_num_col_dims = this->param_.XNumColDims();
int y_num_col_dims = this->param_.YNumColDims();
assert(x_dims.size() > x_num_col_dims);
assert(y_dims.size() > y_num_col_dims);
/// (1,2,3,4) , x_num_col_dims = 2 -> (2,12)
auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims);
auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims);
assert(x_mat_dims[1] == y_mat_dims[0]);
std::vector<int64_t> output_dims;
output_dims.reserve(
static_cast<size_t>(x_num_col_dims + y_dims.size() - y_num_col_dims));
for (int i = 0; i < x_num_col_dims; ++i) {
output_dims.push_back(x_dims[i]);
}
for (int i = y_num_col_dims; i < y_dims.size(); ++i) {
output_dims.push_back(y_dims[i]);
}
framework::DDim ddim = framework::make_ddim(output_dims);
this->param_.Out()->Resize(ddim);
}
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(fusion_fc_relu, ops::FusionFcReluOp);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
REGISTER_OPERATOR_MALI_GPU(fusion_fc_relu, ops::FusionFcReluOp);
#endif
#ifdef PADDLE_MOBILE_FPGA
REGISTER_OPERATOR_FPGA(fusion_fc_relu, ops::FusionFcReluOp);
#endif
#endif
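As a reading aid for the InferShape() above: a minimal standalone sketch of the same shape rule, using plain vectors instead of framework::DDim. The helper name FcOutputDims is ours for illustration, not part of paddle-mobile.

#include <cstdint>
#include <vector>

// Output dims = leading x dims (before the flatten axis) + trailing y dims
// (after the flatten axis), mirroring the two loops in InferShape().
std::vector<int64_t> FcOutputDims(const std::vector<int64_t> &x_dims,
                                  const std::vector<int64_t> &y_dims,
                                  int x_num_col_dims, int y_num_col_dims) {
  std::vector<int64_t> out;
  for (int i = 0; i < x_num_col_dims; ++i) out.push_back(x_dims[i]);
  for (size_t i = y_num_col_dims; i < y_dims.size(); ++i)
    out.push_back(y_dims[i]);
  return out;
}
// Example: x = (1, 2, 3, 4) with x_num_col_dims = 2 flattens to a 2 x 12
// matrix; with y = (12, 5) and y_num_col_dims = 1, the output is (1, 2, 5).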
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_FCRELU_OP
#pragma once
#include <string>
#include <vector>
#include "framework/operator.h"
#include "framework/program/program-optimize/fusion_op_register.h"
#include "operators/kernel/fc_relu_kernel.h"
namespace paddle_mobile {
namespace operators {
using std::string;
using std::vector;
class FusionFcReluMatcher : public framework::FusionOpMatcher {
public:
FusionFcReluMatcher() {
node_ = framework::Node(G_OP_TYPE_MUL);
node_ > std::make_shared<framework::Node>(G_OP_TYPE_ELEMENTWISE_ADD) >
std::make_shared<framework::Node>(G_OP_TYPE_RELU);
}
void FolderNodes(
framework::Node *node,
std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
node->Folder(node_.Depth(), Type(),
{{G_OP_TYPE_ELEMENTWISE_ADD, {{"Y", "Z"}}}}, removed_nodes);
}
std::string Type() { return G_OP_TYPE_FUSION_FC_RELU; }
};
template <typename DeviceType, typename T>
class FusionFcReluOp : public framework::OperatorWithKernel<
DeviceType, FusionFcReluParam,
operators::FusionFcReluKernel<DeviceType, T>> {
public:
FusionFcReluOp(const string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<
DeviceType, FusionFcReluParam,
operators::FusionFcReluKernel<DeviceType, T>>(type, inputs, outputs,
attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, FusionFcReluParam,
operators::FusionFcReluKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
};
#ifdef PADDLE_MOBILE_CPU
#ifndef FUSION_FC_RELU_REGISTER
#define FUSION_FC_RELU_REGISTER
static framework::FusionOpRegistrar fc_relu_registrar(
new FusionFcReluMatcher());
#endif
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
#ifndef FUSION_FC_RELU_REGISTER
#define FUSION_FC_RELU_REGISTER
static framework::FusionOpRegistrar fc_relu_registrar(
new FusionFcReluMatcher());
#endif
#endif
#ifdef PADDLE_MOBILE_FPGA
#ifndef FUSION_FC_RELU_REGISTER
#define FUSION_FC_RELU_REGISTER
static framework::FusionOpRegistrar fc_relu_registrar(
new FusionFcReluMatcher());
#endif
#endif
} // namespace operators
} // namespace paddle_mobile
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(fusion_fc_relu);
#endif
#ifdef PADDLE_MOBILE_MALI_GPU
USE_OP_MALI_GPU(fusion_fc_relu);
#endif
#ifdef PADDLE_MOBILE_FPGA
USE_OP_FPGA(fusion_fc_relu);
#endif
#endif  // FUSION_FCRELU_OP
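The operator> chaining in the matchers above is easy to misread; here is a self-contained toy analogy (ToyNode is ours, not the framework class) showing why mul > elementwise_add > relu builds a left-to-right pattern chain.

#include <memory>
#include <string>
#include <vector>

// Toy stand-in for framework::Node: operator> links a successor and
// returns a reference to it, so chained calls read like the op sequence.
struct ToyNode {
  std::string type;
  std::vector<std::shared_ptr<ToyNode>> outputs;
  explicit ToyNode(std::string t) : type(std::move(t)) {}
  ToyNode &operator>(std::shared_ptr<ToyNode> next) {
    outputs.push_back(next);
    return *next;
  }
};

// Mirrors FusionFcReluMatcher's constructor: a three-op pattern that the
// optimizer later collapses into one fused node via FolderNodes().
void BuildPattern() {
  ToyNode root("mul");
  root > std::make_shared<ToyNode>("elementwise_add") >
      std::make_shared<ToyNode>("relu");
}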
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_ELEMENTWISEADDRELU_OP
#pragma once
#include "framework/operator.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using namespace framework;
template <typename DeviceType, typename T>
class ElementwiseAddReluKernel
: public framework::OpKernelBase<DeviceType, ElementwiseAddReluParam> {
public:
void Compute(const ElementwiseAddReluParam &param) const;
bool Init(ElementwiseAddReluParam *param);
};
} // namespace operators
} // namespace paddle_mobile
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_FCRELU_OP
#pragma once
#include "framework/operator.h"
#include "operators/math/math_function.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
template <typename DeviceType, typename T>
class FusionFcReluKernel
: public framework::OpKernelBase<DeviceType, FusionFcReluParam> {
public:
void Compute(const FusionFcReluParam& param) const;
bool Init(FusionFcReluParam* param);
};
} // namespace operators
} // namespace paddle_mobile
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef DROPOUT_OP
#include "operators/kernel/dropout_kernel.h"
namespace paddle_mobile {
namespace operators {
template <>
bool DropoutKernel<FPGA, float>::Init(DropoutParam *param) {
param->Out()->ShareDataWith(*param->InputX());
return true;
}
template <>
void DropoutKernel<FPGA, float>::Compute(const DropoutParam &param) const {
  // Nothing to do at inference time: Init() already aliased the output
  // tensor to the input via ShareDataWith(), so dropout is a zero-copy
  // pass-through on FPGA.
}
} // namespace operators
} // namespace paddle_mobile
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_ELEMENTWISEADDRELU_OP
#include "operators/kernel/elementwise_add_relu_kernel.h"
namespace paddle_mobile {
namespace operators {
template <>
bool ElementwiseAddReluKernel<FPGA, float>::Init(
ElementwiseAddReluParam *param) {
bool relu_enabled = true;
const Tensor *input_x = param->InputX();
const Tensor *input_y = param->InputY();
Tensor *out = param->Out();
auto input_x_ptr = input_x->data<float>();
auto input_y_ptr = input_y->data<float>();
auto out_ptr = out->mutable_data<float>();
fpga::EWAddArgs ewaddArgs;
ewaddArgs.relu_enabled = relu_enabled;
ewaddArgs.const0 = 1;
ewaddArgs.const1 = 1;
  ewaddArgs.image0.address = (void *)input_x_ptr;
  ewaddArgs.image0.channels = input_x->dims()[1];  // NCHW: dims() = {N, C, H, W}
  ewaddArgs.image0.scale_address =
      input_x->fpga_args().scale_pointer();  // TODO: confirm EW-add inputs carry a scale attribute
  ewaddArgs.image0.height = input_x->dims()[2];
  ewaddArgs.image0.width = input_x->dims()[3];
  ewaddArgs.image0.pad_height = 0;
  ewaddArgs.image0.pad_width = 0;
ewaddArgs.image1.address = (void *)input_y_ptr;
ewaddArgs.image1.channels = input_y->dims()[1];
  ewaddArgs.image1.scale_address =
      input_y->fpga_args().scale_pointer();  // TODO: confirm EW-add inputs carry a scale attribute
ewaddArgs.image1.height = input_y->dims()[2];
ewaddArgs.image1.width = input_y->dims()[3];
ewaddArgs.image1.pad_height = 0;
ewaddArgs.image1.pad_width = 0;
ewaddArgs.output.scale_address = out->fpga_args().scale_pointer();
ewaddArgs.output.address = (void *)out_ptr;
param->SetFpgaArgs(ewaddArgs);
return true;
}
template <>
void ElementwiseAddReluKernel<FPGA, float>::Compute(
const ElementwiseAddReluParam &param) const {
fpga::ComputeFpgaEWAdd(param.FpgaArgs());
}
} // namespace operators
} // namespace paddle_mobile
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_FCRELU_OP
#include "operators/kernel/fc_relu_kernel.h"
#include "fpga/api/fpga_api.h"
namespace paddle_mobile {
namespace operators {
template <>
bool FusionFcReluKernel<FPGA, float>::Init(FusionFcReluParam *param) {
bool relu_enabled = true;
const Tensor *input_x = param->InputX();
auto input_x_ptr = input_x->data<float>();
const Tensor *input_y = param->InputY();
auto input_y_ptr = input_y->data<float>();
const Tensor *input_z = param->InputZ();
auto input_z_ptr = input_z->data<float>();
Tensor *out = param->Out();
auto out_ptr = out->mutable_data<float>();
PADDLE_MOBILE_ENFORCE(input_x->dims()[1] == input_y->dims()[0],
"Image channel should be equal to weight number");
int channel = input_x->dims()[1];
  // Pack per-channel (scale, bias) pairs interleaved, as the FPGA conv
  // expects: scale is fixed to 1, bias comes from the Z input.
  float *bs_ptr = (float *)fpga::fpga_malloc(2 * channel * sizeof(float));
  for (int i = 0; i < channel; i++) {
    bs_ptr[i * 2] = 1;
    bs_ptr[i * 2 + 1] = input_z_ptr[i];
  }
fpga::ConvArgs convArgs;
convArgs.relu_enabled = relu_enabled;
convArgs.bias_address = (void *)input_z_ptr;
convArgs.filter_address = (void *)input_y_ptr;
convArgs.filter_num = out->dims()[1];
convArgs.group_num = 1;
convArgs.sb_address = (void *)bs_ptr;
convArgs.kernel.stride_w = 1;
convArgs.kernel.stride_h = 1;
convArgs.kernel.height = input_x->dims()[2];
convArgs.kernel.width = input_x->dims()[3];
convArgs.image.address = (void *)input_x_ptr;
convArgs.image.channels = input_x->dims()[1];
convArgs.image.height = input_x->dims()[2];
convArgs.image.width = input_x->dims()[3];
convArgs.image.pad_height = 0;
convArgs.image.pad_width = 0;
  convArgs.image.scale_address =
      input_x->fpga_args().scale_pointer();  // TODO: confirm FC input carries a scale attribute
  convArgs.output.address = (void *)out_ptr;
  convArgs.output.scale_address =
      out->fpga_args().scale_pointer();  // TODO: confirm FC output carries a scale attribute
param->SetFpgaArgs(convArgs);
return true;
}
template <>
void FusionFcReluKernel<FPGA, float>::Compute(
const FusionFcReluParam &param) const {
fpga::ComputeFpgaConv(param.FpgaArgs());
}
} // namespace operators
} // namespace paddle_mobile
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_FC_OP
#include "operators/kernel/fusion_fc_kernel.h"
namespace paddle_mobile {
namespace operators {
template <>
bool FusionFcKernel<FPGA, float>::Init(FusionFcParam *param) {
bool relu_enabled = false;
const Tensor *input_x = param->InputX();
auto input_x_ptr = input_x->data<float>();
const Tensor *input_y = param->InputY();
auto input_y_ptr = input_y->data<float>();
const Tensor *input_z = param->InputZ();
auto input_z_ptr = input_z->data<float>();
Tensor *out = param->Out();
auto out_ptr = out->mutable_data<float>();
PADDLE_MOBILE_ENFORCE(input_x->dims()[1] == input_y->dims()[0],
"Image channel should be equal to weight number");
int channel = input_x->dims()[1];
  // Pack per-channel (scale, bias) pairs interleaved, as the FPGA conv
  // expects: scale is fixed to 1, bias comes from the Z input.
  float *bs_ptr = (float *)fpga::fpga_malloc(2 * channel * sizeof(float));
  for (int i = 0; i < channel; i++) {
    bs_ptr[i * 2] = 1;
    bs_ptr[i * 2 + 1] = input_z_ptr[i];
  }
fpga::ConvArgs convArgs;
convArgs.relu_enabled = relu_enabled;
convArgs.bias_address = (void *)input_z_ptr;
convArgs.filter_address = (void *)input_y_ptr;
convArgs.filter_num = out->dims()[1];
convArgs.group_num = 1;
convArgs.sb_address = (void *)bs_ptr;
convArgs.kernel.stride_w = 1;
convArgs.kernel.stride_h = 1;
convArgs.kernel.height = input_x->dims()[2];
convArgs.kernel.width = input_x->dims()[3];
convArgs.image.address = (void *)input_x_ptr;
convArgs.image.channels = input_x->dims()[1];
convArgs.image.height = input_x->dims()[2];
convArgs.image.width = input_x->dims()[3];
convArgs.image.pad_height = 0;
convArgs.image.pad_width = 0;
  convArgs.image.scale_address =
      input_x->fpga_args().scale_pointer();  // TODO: confirm FC input carries a scale attribute
  convArgs.output.address = (void *)out_ptr;
  convArgs.output.scale_address =
      out->fpga_args().scale_pointer();  // TODO: confirm FC output carries a scale attribute
param->SetFpgaArgs(convArgs);
return true;
}
template <>
void FusionFcKernel<FPGA, float>::Compute(const FusionFcParam &param) const {
fpga::ComputeFpgaConv(param.FpgaArgs());
}
} // namespace operators
} // namespace paddle_mobile
#endif
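Both FC kernels above build the same interleaved scale/bias buffer; below is a hedged standalone sketch of just that packing. PackScaleBias is our name, and plain std::vector replaces fpga::fpga_malloc for illustration.

#include <vector>

// Per output channel the FPGA conv expects an interleaved (scale, bias)
// pair; the kernels above fix scale to 1 and take bias from input Z.
std::vector<float> PackScaleBias(const std::vector<float> &bias) {
  std::vector<float> sb(2 * bias.size());
  for (size_t i = 0; i < bias.size(); ++i) {
    sb[2 * i] = 1.0f;         // scale
    sb[2 * i + 1] = bias[i];  // bias
  }
  return sb;
}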
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef POOL_OP
#include "operators/kernel/pool_kernel.h"
class PoolingArgs;
namespace paddle_mobile {
namespace operators {
template <>
bool PoolKernel<FPGA, float>::Init(PoolParam *param) {
const Tensor *input = param->Input();
auto input_ptr = input->data<float>();
Tensor *output = param->Output();
auto output_ptr = output->mutable_data<float>();
vector<int> ksize = param->Ksize();
vector<int> strides = param->Strides();
vector<int> paddings = param->Paddings();
fpga::PoolingArgs poolArgs;
poolArgs.image.address = (void *)input_ptr;
poolArgs.image.channels = input->dims()[1];
poolArgs.image.height = input->dims()[2];
poolArgs.image.width = input->dims()[3];
poolArgs.image.pad_height = paddings[0];
poolArgs.image.pad_width = paddings[1];
poolArgs.output.address = output_ptr;
poolArgs.kernel.height = ksize[0];
poolArgs.kernel.width = ksize[1];
poolArgs.kernel.stride_h = strides[0];
poolArgs.kernel.stride_w = strides[1];
param->SetFpgaArgs(poolArgs);
return true;
}
template <>
void PoolKernel<FPGA, float>::Compute(const PoolParam &param) const {
#ifdef PADDLE_MOBILE_FPGA
fpga::ComputeFpgaPool(param.FpgaArgs());
#endif
}
} // namespace operators
} // namespace paddle_mobile
#endif
@@ -66,6 +66,7 @@ REGISTER_OPERATOR_CPU(pool2d, ops::PoolOp);
 REGISTER_OPERATOR_MALI_GPU(pool2d, ops::PoolOp);
 #endif
 #ifdef PADDLE_MOBILE_FPGA
+REGISTER_OPERATOR_FPGA(pool2d, ops::PoolOp);
 #endif
 #endif
@@ -55,6 +55,7 @@ USE_OP_CPU(pool2d);
 USE_OP_MALI_GPU(pool2d);
 #endif
 #ifdef PADDLE_MOBILE_FPGA
+USE_OP_FPGA(pool2d);
 #endif
 #endif