Commit d0de3eac authored by qnqinan

update FPGA support op and related files

Parent 202d5f29
@@ -21,7 +21,8 @@ namespace operators {}
 }  // namespace paddle_mobile
 namespace ops = paddle_mobile::operators;
-REGISTER_FUSION_MATCHER(fusion_deconv_add_relu, ops::FusionDeconvAddReluMatcher);
+REGISTER_FUSION_MATCHER(fusion_deconv_add_relu,
+                        ops::FusionDeconvAddReluMatcher);
 #ifdef PADDLE_MOBILE_CPU
 #endif
 #ifdef PADDLE_MOBILE_MALI_GPU
@@ -43,7 +43,8 @@ class FusionDeconvAddReluMatcher : public framework::FusionOpMatcher {
 };
 template <typename DeviceType, typename T>
-class FusionDeconvAddReluOp : public framework::OperatorWithKernel<
+class FusionDeconvAddReluOp
+    : public framework::OperatorWithKernel<
          DeviceType, FusionDeconvAddReluParam<DeviceType>,
          operators::DeconvAddReluKernel<DeviceType, T>> {
  public:
@@ -53,8 +54,8 @@ class FusionDeconvAddReluOp : public framework::OperatorWithKernel<
                        std::shared_ptr<framework::Scope> scope)
       : framework::OperatorWithKernel<
             DeviceType, FusionDeconvAddReluParam<DeviceType>,
-            operators::DeconvAddReluKernel<DeviceType, T>>(type, inputs, outputs,
-                                                           attrs, scope) {}
+            operators::DeconvAddReluKernel<DeviceType, T>>(
+            type, inputs, outputs, attrs, scope) {}
   void InferShape() const {
     auto input = this->param_.Input();
@@ -22,7 +22,8 @@ namespace paddle_mobile {
 namespace operators {
 template <>
-bool DeconvAddReluKernel<FPGA, float>::Init(FusionDeconvAddReluParam<FPGA> *param) {
+bool DeconvAddReluKernel<FPGA, float>::Init(
+    FusionDeconvAddReluParam<FPGA> *param) {
   return true;
 }
@@ -25,8 +25,9 @@ bool Transpose2Kernel<FPGA, float>::Init(Transpose2Param<FPGA> *param) {
 }
 template <>
-void Transpose2Kernel<FPGA, float>::Compute(const Transpose2Param<FPGA> &param) {
-  //Transpose2Compute<float>(param);
+void Transpose2Kernel<FPGA, float>::Compute(
+    const Transpose2Param<FPGA> &param) {
+  // Transpose2Compute<float>(param);
 }
 }  // namespace operators
@@ -2221,7 +2221,7 @@ class ConvTransposeParam : public OpParam {
                      const Scope &scope) {
     filter_ = FilterFrom<GType>(inputs, scope);
     input_ = InputFrom<GType>(inputs, scope);
-    //output_ = OutputFrom<GType>(outputs, scope);
+    // output_ = OutputFrom<GType>(outputs, scope);
     if (outputs.count("Output")) {
       output_ = OpParam::OutputFrom<GType>(outputs, scope);
     }
@@ -2270,11 +2270,12 @@ template <typename Dtype>
 class FusionDeconvAddParam : public ConvTransposeParam<Dtype> {
   typedef typename DtypeTensorTrait<Dtype>::gtype GType;
   typedef typename DtypeTensorTrait<Dtype>::rtype RType;
-public:
+ public:
   FusionDeconvAddParam(const VariableNameMap &inputs,
-                       const VariableNameMap &outputs, const AttributeMap &attrs,
-                       const Scope &scope)
-      :ConvTransposeParam<Dtype>(inputs, outputs, attrs, scope) {
+                       const VariableNameMap &outputs,
+                       const AttributeMap &attrs, const Scope &scope)
+      : ConvTransposeParam<Dtype>(inputs, outputs, attrs, scope) {
     bias_ = OpParam::InputYFrom<GType>(inputs, scope);
     axis_ = OpParam::GetAttr<int>("axis", attrs);
     output_ = OpParam::OutFrom<GType>(outputs, scope);
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #ifdef SPLIT_OP
-#include <vector>
 #include "operators/split_op.h"
+#include <vector>
 namespace paddle_mobile {
 namespace operators {