提交 0575e7d7 编写于 作者: L liuruilong

compile success

上级 4693638f
......@@ -7,7 +7,7 @@ option(DEBUGING "enable debug mode" ON)
option(USE_EXCEPTION "use std exception" OFF)
option(LOG_PROFILE "log profile" OFF)
# select the platform to build
option(CPU "armv7 with neon" OFF)
option(CPU "armv7 with neon" ON)
option(GPU_MALI "mali gpu" OFF)
option(GPU_CL "opencl gpu" ON)
option(FPGA "fpga" OFF)
......@@ -72,8 +72,8 @@ endif()
if (GPU_CL)
add_definitions(-DPADDLE_MOBILE_CL)
include_directories(third_party/opecl/OpenCL-Headers)
link_libraries(third_party/opecl/libOpenCL.so)
link_libraries(/Users/liuruilong/GitHub/paddle-mobile/third_party/opencl/libOpenCL.so)
include_directories(third_party/opencl/OpenCL-Headers)
else()
file(GLOB_RECURSE _tmp_list src/framework/cl/*.cpp src/operators/kernel/cl/*.cpp)
foreach(f ${_tmp_list})
......@@ -82,6 +82,7 @@ else()
file(GLOB_RECURSE _tmp_list_h src/framework/cl/*.h)
foreach(f ${_tmp_list_h})
message(STATUS ${f})
list(REMOVE_ITEM PADDLE_MOBILE_H ${f})
endforeach()
endif()
......
......@@ -65,7 +65,7 @@ bool CLEngine::SetClDeviceId() {
clGetDeviceIDs(platform_, CL_DEVICE_TYPE_GPU, 0, NULL, &numDevices);
if (numDevices > 0) {
devices_ = reinterpret_cast<cl_platform_id *>(
devices_ = reinterpret_cast<cl_device_id *>(
malloc(numDevices * sizeof(cl_device_id)));
status = clGetDeviceIDs(platform_, CL_DEVICE_TYPE_GPU, numDevices, devices_,
NULL);
......
......@@ -38,8 +38,8 @@ class CLEngine {
std::unique_ptr<_cl_command_queue, CLCommQueueDeleter> CreateClCommandQueue() {
cl_int status;
cl_command_queue = clCreateCommandQueue(context_.get(), devices_[0], 0, &status);
std::unique_ptr<_cl_command_queue, CLCommQueueDeleter> command_queue_ptr(cl_command_queue);
cl_command_queue queue = clCreateCommandQueue(context_.get(), devices_[0], 0, &status);
std::unique_ptr<_cl_command_queue, CLCommQueueDeleter> command_queue_ptr(queue);
return std::move(command_queue_ptr);
}
......@@ -67,7 +67,7 @@ class CLEngine {
return std::move(program_ptr);
}
bool CLEngine::BuildProgram(cl_program program) {
bool BuildProgram(cl_program program) {
cl_int status;
status = clBuildProgram(program, 0, 0, "-cl-fast-relaxed-math", 0, 0);
CL_CHECK_ERRORS(status);
......
......@@ -25,6 +25,7 @@ namespace framework {
class CLHelper {
public:
CLHelper() = default;
CLHelper(CLScope *scope): scope_(scope) {
}
......
......@@ -57,7 +57,7 @@ class CLScope {
auto program = CLEngine::Instance()->CreateProgramWith(context_.get(), file_name);
programs_[file_name] = std::move(program);
status_ = clBuildProgram(program, 0, 0, 0, 0, 0);
status_ = clBuildProgram(program.get(), 0, 0, 0, 0, 0);
CL_CHECK_ERRORS(status_);
return program.get();
}
......
......@@ -29,7 +29,6 @@ const char* opencl_error_to_str (cl_int error);
opencl_error_to_str(ERR), __FILE__, __LINE__ \
); \
}
#endif
}
}
......@@ -15,7 +15,9 @@ limitations under the License. */
#pragma once
#include <string>
#include <memory>
#include <tuple>
#include "common/log.h"
#include "common/type_define.h"
#include "framework/op_info.h"
......@@ -90,6 +92,7 @@ class OpRegistry {
const std::string& type, const VariableNameMap& inputs,
const VariableNameMap& outputs, const AttributeMap attrs,
std::shared_ptr<paddle_mobile::framework::Scope> scope) {
auto& info = OpInfoMap<Dtype>::Instance()->Get(type);
auto op = info.Creator()(type, inputs, outputs, attrs, scope);
return std::shared_ptr<OperatorBase<Dtype>>(op);
......
......@@ -56,7 +56,7 @@ template <typename Dtype>
void OperatorBase<Dtype>::CheckAllInputOutputSet() const {}
template <typename Dtype>
void OperatorBase<Dtype>::Run() const {
void OperatorBase<Dtype>::Run() {
RunImpl();
#ifdef PADDLE_MOBILE_DEBUG
DLOG << "-------------" << type_ << "----------------------------";
......@@ -87,6 +87,7 @@ void OperatorBase<Dtype>::Run() const {
template class OperatorBase<CPU>;
template class OperatorBase<FPGA>;
template class OperatorBase<GPU_MALI>;
template class OperatorBase<GPU_CL>;
} // namespace framework
} // namespace paddle_mobile
......@@ -31,9 +31,10 @@ limitations under the License. */
#include "framework/scope.h"
#include "framework/tensor.h"
#include "framework/variable.h"
#ifdef PADDLE_MOBILE_CL
#include "framework/cl/cl_scope.h"
#include "framework/cl/cl_helper.h"
#endif
namespace paddle_mobile {
namespace framework {
using std::string;
......@@ -61,10 +62,10 @@ class OperatorBase {
const VariableNameMap &outputs, const AttributeMap &attrs,
std::shared_ptr<Scope> scope);
virtual ~OperatorBase() {}
void Run() const;
void Run();
std::vector<string> GetOutKeys() const;
std::vector<string> GetInputKeys() const;
virtual void RunImpl() const = 0;
virtual void RunImpl() = 0;
virtual void Init() = 0;
/*
......@@ -114,10 +115,9 @@ class OperatorWithKernel : public OperatorBase<Dtype> {
const VariableNameMap &outputs, const AttributeMap &attrs,
std::shared_ptr<Scope> scope)
: OperatorBase<Dtype>(type, inputs, outputs, attrs, scope),
param_(inputs, outputs, attrs, *scope),
kernel_(scope->GetCLScpoe()) {}
param_(inputs, outputs, attrs, *scope) {}
virtual void RunImpl() const { this->kernel_.Compute(this->param_); }
virtual void RunImpl() { this->kernel_.Compute(this->param_); }
virtual void InferShape() const = 0;
......@@ -143,31 +143,33 @@ class OpKernelBase {
public:
OpKernelBase() = default;
OpKernelBase(CLScope *clscope): cl_helper_(clscope) {
}
// OpKernelBase(CLScope *clscope): cl_helper_(CLHelper(clscope)) {
// }
/*
* @b 所有kernel 需实现 Compute 方法
* @p para 这个参数为 kernel 运算时所需要用到参数组成的一个结构体,
* 所有结构体存在与: paddle-mobile/src/operators/op_param.h
* */
#ifdef PADDLE_MOBILE_MALI_GPU
#ifdef PADDLE_McOBILE_MALI_GPU
OpKernelBase() { acl_op_ = nullptr; }
void *GetAclOp() const { return acl_op_; }
void SetAclOp(void *op, void *ob) const {
reinterpret_cast<OpKernelBase<Dtype, P> *>(ob)->acl_op_ = op;
}
#endif
virtual void Compute(const P &para) const = 0;
virtual void Compute(const P &para) = 0;
virtual bool Init(P *para) { return true; }
virtual ~OpKernelBase() = default;
protected:
// CLHelper cl_helper_;
private:
#ifdef PADDLE_MOBILE_MALI_GPU
void *acl_op_;
#endif
CLHelper cl_helper_;
};
......
......@@ -16,9 +16,9 @@ limitations under the License. */
#include <list>
//#ifdef PADDLE_MOBILE_OCL
#ifdef PADDLE_MOBILE_CL
#include "framework/cl/cl_scope.h"
//#endif
#endif
#include <unordered_map>
#include "variable.h"
......@@ -39,9 +39,9 @@ class Scope {
}
kids_.clear();
//#ifdef PADDLE_MOBILE_OCL
#ifdef PADDLE_MOBILE_CL
delete cl_scope_;
//#endif
#endif
}
......@@ -82,9 +82,11 @@ class Scope {
Variable *FindVarLocally(const std::string &name) const;
#ifdef PADDLE_MOBILE_CL
CLScope *GetCLScpoe() {
return cl_scope_;
}
#endif
private:
// Call Scope::NewScope for a sub-scope.
......@@ -93,9 +95,11 @@ class Scope {
mutable std::unordered_map<std::string, Variable *> vars_;
mutable std::list<Scope *> kids_;
Scope const *parent_{nullptr};
//#ifdef PADDLE_MOBILE_CL
#ifdef PADDLE_MOBILE_CL
CLScope *cl_scope_ = new CLScope();
//#endif
#endif
};
} // namespace framework
} // namespace paddle_mobile
......@@ -720,7 +720,8 @@ void Executor<Dtype, P>::Predict_To(int end) {
#endif
template class Executor<CPU, Precision::FP32>;
template class Executor<GPU_MALI, Precision::FP32>;
template class Executor<FPGA, Precision::FP32>;
template class Executor<GPU_CL, Precision::FP32>;
template class Executor<GPU_MALI, Precision::FP32>;
} // namespace paddle_mobile
......@@ -193,5 +193,6 @@ const framework::Program<Dtype, P> Loader<Dtype, P>::LoadCombinedMemory(
template class Loader<CPU, Precision::FP32>;
template class Loader<FPGA, Precision::FP32>;
template class Loader<GPU_MALI, Precision::FP32>;
template class Loader<GPU_CL, Precision::FP32>;
} // namespace paddle_mobile
......@@ -40,10 +40,6 @@ class BilinearOp : public framework::OperatorWithKernel<
DeviceType, BilinearInterpParam<DeviceType>,
operators::BilinearInterpKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, BilinearInterpParam<DeviceType>,
operators::BilinearInterpKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
};
......
......@@ -39,10 +39,6 @@ class BoxCoderOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, BoxCoderParam<DeviceType>,
operators::BoxCoderKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, BoxCoderParam<DeviceType>,
operators::BoxCoderKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -34,10 +34,6 @@ class ConcatOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, ConcatParam<DeviceType>,
operators::ConcatKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, ConcatParam<DeviceType>,
operators::ConcatKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -34,10 +34,6 @@ class ConvOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, ConvParam<DeviceType>,
operators::ConvKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, ConvParam<DeviceType>,
operators::ConvKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
private:
......
......@@ -37,10 +37,6 @@ class CrfOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, CrfParam<DeviceType>,
operators::CrfKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, CrfParam<DeviceType>,
operators::CrfKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
};
......
......@@ -36,10 +36,6 @@ class DepthwiseConvOp : public framework::OperatorWithKernel<
DeviceType, ConvParam<DeviceType>,
operators::DepthwiseConvKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, ConvParam<DeviceType>,
operators::DepthwiseConvKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
private:
......
......@@ -38,10 +38,6 @@ class DropoutOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, DropoutParam<DeviceType>,
operators::DropoutKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
// using framework::OperatorWithKernel<DeviceType, DropoutParam<DeviceType>,
// operators::DropoutKernel<DeviceType,
// T>>;
void InferShape() const override;
protected:
......
......@@ -37,10 +37,6 @@ class ElementwiseAddOp : public framework::OperatorWithKernel<
DeviceType, ElementwiseAddParam<DeviceType>,
operators::ElementwiseAddKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, ElementwiseAddParam<DeviceType>,
operators::ElementwiseAddKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -74,7 +74,7 @@ class FeedOp : public framework::OperatorBase<DeviceType> {
#else
void Init() {}
void RunImpl() const {
void RunImpl() {
param_.Out()->ShareDataWith(*param_.InputX());
param_.Out()->set_lod(param_.InputX()->lod());
}
......
......@@ -31,7 +31,7 @@ class FetchOp : public framework::OperatorBase<DeviceType> {
: framework::OperatorBase<DeviceType>(type, inputs, outputs, attrs,
scope),
param_(inputs, outputs, attrs, *scope) {}
void RunImpl() const { param_.Out()->ShareDataWith(*param_.InputX()); }
void RunImpl() { param_.Out()->ShareDataWith(*param_.InputX()); }
void Init() {}
......
......@@ -53,10 +53,6 @@ class FlattenOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, FlattenParam<DeviceType>,
operators::FlattenKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, FlattenParam<DeviceType>,
operators::FlattenKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
};
......
......@@ -56,10 +56,6 @@ class FusionConvAddOp : public framework::OperatorWithKernel<
FusionConvAddParam<DeviceType>,
operators::ConvAddKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, FusionConvAddParam<DeviceType>,
operators::ConvAddKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -67,10 +67,6 @@ class FusionConvAddAddPReluOp
DeviceType, FusionConvAddAddPReluParam<DeviceType>,
operators::ConvAddAddPReluKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, FusionConvAddAddPReluParam<DeviceType>,
operators::ConvAddAddPReluKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -66,10 +66,6 @@ class FusionConvAddBNReluOp
DeviceType, FusionConvAddBNReluParam<DeviceType>,
operators::ConvAddBNReluKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, FusionConvAddBNReluParam<DeviceType>,
operators::ConvAddBNReluKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -63,9 +63,6 @@ class FusionConvAddPReluOp
operators::ConvAddPReluKernel<DeviceType, T>>(type, inputs, outputs,
attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, FusionConvAddPReluParam<DeviceType>,
operators::ConvAddPReluKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -57,9 +57,6 @@ class FusionConvAddReluOp : public framework::OperatorWithKernel<
operators::ConvAddReluKernel<DeviceType, T>>(type, inputs, outputs,
attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, FusionConvAddReluParam<DeviceType>,
operators::ConvAddReluKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -71,10 +71,6 @@ class FusionConvBNAddReluOp
DeviceType, FusionConvBNAddReluParam<DeviceType>,
operators::ConvBNAddReluKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, FusionConvBNAddReluParam<DeviceType>,
operators::ConvBNAddReluKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -63,10 +63,6 @@ class FusionConvBNReluOp : public framework::OperatorWithKernel<
DeviceType, FusionConvBNReluParam<DeviceType>,
operators::ConvBNReluKernel<DeviceType, T>>(type, inputs, outputs,
attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, FusionConvBNReluParam<DeviceType>,
operators::ConvBNReluKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -65,9 +65,6 @@ class FusionDWConvBNReluOp
operators::DWConvBNReluKernel<DeviceType, T>>(type, inputs, outputs,
attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, FusionDWConvBNReluParam<DeviceType>,
operators::DWConvBNReluKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -56,10 +56,6 @@ class FusionFcOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, FusionFcParam<DeviceType>,
operators::FusionFcKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, FusionFcParam<DeviceType>,
operators::FusionFcKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -56,9 +56,6 @@ class FusionFcReluOp : public framework::OperatorWithKernel<
operators::FusionFcReluKernel<DeviceType, T>>(type, inputs, outputs,
attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, FusionFcReluParam<DeviceType>,
operators::FusionFcReluKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -37,10 +37,6 @@ class GruOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, GruParam<DeviceType>,
operators::GruKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, GruParam<DeviceType>,
operators::GruKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
};
......
......@@ -39,9 +39,6 @@ class Im2SequenceOp : public framework::OperatorWithKernel<
operators::Im2SequenceKernel<DeviceType, T>>(type, inputs, outputs,
attrs, scope) {}
// using framework::OperatorWithKernel<
// DeviceType, Im2SequenceParam<DeviceType>,
// operators::Im2SequenceKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
private:
......
......@@ -27,7 +27,7 @@ bool BatchNormKernel<CPU, float>::Init(BatchNormParam<CPU> *param) {
template <>
void BatchNormKernel<CPU, float>::Compute(
const BatchNormParam<CPU> &param) const {
const BatchNormParam<CPU> &param) {
BatchnormCompute<float>(param);
}
......
......@@ -28,7 +28,7 @@ bool BilinearInterpKernel<CPU, float>::Init(BilinearInterpParam<CPU> *param) {
template <>
void BilinearInterpKernel<CPU, float>::Compute(
const BilinearInterpParam<CPU> &param) const {
const BilinearInterpParam<CPU> &param) {
BilinearInterpCompute<float>(param);
}
......
......@@ -27,7 +27,7 @@ bool BoxCoderKernel<CPU, float>::Init(BoxCoderParam<CPU> *param) {
template <>
void BoxCoderKernel<CPU, float>::Compute(
const BoxCoderParam<CPU> &param) const {
const BoxCoderParam<CPU> &param) {
BoxCoderCompute<float>(param);
}
......
......@@ -26,7 +26,7 @@ bool ConcatKernel<CPU, float>::Init(ConcatParam<CPU> *param) {
}
template <>
void ConcatKernel<CPU, float>::Compute(const ConcatParam<CPU> &param) const {
void ConcatKernel<CPU, float>::Compute(const ConcatParam<CPU> &param) {
ConcatCompute<float>(param);
param.Out()->set_lod(param.Inputs()[0]->lod());
}
......
......@@ -28,7 +28,7 @@ bool ConvAddAddPReluKernel<CPU, float>::Init(
template <>
void ConvAddAddPReluKernel<CPU, float>::Compute(
const FusionConvAddAddPReluParam<CPU> &param) const {
const FusionConvAddAddPReluParam<CPU> &param) {
ConvAddAddPReluCompute<float>(param);
}
template class ConvAddAddPReluKernel<CPU, float>;
......
......@@ -55,7 +55,7 @@ bool ConvAddBNReluKernel<CPU, float>::Init(
template <>
void ConvAddBNReluKernel<CPU, float>::Compute(
const FusionConvAddBNReluParam<CPU> &param) const {
const FusionConvAddBNReluParam<CPU> &param) {
ConvAddBNReluCompute<float>(param);
}
template class ConvAddBNReluKernel<CPU, float>;
......
......@@ -26,7 +26,7 @@ bool ConvAddKernel<CPU, float>::Init(FusionConvAddParam<CPU> *param) {
template <>
void ConvAddKernel<CPU, float>::Compute(
const FusionConvAddParam<CPU> &param) const {
const FusionConvAddParam<CPU> &param) {
ConvAddCompute<float>(param);
}
......
......@@ -27,7 +27,7 @@ bool ConvAddPReluKernel<CPU, float>::Init(FusionConvAddPReluParam<CPU> *param) {
template <>
void ConvAddPReluKernel<CPU, float>::Compute(
const FusionConvAddPReluParam<CPU> &param) const {
const FusionConvAddPReluParam<CPU> &param) {
ConvAddPReluCompute<float>(param);
}
template class ConvAddPReluKernel<CPU, float>;
......
......@@ -27,7 +27,7 @@ bool ConvAddReluKernel<CPU, float>::Init(FusionConvAddReluParam<CPU> *param) {
template <>
void ConvAddReluKernel<CPU, float>::Compute(
const FusionConvAddReluParam<CPU> &param) const {
const FusionConvAddReluParam<CPU> &param) {
ConvAddReluCompute<float>(param);
}
template class ConvAddReluKernel<CPU, float>;
......
......@@ -55,7 +55,7 @@ bool ConvBNAddReluKernel<CPU, float>::Init(
template <>
void ConvBNAddReluKernel<CPU, float>::Compute(
const FusionConvBNAddReluParam<CPU> &param) const {
const FusionConvBNAddReluParam<CPU> &param) {
ConvBNAddReluCompute<float>(param);
}
template class ConvBNAddReluKernel<CPU, float>;
......
......@@ -57,7 +57,7 @@ bool ConvBNReluKernel<CPU, float>::Init(FusionConvBNReluParam<CPU> *param) {
template <>
void ConvBNReluKernel<CPU, float>::Compute(
const FusionConvBNReluParam<CPU> &param) const {
const FusionConvBNReluParam<CPU> &param) {
ConvBNReluCompute<float>(param);
}
template class ConvBNReluKernel<CPU, float>;
......
......@@ -26,7 +26,7 @@ bool ConvKernel<CPU, float>::Init(ConvParam<CPU> *param) {
}
template <>
void ConvKernel<CPU, float>::Compute(const ConvParam<CPU> &param) const {
void ConvKernel<CPU, float>::Compute(const ConvParam<CPU> &param) {
ConvCompute<float>(param);
}
......
......@@ -27,7 +27,7 @@ bool ConvTransposeKernel<CPU, float>::Init(ConvTransposeParam<CPU> *param) {
template <>
void ConvTransposeKernel<CPU, float>::Compute(
const ConvTransposeParam<CPU> &param) const {
const ConvTransposeParam<CPU> &param) {
ConvTransposeCompute<float>(param);
}
......
......@@ -27,7 +27,7 @@ bool CrfKernel<CPU, float>::Init(CrfParam<CPU> *param) {
}
template <>
void CrfKernel<CPU, float>::Compute(const CrfParam<CPU> &param) const {
void CrfKernel<CPU, float>::Compute(const CrfParam<CPU> &param) {
CrfCompute<float>(param);
}
......
......@@ -27,7 +27,7 @@ bool DepthwiseConvKernel<CPU, float>::Init(ConvParam<CPU> *param) {
template <>
void DepthwiseConvKernel<CPU, float>::Compute(
const ConvParam<CPU> &param) const {
const ConvParam<CPU> &param) {
DepthwiseConvCompute<float>(param);
}
......
......@@ -35,7 +35,7 @@ struct DropoutFunctor {
};
template <>
void DropoutKernel<CPU, float>::Compute(const DropoutParam<CPU> &param) const {
void DropoutKernel<CPU, float>::Compute(const DropoutParam<CPU> &param) {
const auto *input_x = param.InputX();
auto *input_x_ptr = input_x->data<float>();
auto *out = param.Out();
......
......@@ -54,7 +54,7 @@ bool DWConvBNReluKernel<CPU, float>::Init(FusionDWConvBNReluParam<CPU> *param) {
template <>
void DWConvBNReluKernel<CPU, float>::Compute(
const FusionDWConvBNReluParam<CPU> &param) const {
const FusionDWConvBNReluParam<CPU> &param) {
DWConvBNReluCompute<float>(param);
}
template class DWConvBNReluKernel<CPU, float>;
......
......@@ -27,7 +27,7 @@ bool ElementwiseAddKernel<CPU, float>::Init(ElementwiseAddParam<CPU> *param) {
template <>
void ElementwiseAddKernel<CPU, float>::Compute(
const ElementwiseAddParam<CPU> &param) const {
const ElementwiseAddParam<CPU> &param) {
ElementwiseAddCompute<float>(param);
param.Out()->set_lod(param.InputX()->lod());
}
......
......@@ -27,7 +27,7 @@ bool FlattenKernel<CPU, float>::Init(FlattenParam<CPU> *param) {
}
template <>
void FlattenKernel<CPU, float>::Compute(const FlattenParam<CPU> &param) const {
void FlattenKernel<CPU, float>::Compute(const FlattenParam<CPU> &param) {
FlattenCompute<float>(param);
}
......
......@@ -27,7 +27,7 @@ bool FusionFcKernel<CPU, float>::Init(FusionFcParam<CPU> *param) {
template <>
void FusionFcKernel<CPU, float>::Compute(
const FusionFcParam<CPU> &param) const {
const FusionFcParam<CPU> &param) {
FusionFcCompute<float>(param);
param.Out()->set_lod(param.InputX()->lod());
}
......
......@@ -26,7 +26,7 @@ bool GruKernel<CPU, float>::Init(GruParam<CPU> *param) {
}
template <>
void GruKernel<CPU, float>::Compute(const GruParam<CPU> &param) const {
void GruKernel<CPU, float>::Compute(const GruParam<CPU> &param) {
GruCompute<float>(param);
param.OutHidden()->set_lod(param.InputInput()->lod());
// DLOG << "________________" << param.OutHidden()->dims();
......
......@@ -33,7 +33,7 @@ inline int Im2SeqOutputSize(int input_size, int filter_size, int padding_0,
template <>
void Im2SequenceKernel<CPU, float>::Compute(
const Im2SequenceParam<CPU> &param) const {
const Im2SequenceParam<CPU> &param) {
const Tensor *in_x = param.Input();
Tensor *out = param.Output();
out->mutable_data<float>();
......
......@@ -25,7 +25,7 @@ bool LookupKernel<CPU, float>::Init(LookupParam<CPU> *param) {
}
template <>
void LookupKernel<CPU, float>::Compute(const LookupParam<CPU> &param) const {
void LookupKernel<CPU, float>::Compute(const LookupParam<CPU> &param) {
LookupCompute<float>(param);
param.Out()->set_lod(param.InputIds()->lod());
}
......
......@@ -26,7 +26,7 @@ bool LrnKernel<CPU, float>::Init(LrnParam<CPU> *param) {
}
template <>
void LrnKernel<CPU, float>::Compute(const LrnParam<CPU> &param) const {
void LrnKernel<CPU, float>::Compute(const LrnParam<CPU> &param) {
LrnCompute<float>(param);
}
......
......@@ -26,7 +26,7 @@ bool MulKernel<CPU, float>::Init(MulParam<CPU> *param) {
}
template <>
void MulKernel<CPU, float>::Compute(const MulParam<CPU> &param) const {
void MulKernel<CPU, float>::Compute(const MulParam<CPU> &param) {
MulCompute<float>(param);
param.Out()->set_lod(param.InputX()->lod());
}
......
......@@ -27,7 +27,7 @@ bool MultiClassNMSKernel<CPU, float>::Init(MultiClassNMSParam<CPU> *param) {
template <>
void MultiClassNMSKernel<CPU, float>::Compute(
const MultiClassNMSParam<CPU> &param) const {
const MultiClassNMSParam<CPU> &param) {
MultiClassNMSCompute<float>(param);
}
......
......@@ -25,7 +25,7 @@ bool PoolKernel<CPU, float>::Init(PoolParam<CPU> *param) {
}
template <>
void PoolKernel<CPU, float>::Compute(const PoolParam<CPU> &param) const {
void PoolKernel<CPU, float>::Compute(const PoolParam<CPU> &param) {
PoolCompute<float>(param);
}
} // namespace operators
......
......@@ -35,7 +35,7 @@ struct PReluFunctor {
* @b 特化到具体平台的实现, param 从 op 层传入
* */
template <>
void PReluKernel<CPU, float>::Compute(const PReluParam<CPU> &param) const {
void PReluKernel<CPU, float>::Compute(const PReluParam<CPU> &param) {
auto *x = param.InputX();
auto *alpha = param.InputAlpha();
auto *out = param.Out();
......
......@@ -27,7 +27,7 @@ bool PriorBoxKernel<CPU, float>::Init(PriorBoxParam<CPU> *param) {
template <>
void PriorBoxKernel<CPU, float>::Compute(
const PriorBoxParam<CPU> &param) const {
const PriorBoxParam<CPU> &param) {
PriorBoxCompute<float>(param);
}
......
......@@ -26,7 +26,7 @@ bool ReluKernel<CPU, float>::Init(ReluParam<CPU> *param) {
}
template <>
void ReluKernel<CPU, float>::Compute(const ReluParam<CPU> &param) const {
void ReluKernel<CPU, float>::Compute(const ReluParam<CPU> &param) {
ReluCompute<float>(param);
}
......
......@@ -26,7 +26,7 @@ bool ReshapeKernel<CPU, float>::Init(ReshapeParam<CPU> *param) {
}
template <>
void ReshapeKernel<CPU, float>::Compute(const ReshapeParam<CPU> &param) const {
void ReshapeKernel<CPU, float>::Compute(const ReshapeParam<CPU> &param) {
ReshapeCompute<float>(param);
}
......
......@@ -108,7 +108,7 @@ void ResizeTensor(const Tensor* src, Tensor* dst) {
}
template <>
void ResizeKernel<CPU, float>::Compute(const ResizeParam<CPU>& param) const {
void ResizeKernel<CPU, float>::Compute(const ResizeParam<CPU>& param) {
const auto* input_x = param.InputX();
const auto& input_x_dims = input_x->dims();
auto* out = param.Out();
......
......@@ -23,7 +23,7 @@ namespace operators {
* @b 特化到具体平台的实现, param 从 op 层传入
* */
template <>
void ScaleKernel<CPU, float>::Compute(const ScaleParam<CPU> &param) const {
void ScaleKernel<CPU, float>::Compute(const ScaleParam<CPU> &param) {
const auto *input_x = param.InputX();
auto *input_x_ptr = input_x->data<float>();
auto *out = param.Out();
......
......@@ -27,7 +27,7 @@ bool ShapeKernel<CPU, float>::Init(ShapeParam<CPU> *param) {
}
template <>
void ShapeKernel<CPU, float>::Compute(const ShapeParam<CPU> &param) const {
void ShapeKernel<CPU, float>::Compute(const ShapeParam<CPU> &param) {
ShapeCompute<float>(param);
}
......
......@@ -32,7 +32,7 @@ bool SigmoidKernel<CPU, float>::Init(SigmoidParam<CPU> *param) {
}
template <>
void SigmoidKernel<CPU, float>::Compute(const SigmoidParam<CPU> &param) const {
void SigmoidKernel<CPU, float>::Compute(const SigmoidParam<CPU> &param) {
SigmoidCompute<float>(param);
}
......
......@@ -26,7 +26,7 @@ bool SoftmaxKernel<CPU, float>::Init(SoftmaxParam<CPU> *param) {
}
template <>
void SoftmaxKernel<CPU, float>::Compute(const SoftmaxParam<CPU> &param) const {
void SoftmaxKernel<CPU, float>::Compute(const SoftmaxParam<CPU> &param) {
SoftmaxCompute<float>(param);
}
......
......@@ -27,7 +27,7 @@ bool SplitKernel<CPU, float>::Init(SplitParam<CPU> *param) {
}
template <>
void SplitKernel<CPU, float>::Compute(const SplitParam<CPU> &param) const {
void SplitKernel<CPU, float>::Compute(const SplitParam<CPU> &param) {
SplitCompute<float>(param);
}
......
......@@ -26,7 +26,7 @@ bool TransposeKernel<CPU, float>::Init(TransposeParam<CPU> *param) {
template <>
void TransposeKernel<CPU, float>::Compute(
const TransposeParam<CPU> &param) const {
const TransposeParam<CPU> &param) {
TransposeCompute<float>(param);
}
......
......@@ -28,7 +28,7 @@ template <typename DeviceType, typename T>
class BatchNormKernel
: public framework::OpKernelBase<DeviceType, BatchNormParam<DeviceType>> {
public:
void Compute(const BatchNormParam<DeviceType> &param) const;
void Compute(const BatchNormParam<DeviceType> &param);
bool Init(BatchNormParam<DeviceType> *param);
};
......
......@@ -29,7 +29,7 @@ class BilinearInterpKernel
: public framework::OpKernelBase<DeviceType,
BilinearInterpParam<DeviceType>> {
public:
void Compute(const BilinearInterpParam<DeviceType>& param) const;
void Compute(const BilinearInterpParam<DeviceType>& param);
bool Init(BilinearInterpParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -29,7 +29,7 @@ template <typename DeviceType, typename T>
class BoxCoderKernel
: public framework::OpKernelBase<DeviceType, BoxCoderParam<DeviceType>> {
public:
void Compute(const BoxCoderParam<DeviceType>& param) const;
void Compute(const BoxCoderParam<DeviceType>& param);
bool Init(BoxCoderParam<DeviceType>* param);
};
} // namespace operators
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef CONV_OP
#include "operators/kernel/conv_kernel.h"
#include "operators/kernel/central-arm-func/conv_arm_func.h"
namespace paddle_mobile {
namespace operators {
template <>
// No-op initializer for the OpenCL conv kernel: no program/kernel objects are
// built yet (see the commented-out reference version below, which would call
// cl_helper_.AddKernel). Always reports success so the op can be constructed.
bool ConvKernel<GPU_CL, float>::Init(ConvParam<GPU_CL> *param) {
  return true;
}
template <>
// Placeholder OpenCL convolution: intentionally empty for now (the real
// implementation is sketched in the commented-out block below).
// Fix: dropped the trailing `const` qualifier — this commit changes the base
// contract to `virtual void Compute(const P &para)` (non-const), so a
// const-qualified definition would not match the declared member and would
// fail to compile / not override.
void ConvKernel<GPU_CL, float>::Compute(const ConvParam<GPU_CL> &param) {
}
template class ConvKernel<GPU_CL, float>;
} // namespace operators
} // namespace paddle_mobile
#endif
///* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License. */
//
//#ifdef CONV_OP
//
//#include "operators/kernel/conv_kernel.h"
//#include "operators/kernel/central-arm-func/conv_arm_func.h"
//
//namespace paddle_mobile {
//namespace operators {
//
//template <>
//bool ConvKernel<GPU_CL, float>::Init(ConvParam<GPU_CL> *param) {
// this->cl_helper_.AddKernel("conv_3x3", "conv_kernel.cl");
// return true;
//}
//
//template <>
//void ConvKernel<GPU_CL, float>::Compute(const ConvParam<GPU_CL> &param) {
// auto kernel = this->cl_helper_.KernelAt(0);
//
//
//}
//
//template class ConvKernel<GPU_CL, float>;
//
//} // namespace operators
//} // namespace paddle_mobile
//
//#endif
......@@ -27,7 +27,7 @@ template <typename DeviceType, typename T>
class ConcatKernel
: public framework::OpKernelBase<DeviceType, ConcatParam<DeviceType>> {
public:
void Compute(const ConcatParam<DeviceType> &param) const;
void Compute(const ConcatParam<DeviceType> &param);
bool Init(ConcatParam<DeviceType> *param);
};
......
......@@ -35,7 +35,7 @@ template <typename DeviceType, typename T>
class ConvAddAddPReluKernel
: public OpKernelBase<DeviceType, FusionConvAddAddPReluParam<DeviceType>> {
public:
void Compute(const FusionConvAddAddPReluParam<DeviceType> &param) const;
void Compute(const FusionConvAddAddPReluParam<DeviceType> &param);
bool Init(FusionConvAddAddPReluParam<DeviceType> *param);
};
......
......@@ -35,7 +35,7 @@ template <typename DeviceType, typename T>
class ConvAddBNKernel
: public OpKernelBase<DeviceType, FusionConvAddBNParam<DeviceType>> {
public:
void Compute(const FusionConvAddBNParam<DeviceType> &param) const;
void Compute(const FusionConvAddBNParam<DeviceType> &param);
bool Init(FusionConvAddBNParam<DeviceType> *param);
};
......
......@@ -35,7 +35,7 @@ template <typename DeviceType, typename T>
class ConvAddBNReluKernel
: public OpKernelBase<DeviceType, FusionConvAddBNReluParam<DeviceType>> {
public:
void Compute(const FusionConvAddBNReluParam<DeviceType> &param) const;
void Compute(const FusionConvAddBNReluParam<DeviceType> &param);
bool Init(FusionConvAddBNReluParam<DeviceType> *param);
};
......
......@@ -40,7 +40,7 @@ template <typename DeviceType, typename T>
class ConvAddKernel
: public OpKernelBase<DeviceType, FusionConvAddParam<DeviceType>> {
public:
void Compute(const FusionConvAddParam<DeviceType> &param) const;
void Compute(const FusionConvAddParam<DeviceType> &param);
bool Init(FusionConvAddParam<DeviceType> *param);
};
......
......@@ -35,7 +35,7 @@ template <typename DeviceType, typename T>
class ConvAddPReluKernel
: public OpKernelBase<DeviceType, FusionConvAddPReluParam<DeviceType>> {
public:
void Compute(const FusionConvAddPReluParam<DeviceType> &param) const;
void Compute(const FusionConvAddPReluParam<DeviceType> &param);
bool Init(FusionConvAddPReluParam<DeviceType> *param);
};
......
......@@ -35,7 +35,7 @@ template <typename DeviceType, typename T>
class ConvAddReluKernel
: public OpKernelBase<DeviceType, FusionConvAddReluParam<DeviceType>> {
public:
void Compute(const FusionConvAddReluParam<DeviceType> &param) const;
void Compute(const FusionConvAddReluParam<DeviceType> &param);
bool Init(FusionConvAddReluParam<DeviceType> *param);
};
......
......@@ -35,7 +35,7 @@ template <typename DeviceType, typename T>
class ConvBNAddReluKernel
: public OpKernelBase<DeviceType, FusionConvBNAddReluParam<DeviceType>> {
public:
void Compute(const FusionConvBNAddReluParam<DeviceType> &param) const;
void Compute(const FusionConvBNAddReluParam<DeviceType> &param);
bool Init(FusionConvBNAddReluParam<DeviceType> *param);
};
......
......@@ -35,7 +35,7 @@ template <typename DeviceType, typename T>
class ConvBNKernel
: public OpKernelBase<DeviceType, FusionConvBNParam<DeviceType>> {
public:
void Compute(const FusionConvBNParam<DeviceType> &param) const;
void Compute(const FusionConvBNParam<DeviceType> &param);
bool Init(FusionConvBNParam<DeviceType> *param);
};
......
......@@ -35,7 +35,7 @@ template <typename DeviceType, typename T>
class ConvBNReluKernel
: public OpKernelBase<DeviceType, FusionConvBNReluParam<DeviceType>> {
public:
void Compute(const FusionConvBNReluParam<DeviceType> &param) const;
void Compute(const FusionConvBNReluParam<DeviceType> &param);
bool Init(FusionConvBNReluParam<DeviceType> *param);
};
......
......@@ -31,7 +31,7 @@ using framework::OpKernelBase;
template <typename DeviceType, typename T>
class ConvKernel : public OpKernelBase<DeviceType, ConvParam<DeviceType>> {
public:
void Compute(const ConvParam<DeviceType> &param) const;
void Compute(const ConvParam<DeviceType> &param);
bool Init(ConvParam<DeviceType> *param);
};
......
......@@ -28,7 +28,7 @@ template <typename DeviceType, typename T>
class ConvTransposeKernel
: public OpKernelBase<DeviceType, ConvTransposeParam<DeviceType>> {
public:
void Compute(const ConvTransposeParam<DeviceType> &param) const;
void Compute(const ConvTransposeParam<DeviceType> &param);
bool Init(ConvTransposeParam<DeviceType> *param);
};
......
......@@ -28,7 +28,7 @@ template <typename DeviceType, typename T>
class CrfKernel
: public framework::OpKernelBase<DeviceType, CrfParam<DeviceType>> {
public:
void Compute(const CrfParam<DeviceType>& param) const;
void Compute(const CrfParam<DeviceType>& param);
bool Init(CrfParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -31,7 +31,7 @@ template <typename DeviceType, typename T>
class DepthwiseConvKernel
: public OpKernelBase<DeviceType, ConvParam<DeviceType>> {
public:
void Compute(const ConvParam<DeviceType> &param) const;
void Compute(const ConvParam<DeviceType> &param);
bool Init(ConvParam<DeviceType> *param);
};
} // namespace operators
......
......@@ -26,7 +26,7 @@ template <typename DeviceType, typename T>
class DropoutKernel
: public framework::OpKernelBase<DeviceType, DropoutParam<DeviceType>> {
public:
void Compute(const DropoutParam<DeviceType>& param) const;
void Compute(const DropoutParam<DeviceType>& param);
bool Init(DropoutParam<DeviceType>* para);
};
} // namespace operators
......
......@@ -35,7 +35,7 @@ template <typename DeviceType, typename T>
class DWConvBNReluKernel
: public OpKernelBase<DeviceType, FusionDWConvBNReluParam<DeviceType>> {
public:
void Compute(const FusionDWConvBNReluParam<DeviceType> &param) const;
void Compute(const FusionDWConvBNReluParam<DeviceType> &param);
bool Init(FusionDWConvBNReluParam<DeviceType> *param);
};
......
......@@ -30,7 +30,7 @@ class ElementwiseAddKernel
: public framework::OpKernelBase<DeviceType,
ElementwiseAddParam<DeviceType>> {
public:
void Compute(const ElementwiseAddParam<DeviceType> &param) const;
void Compute(const ElementwiseAddParam<DeviceType> &param);
bool Init(ElementwiseAddParam<DeviceType> *param);
};
} // namespace operators
......
......@@ -29,7 +29,7 @@ class ElementwiseAddReluKernel
: public framework::OpKernelBase<DeviceType,
ElementwiseAddReluParam<DeviceType>> {
public:
void Compute(const ElementwiseAddReluParam<DeviceType> &param) const;
void Compute(const ElementwiseAddReluParam<DeviceType> &param);
bool Init(ElementwiseAddReluParam<DeviceType> *param);
};
} // namespace operators
......
......@@ -28,7 +28,7 @@ class FusionFcReluKernel
: public framework::OpKernelBase<DeviceType,
FusionFcReluParam<DeviceType>> {
public:
void Compute(const FusionFcReluParam<DeviceType>& param) const;
void Compute(const FusionFcReluParam<DeviceType>& param);
bool Init(FusionFcReluParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -28,7 +28,7 @@ template <typename DeviceType, typename T>
class FlattenKernel
: public framework::OpKernelBase<DeviceType, FlattenParam<DeviceType>> {
public:
void Compute(const FlattenParam<DeviceType>& param) const;
void Compute(const FlattenParam<DeviceType>& param);
bool Init(FlattenParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -57,7 +57,7 @@ bool ConcatKernel<FPGA, float>::Init(ConcatParam<FPGA> *param) {
}
template <>
void ConcatKernel<FPGA, float>::Compute(const ConcatParam<FPGA> &param) const {
void ConcatKernel<FPGA, float>::Compute(const ConcatParam<FPGA> &param) {
ComputeFPGAConcat(param.FpgaArgs());
}
template class ConcatKernel<FPGA, float>;
......
......@@ -77,7 +77,7 @@ bool ConvAddBNKernel<FPGA, float>::Init(FusionConvAddBNParam<FPGA> *param) {
template <>
void ConvAddBNKernel<FPGA, float>::Compute(
const FusionConvAddBNParam<FPGA> &param) const {
const FusionConvAddBNParam<FPGA> &param) {
fpga::ComputeFpgaConv(param.FpgaArgs());
}
......
......@@ -74,7 +74,7 @@ bool ConvAddBNReluKernel<FPGA, float>::Init(
template <>
void ConvAddBNReluKernel<FPGA, float>::Compute(
const FusionConvAddBNReluParam<FPGA> &param) const {
const FusionConvAddBNReluParam<FPGA> &param) {
fpga::ComputeFpgaConv(param.FpgaArgs());
}
......
......@@ -56,7 +56,7 @@ bool ConvAddReluKernel<FPGA, float>::Init(FusionConvAddReluParam<FPGA> *param) {
template <>
void ConvAddReluKernel<FPGA, float>::Compute(
const FusionConvAddReluParam<FPGA> &param) const {
const FusionConvAddReluParam<FPGA> &param) {
fpga::ComputeFpgaConv(param.FpgaArgs());
}
......
......@@ -68,7 +68,7 @@ bool ConvBNKernel<FPGA, float>::Init(FusionConvBNParam<FPGA> *param) {
template <>
void ConvBNKernel<FPGA, float>::Compute(
const FusionConvBNParam<FPGA> &param) const {
const FusionConvBNParam<FPGA> &param) {
fpga::ComputeFpgaConv(param.FpgaArgs());
}
......
......@@ -68,7 +68,7 @@ bool ConvBNReluKernel<FPGA, float>::Init(FusionConvBNReluParam<FPGA> *param) {
template <>
void ConvBNReluKernel<FPGA, float>::Compute(
const FusionConvBNReluParam<FPGA> &param) const {
const FusionConvBNReluParam<FPGA> &param) {
fpga::ComputeFpgaConv(param.FpgaArgs());
}
......
......@@ -27,7 +27,7 @@ bool DropoutKernel<FPGA, float>::Init(DropoutParam<FPGA> *param) {
template <>
void DropoutKernel<FPGA, float>::Compute(
const DropoutParam<FPGA> &param) const {}
const DropoutParam<FPGA> &param) {}
} // namespace operators
} // namespace paddle_mobile
......
......@@ -56,7 +56,7 @@ bool ElementwiseAddReluKernel<FPGA, float>::Init(
template <>
void ElementwiseAddReluKernel<FPGA, float>::Compute(
const ElementwiseAddReluParam<FPGA> &param) const {
const ElementwiseAddReluParam<FPGA> &param) {
fpga::ComputeFpgaEWAdd(param.FpgaArgs());
}
} // namespace operators
......
......@@ -59,7 +59,7 @@ bool FusionFcReluKernel<FPGA, float>::Init(FusionFcReluParam<FPGA> *param) {
}
template <>
void FusionFcReluKernel<FPGA, float>::Compute(
const FusionFcReluParam<FPGA> &param) const {
const FusionFcReluParam<FPGA> &param) {
fpga::ComputeFpgaConv(param.FpgaArgs());
};
......
......@@ -61,7 +61,7 @@ bool FusionFcKernel<FPGA, float>::Init(FusionFcParam<FPGA> *param) {
template <>
void FusionFcKernel<FPGA, float>::Compute(
const FusionFcParam<FPGA> &param) const {
const FusionFcParam<FPGA> &param) {
fpga::ComputeFpgaConv(param.FpgaArgs());
}
} // namespace operators
......
......@@ -49,7 +49,7 @@ bool PoolKernel<FPGA, float>::Init(PoolParam<FPGA> *param) {
}
template <>
void PoolKernel<FPGA, float>::Compute(const PoolParam<FPGA> &param) const {
void PoolKernel<FPGA, float>::Compute(const PoolParam<FPGA> &param) {
fpga::ComputeFpgaPool(param.FpgaArgs());
}
} // namespace operators
......
......@@ -48,7 +48,7 @@ bool SoftmaxKernel<FPGA, float>::Init(SoftmaxParam<FPGA> *param) {
template <>
void SoftmaxKernel<FPGA, float>::Compute(
const SoftmaxParam<FPGA> &param) const {
const SoftmaxParam<FPGA> &param) {
Tensor *in_x = param.FloatInput();
Tensor *out = param.Out();
......
......@@ -27,7 +27,7 @@ template <typename DeviceType, typename T>
class FusionFcKernel
: public framework::OpKernelBase<DeviceType, FusionFcParam<DeviceType>> {
public:
void Compute(const FusionFcParam<DeviceType>& param) const;
void Compute(const FusionFcParam<DeviceType>& param);
bool Init(FusionFcParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -28,7 +28,7 @@ template <typename DeviceType, typename T>
class GruKernel
: public framework::OpKernelBase<DeviceType, GruParam<DeviceType>> {
public:
void Compute(const GruParam<DeviceType>& param) const;
void Compute(const GruParam<DeviceType>& param);
bool Init(GruParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -29,7 +29,7 @@ template <typename DeviceType, typename T>
class Im2SequenceKernel
: public framework::OpKernelBase<DeviceType, Im2SequenceParam<DeviceType>> {
public:
void Compute(const Im2SequenceParam<DeviceType>& param) const;
void Compute(const Im2SequenceParam<DeviceType>& param);
bool Init(Im2SequenceParam<DeviceType>* para);
};
} // namespace operators
......
......@@ -28,7 +28,7 @@ template <typename DeviceType, typename T>
class LookupKernel
: public framework::OpKernelBase<DeviceType, LookupParam<DeviceType>> {
public:
void Compute(const LookupParam<DeviceType>& param) const;
void Compute(const LookupParam<DeviceType>& param);
bool Init(LookupParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -173,7 +173,7 @@ template <typename DeviceType, typename T>
class LrnKernel
: public framework::OpKernelBase<DeviceType, LrnParam<DeviceType>> {
public:
void Compute(const LrnParam<DeviceType> &param) const;
void Compute(const LrnParam<DeviceType> &param);
bool Init(LrnParam<DeviceType> *param);
};
} // namespace operators
......
......@@ -145,7 +145,7 @@ bool BatchNormKernel<GPU_MALI, float>::Init(BatchNormParam<GPU_MALI>* param) {
template <>
void BatchNormKernel<GPU_MALI, float>::Compute(
const BatchNormParam<GPU_MALI>& param) const {
const BatchNormParam<GPU_MALI>& param) {
std::cout << "init acl" << std::endl;
AclBatchNormOp<GPU_MALI, float>* acl_op =
reinterpret_cast<AclBatchNormOp<GPU_MALI, float>*>(this->GetAclOp());
......
......@@ -118,7 +118,7 @@ bool ConcatKernel<GPU_MALI, float>::Init(ConcatParam<GPU_MALI>* param) {
template <>
void ConcatKernel<GPU_MALI, float>::Compute(
const ConcatParam<GPU_MALI>& param) const {
const ConcatParam<GPU_MALI>& param) {
std::cout << "init acl" << std::endl;
AclConcatOp<GPU_MALI, float>* acl_op =
reinterpret_cast<AclConcatOp<GPU_MALI, float>*>(this->GetAclOp());
......
......@@ -212,7 +212,7 @@ bool ConvAddKernel<GPU_MALI, float>::Init(FusionConvAddParam<GPU_MALI>* param) {
template <>
void ConvAddKernel<GPU_MALI, float>::Compute(
const FusionConvAddParam<GPU_MALI>& param) const {
const FusionConvAddParam<GPU_MALI>& param) {
std::cout << "init acl" << std::endl;
AclConvAddOp<GPU_MALI, float>* acl_op =
reinterpret_cast<AclConvAddOp<GPU_MALI, float>*>(this->GetAclOp());
......
......@@ -212,7 +212,7 @@ bool ConvKernel<GPU_MALI, float>::Init(ConvParam<GPU_MALI>* param) {
template <>
void ConvKernel<GPU_MALI, float>::Compute(
const ConvParam<GPU_MALI>& param) const {
const ConvParam<GPU_MALI>& param) {
std::cout << "init acl" << std::endl;
AclConvOp<GPU_MALI, float>* acl_op =
reinterpret_cast<AclConvOp<GPU_MALI, float>*>(this->GetAclOp());
......
......@@ -34,7 +34,7 @@ bool ElementwiseAddKernel<GPU_MALI, float>::Init(
template <>
void ElementwiseAddKernel<GPU_MALI, float>::Compute(
const ElementwiseAddParam<GPU_MALI> &param) const {
const ElementwiseAddParam<GPU_MALI> &param) {
const Tensor *input_x = param.InputX();
const Tensor *input_y = param.InputY();
Tensor *Out = param.Out();
......
......@@ -26,7 +26,7 @@ bool FusionFcKernel<GPU_MALI, float>::Init(FusionFcParam<GPU_MALI> *param) {
template <>
void FusionFcKernel<GPU_MALI, float>::Compute(
const FusionFcParam<GPU_MALI> &param) const {
const FusionFcParam<GPU_MALI> &param) {
const Tensor *input_x = param.InputX();
const Tensor *input_y = param.InputY();
const Tensor *input_z = param.InputZ();
......
......@@ -128,7 +128,7 @@ bool LrnKernel<GPU_MALI, float>::Init(LrnParam<GPU_MALI>* param) {
template <>
void LrnKernel<GPU_MALI, float>::Compute(
const LrnParam<GPU_MALI>& param) const {
const LrnParam<GPU_MALI>& param) {
std::cout << "init acl" << std::endl;
AclLrnOp<GPU_MALI, float>* acl_op =
reinterpret_cast<AclLrnOp<GPU_MALI, float>*>(this->GetAclOp());
......
......@@ -28,7 +28,7 @@ bool MulKernel<GPU_MALI, float>::Init(MulParam<GPU_MALI> *param) {
template <>
void MulKernel<GPU_MALI, float>::Compute(
const MulParam<GPU_MALI> &param) const {
const MulParam<GPU_MALI> &param) {
const Tensor *input_x = param.InputX();
const Tensor *input_y = param.InputY();
Tensor *out = param.Out();
......
......@@ -196,7 +196,7 @@ bool PoolKernel<GPU_MALI, float>::Init(PoolParam<GPU_MALI>* param) {
template <>
void PoolKernel<GPU_MALI, float>::Compute(
const PoolParam<GPU_MALI>& param) const {
const PoolParam<GPU_MALI>& param) {
std::cout << "init acl" << std::endl;
AclPoolOp<GPU_MALI, float>* acl_op =
reinterpret_cast<AclPoolOp<GPU_MALI, float>*>(this->GetAclOp());
......
......@@ -116,7 +116,7 @@ bool ReluKernel<GPU_MALI, float>::Init(ReluParam<GPU_MALI>* param) {
template <>
void ReluKernel<GPU_MALI, float>::Compute(
const ReluParam<GPU_MALI>& param) const {
const ReluParam<GPU_MALI>& param) {
std::cout << "init acl" << std::endl;
AclReluOp<GPU_MALI, float>* acl_op =
reinterpret_cast<AclReluOp<GPU_MALI, float>*>(this->GetAclOp());
......
......@@ -28,7 +28,7 @@ bool ReshapeKernel<GPU_MALI, float>::Init(ReshapeParam<GPU_MALI> *param) {
template <>
void ReshapeKernel<GPU_MALI, float>::Compute(
const ReshapeParam<GPU_MALI> &param) const {
const ReshapeParam<GPU_MALI> &param) {
const auto *input_x = param.InputX();
const auto &input_x_dims = input_x->dims();
auto *out = param.Out();
......
......@@ -113,7 +113,7 @@ bool SoftmaxKernel<GPU_MALI, float>::Init(SoftmaxParam<GPU_MALI>* param) {
template <>
void SoftmaxKernel<GPU_MALI, float>::Compute(
const SoftmaxParam<GPU_MALI>& param) const {
const SoftmaxParam<GPU_MALI>& param) {
std::cout << "init acl" << std::endl;
AclSoftmaxOp<GPU_MALI, float>* acl_op =
reinterpret_cast<AclSoftmaxOp<GPU_MALI, float>*>(this->GetAclOp());
......
......@@ -29,7 +29,7 @@ template <typename DeviceType, typename T>
class MulKernel
: public framework::OpKernelBase<DeviceType, MulParam<DeviceType>> {
public:
void Compute(const MulParam<DeviceType> &param) const;
void Compute(const MulParam<DeviceType> &param);
bool Init(MulParam<DeviceType> *param);
};
} // namespace operators
......
......@@ -28,7 +28,7 @@ class MultiClassNMSKernel
: public framework::OpKernelBase<DeviceType,
MultiClassNMSParam<DeviceType>> {
public:
void Compute(const MultiClassNMSParam<DeviceType>& param) const;
void Compute(const MultiClassNMSParam<DeviceType>& param);
bool Init(MultiClassNMSParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -26,7 +26,7 @@ using framework::OpKernelBase;
template <typename DeviceType, typename T>
class PoolKernel : public OpKernelBase<DeviceType, PoolParam<DeviceType>> {
public:
void Compute(const PoolParam<DeviceType> &param) const override;
void Compute(const PoolParam<DeviceType> &param);
bool Init(PoolParam<DeviceType> *param);
};
} // namespace operators
......
......@@ -24,7 +24,7 @@ template <typename DeviceType, typename T>
class PReluKernel
: public framework::OpKernelBase<DeviceType, PReluParam<DeviceType>> {
public:
void Compute(const PReluParam<DeviceType>& param) const;
void Compute(const PReluParam<DeviceType>& param);
};
} // namespace operators
} // namespace paddle_mobile
......@@ -54,7 +54,7 @@ template <typename DeviceType, typename T>
class PriorBoxKernel
: public framework::OpKernelBase<DeviceType, PriorBoxParam<DeviceType>> {
public:
void Compute(const PriorBoxParam<DeviceType>& param) const;
void Compute(const PriorBoxParam<DeviceType>& param);
bool Init(PriorBoxParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -27,7 +27,7 @@ template <typename DeviceType, typename T>
class ReluKernel
: public framework::OpKernelBase<DeviceType, ReluParam<DeviceType>> {
public:
void Compute(const ReluParam<DeviceType>& param) const;
void Compute(const ReluParam<DeviceType>& param);
bool Init(ReluParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -71,7 +71,7 @@ template <typename DeviceType, typename T>
class ReshapeKernel
: public framework::OpKernelBase<DeviceType, ReshapeParam<DeviceType>> {
public:
void Compute(const ReshapeParam<DeviceType>& param) const;
void Compute(const ReshapeParam<DeviceType>& param);
bool Init(ReshapeParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -73,7 +73,7 @@ template <typename DeviceType, typename T>
class ResizeKernel
: public framework::OpKernelBase<DeviceType, ResizeParam<DeviceType>> {
public:
void Compute(const ResizeParam<DeviceType> &param) const;
void Compute(const ResizeParam<DeviceType> &param);
};
} // namespace operators
} // namespace paddle_mobile
......
......@@ -24,7 +24,7 @@ template <typename DeviceType, typename T>
class ScaleKernel
: public framework::OpKernelBase<DeviceType, ScaleParam<DeviceType>> {
public:
void Compute(const ScaleParam<DeviceType>& param) const;
void Compute(const ScaleParam<DeviceType>& param);
};
} // namespace operators
} // namespace paddle_mobile
......@@ -28,7 +28,7 @@ template <typename DeviceType, typename T>
class ShapeKernel
: public framework::OpKernelBase<DeviceType, ShapeParam<DeviceType>> {
public:
void Compute(const ShapeParam<DeviceType>& param) const;
void Compute(const ShapeParam<DeviceType>& param);
bool Init(ShapeParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -26,7 +26,7 @@ template <typename DeviceType, typename T>
class SigmoidKernel
: public OpKernelBase<DeviceType, SigmoidParam<DeviceType>> {
public:
void Compute(const SigmoidParam<DeviceType>& param) const override;
void Compute(const SigmoidParam<DeviceType>& param);
bool Init(SigmoidParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -24,7 +24,7 @@ template <typename DeviceType, typename T>
class SliceKernel
: public framework::OpKernelBase<DeviceType, SliceParam<DeviceType>> {
public:
void Compute(const SliceParam<DeviceType>& param) const {}
void Compute(const SliceParam<DeviceType>& param) {}
};
} // namespace operators
} // namespace paddle_mobile
......@@ -27,7 +27,7 @@ template <typename DeviceType, typename T>
class SoftmaxKernel
: public OpKernelBase<DeviceType, SoftmaxParam<DeviceType>> {
public:
void Compute(const SoftmaxParam<DeviceType> &param) const override;
void Compute(const SoftmaxParam<DeviceType> &param);
bool Init(SoftmaxParam<DeviceType> *param);
};
} // namespace operators
......
......@@ -28,7 +28,7 @@ template <typename DeviceType, typename T>
class SplitKernel
: public framework::OpKernelBase<DeviceType, SplitParam<DeviceType>> {
public:
void Compute(const SplitParam<DeviceType>& param) const;
void Compute(const SplitParam<DeviceType>& param);
bool Init(SplitParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -28,7 +28,7 @@ template <typename DeviceType, typename T>
class TransposeKernel
: public framework::OpKernelBase<DeviceType, TransposeParam<DeviceType>> {
public:
void Compute(const TransposeParam<DeviceType>& param) const;
void Compute(const TransposeParam<DeviceType>& param);
bool Init(TransposeParam<DeviceType>* param);
};
} // namespace operators
......
......@@ -37,10 +37,6 @@ class LookupOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, LookupParam<DeviceType>,
operators::LookupKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, LookupParam<DeviceType>,
operators::LookupKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
};
......
......@@ -35,10 +35,6 @@ class LrnOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, LrnParam<DeviceType>,
operators::LrnKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, LrnParam<DeviceType>,
operators::LrnKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -35,10 +35,6 @@ class MulOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, MulParam<DeviceType>,
operators::MulKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, MulParam<DeviceType>,
operators::MulKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -40,10 +40,6 @@ class MultiClassNMSOp : public framework::OperatorWithKernel<
DeviceType, MultiClassNMSParam<DeviceType>,
operators::MultiClassNMSKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, MultiClassNMSParam<DeviceType>,
operators::MultiClassNMSKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -38,9 +38,6 @@ class PoolOp : public OperatorWithKernel<DeviceType, PoolParam<DeviceType>,
: OperatorWithKernel<DeviceType, PoolParam<DeviceType>,
operators::PoolKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using OperatorWithKernel<
DeviceType, PoolParam<DeviceType>,
operators::PoolKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
private:
......
......@@ -38,10 +38,6 @@ class PReluOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, PReluParam<DeviceType>,
operators::PReluKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, PReluParam<DeviceType>,
operators::PReluKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -40,9 +40,6 @@ class PriorBoxOp : public framework::OperatorWithKernel<
operators::PriorBoxKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, PriorBoxParam<DeviceType>,
operators::PriorBoxKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -41,10 +41,6 @@ class ReluOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, ReluParam<DeviceType>,
operators::ReluKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, ReluParam<DeviceType>,
operators::ReluKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -39,10 +39,6 @@ class ReshapeOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, ReshapeParam<DeviceType>,
operators::ReshapeKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, ReshapeParam<DeviceType>,
operators::ReshapeKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -38,10 +38,6 @@ class ResizeOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, ResizeParam<DeviceType>,
operators::ResizeKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, ResizeParam<DeviceType>,
operators::ResizeKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -38,10 +38,6 @@ class ScaleOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, ScaleParam<DeviceType>,
operators::ScaleKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, ScaleParam<DeviceType>,
operators::ScaleKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -38,10 +38,6 @@ class ShapeOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, ShapeParam<DeviceType>,
operators::ShapeKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, ShapeParam<DeviceType>,
operators::ShapeKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
};
......
......@@ -36,11 +36,6 @@ class SigmoidOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, SigmoidParam<DeviceType>,
operators::SigmoidKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, SigmoidParam<DeviceType>,
operators::SigmoidKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
};
} // namespace operators
......
......@@ -38,10 +38,6 @@ class SliceOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, SliceParam<DeviceType>,
operators::SliceKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, SliceParam<DeviceType>,
operators::SliceKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
protected:
......
......@@ -36,11 +36,6 @@ class SoftmaxOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, SoftmaxParam<DeviceType>,
operators::SoftmaxKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, SoftmaxParam<DeviceType>,
operators::SoftmaxKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
private:
......
......@@ -38,10 +38,6 @@ class SplitOp : public framework::OperatorWithKernel<
: framework::OperatorWithKernel<DeviceType, SplitParam<DeviceType>,
operators::SplitKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, SplitParam<DeviceType>,
operators::SplitKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
};
} // namespace operators
......
......@@ -40,10 +40,6 @@ class TransposeOp : public framework::OperatorWithKernel<
DeviceType, TransposeParam<DeviceType>,
operators::TransposeKernel<DeviceType, T>>(type, inputs, outputs,
attrs, scope) {}
using framework::OperatorWithKernel<
DeviceType, TransposeParam<DeviceType>,
operators::TransposeKernel<DeviceType, T>>::OperatorWithKernel;
void InferShape() const override;
};
......
......@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "../../src/operators/kernel/sigmoid_kernel.h"
#include "../../src/operators/kernel/central-arm-func/sigmoid_arm_func.h"
#include "../test_helper.h"
#include "io/executor.h"
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册