提交 10e64565 编写于 作者: S superjomn

rename OpKernel to KernelLite

上级 29f9aade
......@@ -150,7 +150,7 @@ class KernelBase {
// MKLDNN, plain CUDA C implementations.
template <TargetType Target, PrecisionType Precision,
DataLayoutType DataLayout = DataLayoutType::kNCHW>
class OpKernel : public KernelBase {
class KernelLite : public KernelBase {
public:
// Set runtime context.
void SetContext(std::unique_ptr<KernelContext>&& ctx) { ctx_ = ctx; }
......@@ -166,15 +166,15 @@ class OpKernel : public KernelBase {
void Touch() {}
OpKernel() = default;
virtual ~OpKernel() = default;
KernelLite() = default;
virtual ~KernelLite() = default;
protected:
std::unique_ptr<KernelContext> ctx_;
};
// Human-readable kernel identity, e.g. "mul:kHost/kFloat/kNCHW".
// Combines the op type with the target/precision/layout the kernel was
// instantiated for, so registry lookups and logs can distinguish variants.
// NOTE(review): the diff scrape kept both the pre-rename (OpKernel) and
// post-rename (KernelLite) signature lines; only the renamed definition
// belongs in the post-commit source, so the stale line is dropped here.
template <TargetType Target, PrecisionType Precision, DataLayoutType DataLayout>
std::string KernelLite<Target, Precision, DataLayout>::name() const {
  return op_type() + ":" + TargetToStr(Target) + "/" +
         PrecisionToStr(Precision) + "/" + DataLayoutToStr(DataLayout);
}
......
......@@ -21,7 +21,7 @@ namespace lite {
namespace core {
int test_code{-1};
class SomeKernel : public OpKernel<TARGET(kHost), PRECISION(kFloat)> {
class SomeKernel : public KernelLite<TARGET(kHost), PRECISION(kFloat)> {
public:
void Run() override {
LOG(INFO) << "SomeKernel executed";
......
......@@ -52,7 +52,7 @@ class OpLiteRegistor : public Registor<OpClass> {
// Factory alias producing type-erased kernels (as unique_ptr<KernelBase>)
// for one concrete (target, precision, layout) combination.
// NOTE(review): the diff scrape kept both the pre-rename (OpKernel) and
// post-rename (KernelLite) alias bodies; only the renamed one is the
// post-commit source, so the stale line is dropped here.
template <TargetType Target, PrecisionType Precision, DataLayoutType Layout>
using KernelRegistryForTarget =
    Factory<KernelLite<Target, Precision, Layout>, std::unique_ptr<KernelBase>>;
class KernelRegistry final {
public:
......
......@@ -42,7 +42,7 @@ void CopyToHostSync(void* target, const void* source, size_t size) {
* This kernel copies a tensor from host to CUDA space.
*/
class IoCopyHostToCudaCompute
: public OpKernel<TARGET(kCUDA), PRECISION(kAny), DATALAYOUT(kAny)> {
: public KernelLite<TARGET(kCUDA), PRECISION(kAny), DATALAYOUT(kAny)> {
public:
void Run() override {
auto& param = Param<operators::IoCopyParam>();
......@@ -77,7 +77,7 @@ class IoCopyHostToCudaCompute
* This kernel copies a tensor from CUDA to host space.
*/
class IoCopyCudaToHostCompute
: public OpKernel<TARGET(kCUDA), PRECISION(kAny), DATALAYOUT(kAny)> {
: public KernelLite<TARGET(kCUDA), PRECISION(kAny), DATALAYOUT(kAny)> {
public:
void Run() override {
auto& param = Param<operators::IoCopyParam>();
......
......@@ -30,7 +30,7 @@ void mul_compute(const lite::cuda::Blas<float>& blas, const T* x, int x_h,
nullptr, out, x_h);
}
class MulCompute : public OpKernel<TARGET(kCUDA), PRECISION(kFloat)> {
class MulCompute : public KernelLite<TARGET(kCUDA), PRECISION(kFloat)> {
public:
using param_t = operators::MulParam;
......
......@@ -23,7 +23,7 @@ namespace lite {
namespace kernels {
namespace host {
class FcCompute : public OpKernel<TARGET(kHost), PRECISION(kFloat)> {
class FcCompute : public KernelLite<TARGET(kHost), PRECISION(kFloat)> {
public:
using param_t = operators::FcParam;
......
......@@ -21,7 +21,7 @@ namespace kernels {
namespace host {
class FeedCompute
: public OpKernel<TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny)> {
: public KernelLite<TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny)> {
public:
using param_t = operators::FeedParam;
......
......@@ -21,7 +21,7 @@ namespace kernels {
namespace host {
class FetchCompute
: public OpKernel<TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny)> {
: public KernelLite<TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny)> {
public:
using param_t = operators::FeedParam;
......
......@@ -35,7 +35,7 @@ void mul_compute_eigen(const T* x, int x_h, int x_w, const T* y, int y_h,
Out = X * Y;
}
class MulCompute : public OpKernel<TARGET(kHost), PRECISION(kFloat)> {
class MulCompute : public KernelLite<TARGET(kHost), PRECISION(kFloat)> {
public:
using param_t = operators::MulParam;
......
......@@ -21,7 +21,7 @@ namespace lite {
namespace kernels {
namespace host {
class ReluCompute : public OpKernel<TARGET(kHost), PRECISION(kFloat)> {
class ReluCompute : public KernelLite<TARGET(kHost), PRECISION(kFloat)> {
public:
void Run() override {
auto& theparam = Param<operators::ReluParam>();
......
......@@ -31,7 +31,7 @@ void scale_compute(const T* x, T* out, int size, float scale, float bias,
}
}
class ScaleCompute : public OpKernel<TARGET(kHost), PRECISION(kFloat)> {
class ScaleCompute : public KernelLite<TARGET(kHost), PRECISION(kFloat)> {
public:
using param_t = operators::MulParam;
......
Markdown is supported
加载进度：0%
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册