Commit b6533389 authored by Yuan Shuai, committed by GitHub

[LITE][PROFILE] Add ops calc. of operators for profiler. test=develop (#3680)

* add calltime/gops for summary. test=develop

* fix act case for kUnk. test=develop
Parent: 77d07df4
......@@ -24,10 +24,16 @@ namespace profile {
namespace {
auto op_comp = [](const OpCharacter& c1, const OpCharacter& c2) {
if (c1.kernel_func_name == "NotImpl" && c2.kernel_func_name == "NotImpl") {
return (c1.target < c2.target) || (c1.op_type < c2.op_type) ||
(c1.kernel_name < c2.kernel_name) || (c1.remark < c2.remark);
} else { // compare with ch.kernel_func_name
return (c1.target < c2.target) || (c1.op_type < c2.op_type) ||
(c1.kernel_name < c2.kernel_name) ||
(c1.kernel_func_name < c2.kernel_func_name);
}
};
}  // namespace
std::map<Type, std::string> TypeStr{
{Type::kUnk, "Unknown"},
......@@ -88,6 +94,36 @@ void Profiler::StopTiming(Type type, const int index, KernelContext* ctx) {
#endif
}
int Profiler::GetKernelFuncCalledTimes(const std::string& op_type,
const std::string& kernel_func_name) {
int count = 0;
for (size_t i = 0; i < units_.size(); ++i) {
if ((units_[i].character.kernel_func_name == kernel_func_name) &&
(units_[i].character.kernel_func_name != "NotImpl")) {
++count;
} else if ((units_[i].character.kernel_func_name == "NotImpl") &&
(units_[i].character.op_type == op_type)) {
++count;
}
}
return count;
}
float Profiler::GetKernelFuncSummaryGOPs(const std::string& op_type,
const std::string& kernel_func_name) {
float GOPs = 0;
for (size_t i = 0; i < units_.size(); ++i) {
if ((units_[i].character.kernel_func_name == kernel_func_name) &&
(units_[i].character.kernel_func_name != "NotImpl")) {
GOPs += units_[i].character.macs;
} else if ((units_[i].character.kernel_func_name == "NotImpl") &&
(units_[i].character.op_type == op_type)) {
GOPs += units_[i].character.macs;
}
}
return GOPs * 1e-9f;
}
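
The two helpers above fold per-run profile units into per-kernel aggregates: a unit matches by `kernel_func_name` when the kernel reported one, and falls back to matching `op_type` when the name is "NotImpl". A minimal standalone sketch of that matching rule (the simplified struct and sample data are hypothetical, not the real Profiler API):

```cpp
#include <iostream>
#include <string>
#include <vector>

struct Character {            // stand-in for profile::OpCharacter
  std::string op_type;
  std::string kernel_func_name;
  float macs;                 // multiply-accumulates recorded for this unit
};

// Mirrors GetKernelFuncCalledTimes: match by kernel function name when the
// kernel reported one, otherwise fall back to the op type.
int CalledTimes(const std::vector<Character>& units,
                const std::string& op_type,
                const std::string& kernel_func_name) {
  int count = 0;
  for (const auto& u : units) {
    if (u.kernel_func_name != "NotImpl"
            ? u.kernel_func_name == kernel_func_name
            : u.op_type == op_type) {
      ++count;
    }
  }
  return count;
}

// Mirrors GetKernelFuncSummaryGOPs: sum MACs over the same matches.
float SummaryGOPs(const std::vector<Character>& units,
                  const std::string& op_type,
                  const std::string& kernel_func_name) {
  float macs = 0.f;
  for (const auto& u : units) {
    if (u.kernel_func_name != "NotImpl"
            ? u.kernel_func_name == kernel_func_name
            : u.op_type == op_type) {
      macs += u.macs;
    }
  }
  return macs * 1e-9f;  // MACs -> giga-operations
}

int main() {
  std::vector<Character> units = {
      {"conv2d", "conv_3x3s1_fp32", 1.2e9f},
      {"conv2d", "conv_3x3s1_fp32", 1.2e9f},
      {"relu", "NotImpl", 1.5e5f},  // kernel did not report a function name
  };
  std::cout << CalledTimes(units, "conv2d", "conv_3x3s1_fp32") << "\n";  // 2
  std::cout << SummaryGOPs(units, "conv2d", "conv_3x3s1_fp32") << "\n";  // 2.4
}
```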
std::string Profiler::Summary(Type type, bool concise, size_t w) {
using std::setw;
using std::left;
......@@ -108,13 +144,11 @@ std::string Profiler::Summary(Type type, bool concise, size_t w) {
<< " warm-ups =====" << std::endl;
}
ss << setw(20) << left << "OperatorType"
<< " " << setw(30) << left << "KerneAttr(Place)"
<< " " << setw(24) << left << "KernelFuncName";
if (!concise) {
ss << " " << setw(24) << left << "KernelName";
}
ss << " " << setw(26) << left << "Remark";
if (!concise) {
ss << " " << setw(15) << left << "InDim"
ss << " " << setw(26) << left << "Remark"
<< " " << setw(15) << left << "InDim"
<< " " << setw(15) << left << "FilterDim"
<< " " << setw(15) << left << "OutDim";
}
......@@ -124,10 +158,13 @@ std::string Profiler::Summary(Type type, bool concise, size_t w) {
if (!concise) {
ss << " " << setw(7) << left << "Last(ms)";
}
ss << " " << setw(7) << left << "Avg(%)";
ss << " " << setw(7) << left << "Avg(%)"
<< " " << setw(7) << left << "GOPs";
if (!concise) {
ss << " " << setw(7) << left << "GOPs"
<< " " << setw(7) << left << "GOPS";
ss << " " << setw(7) << left << "GOPS";
}
if (concise) {
ss << " " << setw(11) << left << "CalledTimes";
}
#ifdef LITE_WITH_OPENCL
ss << " " << setw(9) << left << "clAvg(ms)"
......@@ -185,14 +222,20 @@ std::string Profiler::Summary(Type type, bool concise, size_t w) {
// clang-format off
ss << setw(20) << left << fixed << item.first.op_type
<< " " << setw(30) << left << fixed << item.first.kernel_attr
<< " " << setw(26) << left << fixed << item.first.remark
<< " " << setw(24) << left << fixed << item.first.kernel_func_name
<< " " << setw(7) << left << fixed << setprecision(3)
<< item.second.avg
<< " " << setw(7) << left << fixed << setprecision(3)
<< item.second.min
<< " " << setw(7) << left << fixed << setprecision(3)
<< item.second.max
<< " " << setprecision(2) << percent << "% ";
<< " " << setprecision(2) << percent << "% "
<< " " << setw(7) << left << fixed << setprecision(3)
<< GetKernelFuncSummaryGOPs(item.first.op_type,
item.first.kernel_func_name)
<< " " << setw(11) << left << fixed
<< GetKernelFuncCalledTimes(item.first.op_type,
item.first.kernel_func_name);
#ifdef LITE_WITH_OPENCL
float cl_percent = 0;
if (cl_total > 0) {
......@@ -204,7 +247,7 @@ std::string Profiler::Summary(Type type, bool concise, size_t w) {
<< item.second.cl_min
<< " " << setw(9) << left << fixed << setprecision(3)
<< item.second.cl_max
<< " " << left << fixed <<setprecision(2) << cl_percent << "% ";
<< " " << left << fixed << setprecision(2) << cl_percent << "% ";
#endif
ss << std::endl;
// clang-format on
......@@ -253,7 +296,7 @@ std::string Profiler::Summary(Type type, bool concise, size_t w) {
<< " " << setw(7) << left << fixed << setprecision(3) << times.Max(w)
<< " " << setw(7) << left << fixed << setprecision(3) << times.Last(w)
<< " " << left << setprecision(2) << percent << "% "
<< " " << setw(7) << left << fixed << setprecision(2)
<< " " << setw(7) << left << fixed << setprecision(3)
<< 1e-9f * unit.Character().macs
<< " " << setw(7) << left << fixed << setprecision(2)
<< 1e-6f * unit.Character().macs / times.Avg(w);
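
For the verbose rows, the last two columns convert the recorded MACs: GOPs = 1e-9 * macs, and GOPS = 1e-6 * macs / avg_ms (MACs per millisecond scaled to giga-ops per second). A small worked check of those conversions, with made-up numbers:

```cpp
// Unit-conversion check for the GOPs/GOPS columns (made-up numbers; assumes
// the recorded average time is in milliseconds, as in Summary above).
#include <cstdio>

int main() {
  float macs = 231.0e6f;  // e.g. a layer with 231 M multiply-accumulates
  float avg_ms = 3.5f;    // average kernel time per run
  float gops = 1e-9f * macs;                 // total work: 0.231 GOPs
  float gops_per_s = 1e-6f * macs / avg_ms;  // throughput: 66 GOPS
  std::printf("GOPs = %.3f, GOPS = %.2f\n", gops, gops_per_s);
  return 0;
}
```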
......
......@@ -101,6 +101,10 @@ class Profiler final {
void StartTiming(Type type, const int index, KernelContext* ctx);
void StopTiming(Type type, const int index, KernelContext* ctx);
std::string Summary(Type type, bool concise = true, size_t warm_up = 10);
int GetKernelFuncCalledTimes(const std::string& op_type,
const std::string& kernel_func_name);
float GetKernelFuncSummaryGOPs(const std::string& op_type,
const std::string& kernel_func_name);
OpCharacter* GetOpCharacter(const size_t index);
private:
......
......@@ -15,6 +15,9 @@
#pragma once
#include <string>
#include "lite/core/op_lite.h"
#ifdef LITE_WITH_PROFILE
#include "lite/api/paddle_place.h"
#endif
namespace paddle {
namespace lite {
......@@ -34,6 +37,58 @@ class ActivationOp : public OpLite {
std::string DebugString() const override { return "activation_op"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter* ch) {
auto input_dims = param_.X->dims();
auto output_dims = param_.Out->dims();
ch->input_shape = ch->DimToStr(input_dims);
ch->output_shape = ch->DimToStr(output_dims);
ch->remark = ActivationTypeToStr(param_.active_type);
switch (param_.active_type) {
case lite_api::ActivationType::kRelu:
ch->macs = param_.X->numel();
break;
case lite_api::ActivationType::kRelu6:
ch->macs = param_.X->numel() * 2.0;
break;
case lite_api::ActivationType::kLeakyRelu:
ch->macs = param_.X->numel() * 2.0;
break;
case lite_api::ActivationType::kPRelu:
ch->macs = param_.X->numel() * 2.0;
break;
case lite_api::ActivationType::kSwish:
ch->macs = param_.X->numel() * 4.0;
break;
case lite_api::ActivationType::kSigmoid:
ch->macs = param_.X->numel() * 3.0;
break;
case lite_api::ActivationType::kTanh:
ch->macs = param_.X->numel() * 5.0;
break;
case lite_api::ActivationType::kExp:
ch->macs = param_.X->numel();
break;
case lite_api::ActivationType::kAbs:
ch->macs = param_.X->numel();
break;
case lite_api::ActivationType::kHardSwish:
ch->macs = param_.X->numel() * 5.0;
break;
case lite_api::ActivationType::kReciprocal:
ch->macs = param_.X->numel();
break;
case lite_api::ActivationType::kIndentity:
break;
default:
LOG(FATAL) << "This Type of Activation:"
<< static_cast<int>(param_.active_type)
<< ActivationTypeToStr(param_.active_type)
<< " doesn't support";
}
}
#endif
private:
mutable operators::ActivationParam param_;
};
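
The switch above assigns a heuristic per-element operation count to each activation (e.g. 1 for kRelu, 5 for kTanh), which the profiler later scales into GOPs. A quick sketch of that arithmetic, with an assumed 1x3x224x224 input:

```cpp
// Heuristic-cost check (a sketch; the per-element factors are the weights
// from the switch above, and the 1x3x224x224 shape is assumed).
#include <cstdint>
#include <cstdio>

int main() {
  int64_t numel = 1LL * 3 * 224 * 224;  // elements in the activation input
  float relu_macs = 1.0f * numel;       // kRelu: 1 op per element
  float tanh_macs = 5.0f * numel;       // kTanh: ~5 ops per element
  std::printf("relu: %.6f GOPs, tanh: %.6f GOPs\n",
              1e-9f * relu_macs, 1e-9f * tanh_macs);
  return 0;
}
```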
......
......@@ -39,6 +39,17 @@ class AffineChannelOpLite : public OpLite {
std::string DebugString() const override { return "affine_channel"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
auto input_dims = param_.X->dims();
auto output_dims = param_.Out->dims();
ch->input_shape = ch->DimToStr(input_dims);
ch->output_shape = ch->DimToStr(output_dims);
ch->remark = param_.data_layout;
ch->macs = param_.X->numel() * 2.0;
}
#endif
private:
mutable AffineChannelParam param_;
};
......
......@@ -39,6 +39,27 @@ class ArgmaxOpLite : public OpLite {
std::string DebugString() const override { return "argmax"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
auto input_dims = param_.X->dims();
auto output_dims = param_.Out->dims();
ch->input_shape = ch->DimToStr(input_dims);
ch->output_shape = ch->DimToStr(output_dims);
ch->remark = "axis" + std::to_string(param_.Axis);
auto axis = param_.Axis;
if (axis < 0) {
axis += input_dims.size();
}
int max_num = 1;
for (int64_t i = axis + 1; i < input_dims.size(); i++)
max_num *= input_dims[i];
float gops = 1.0f;
for (int i = 1; i <= max_num; i++) gops *= i;
ch->macs = gops * output_dims.production();
}
#endif
private:
mutable ArgmaxParam param_;
};
......
......@@ -37,6 +37,17 @@ class AssignOpLite : public OpLite {
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "assign"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
auto input_dims = param_.X->dims();
auto output_dims = param_.Out->dims();
ch->input_shape = ch->DimToStr(input_dims);
ch->output_shape = ch->DimToStr(output_dims);
// ch->remark = "";
ch->macs = param_.X->numel() * 1.0;
}
#endif
private:
mutable AssignParam param_;
};
......
......@@ -39,6 +39,17 @@ class AssignValueOpLite : public OpLite {
std::string DebugString() const override { return "assign value"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
// auto input_dims = param_.X->dims();
auto output_dims = param_.Out->dims();
// ch->input_shape = ch->DimToStr(input_dims);
ch->output_shape = ch->DimToStr(output_dims);
ch->remark = "dtype" + std::to_string(param_.dtype);
ch->macs = param_.Out->numel() * 1.0;
}
#endif
private:
mutable AssignValueParam param_;
};
......
......@@ -39,6 +39,17 @@ class AxpyOpLite : public OpLite {
std::string DebugString() const override { return "axpy"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
auto input_dims = param_.X->dims();
auto output_dims = param_.Out->dims();
ch->input_shape = ch->DimToStr(input_dims);
ch->output_shape = ch->DimToStr(output_dims);
// ch->remark = "";
ch->macs = param_.X->numel() * 2.0;
}
#endif
private:
mutable AxpyParam param_;
};
......
......@@ -37,6 +37,17 @@ class BatchNormOp : public OpLite {
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "batch_norm"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
auto input_dims = param_.x->dims();
auto output_dims = param_.y->dims();
ch->input_shape = ch->DimToStr(input_dims);
ch->output_shape = ch->DimToStr(output_dims);
// ch->remark = "";
ch->macs = param_.y->numel() * 2.0;
}
#endif
private:
mutable BatchNormParam param_;
};
......
......@@ -39,6 +39,17 @@ class BoxClipOpLite : public OpLite {
std::string DebugString() const override { return "box clip"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
auto input_dims = param_.Input->dims();
auto output_dims = param_.Output->dims();
ch->input_shape = ch->DimToStr(input_dims);
ch->output_shape = ch->DimToStr(output_dims);
// ch->remark = "";
ch->macs = param_.Output->numel() * 2.0;
}
#endif
private:
mutable BoxClipParam param_;
};
......
......@@ -34,8 +34,21 @@ class BoxCoderOpLite : public OpLite {
bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "box_coder"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
// auto input_dims = param_.Input->dims();
// auto output_dims = param_.Output->dims();
// ch->input_shape = ch->DimToStr(input_dims);
// ch->output_shape = ch->DimToStr(output_dims);
ch->remark = "proposals" + std::to_string(param_.proposals->dims()[0]) +
"x" + std::to_string(param_.proposals->dims()[1]);
ch->macs = param_.proposals->dims()[0] * param_.proposals->dims()[1] * 30.f;
}
#endif
private:
mutable BoxCoderParam param_;
};
......
......@@ -50,6 +50,17 @@ class CalibOpLite : public OpLite {
std::string DebugString() const override { return "calib"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
auto input_dims = param_.input->dims();
auto output_dims = param_.output->dims();
ch->input_shape = ch->DimToStr(input_dims);
ch->output_shape = ch->DimToStr(output_dims);
ch->remark = "scale" + std::to_string(param_.scale);
ch->macs = param_.output->numel() * 1.0f;
}
#endif
private:
mutable CalibParam param_;
};
......
......@@ -38,6 +38,18 @@ class CompareOp : public OpLite {
std::string DebugString() const override { return "binary logical"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
auto output_dims = param_.Out->dims();
ch->input_shape = "X:" + ch->DimToStr(param_.X->dims()) + "Y:" +
ch->DimToStr(param_.Y->dims());
ch->output_shape = ch->DimToStr(output_dims);
ch->remark = "axis" + std::to_string(param_.axis) + "force_cpu" +
std::to_string(param_.force_cpu);
ch->macs = param_.Out->numel() * 1.0f;
}
#endif
private:
mutable CompareParam param_;
};
......
......@@ -37,6 +37,21 @@ class ConcatOpLite : public OpLite {
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "concat"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
auto output_dims = param_.output->dims();
std::string inputs_shape = "";
for (size_t i = 0; i < param_.x.size(); ++i) {
inputs_shape += ch->DimToStr(param_.x[i]->dims());
if (i != param_.x.size() - 1) inputs_shape += "/";
}
ch->input_shape = inputs_shape;
ch->output_shape = ch->DimToStr(output_dims);
ch->remark = "axis" + std::to_string(param_.axis);
ch->macs = 0.f; // no calc. only io operation
}
#endif
private:
mutable ConcatParam param_;
};
......
......@@ -21,6 +21,9 @@
#include "lite/core/tensor.h"
#include "lite/operators/op_params.h"
#include "lite/utils/all.h"
#ifdef LITE_WITH_PROFILE
#include "lite/api/paddle_place.h"
#endif
namespace paddle {
namespace lite {
......@@ -42,6 +45,29 @@ class ConvTransposeOpLite : public OpLite {
std::string DebugString() const override { return "conv_transpose"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
auto filter_dims = param_.filter->dims();
auto input_dims = param_.x->dims();
auto output_dims = param_.output->dims();
ch->input_shape = ch->DimToStr(input_dims);
ch->output_shape = ch->DimToStr(output_dims);
ch->filter_shape = ch->DimToStr(filter_dims);
ch->remark =
std::to_string(filter_dims[2]) + "x" + std::to_string(filter_dims[3]) +
"p" + std::to_string((*param_.paddings)[0]) + "s" +
std::to_string(param_.strides[0]) + "g" +
std::to_string(param_.groups) + "d" +
std::to_string((*param_.dilations)[0]) + (param_.bias ? "Bias" : "") +
ActivationTypeToStr(param_.activation_param.active_type);
// MACs = 2.f * kw * kh * batchsize * out_c * out_h * out_w * in_c / group
// GMACs = 1e-9f * MACs
// GMACPS = 1e-6f * MACs / predict_ms
ch->macs = 2.f * filter_dims[2] * filter_dims[3] *
output_dims.production() * input_dims[1] / param_.groups;
}
#endif
private:
mutable ConvParam param_;
std::string padding_algorithm_{""};
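
The comment spells out the estimate: MACs = 2 * kw * kh * N * out_c * out_h * out_w * in_c / group, which the code forms from output_dims.production() and input_dims[1]. A worked check with hypothetical shapes:

```cpp
// Worked MACs check for conv_transpose (hypothetical shapes).
#include <cstdio>

int main() {
  // input 1x64x32x32, filter 64x32x4x4, stride 2 -> output 1x32x64x64
  float kh = 4.f, kw = 4.f;
  float in_c = 64.f, groups = 1.f;
  float out_production = 1.f * 32.f * 64.f * 64.f;  // N*out_c*out_h*out_w
  float macs = 2.f * kw * kh * out_production * in_c / groups;
  std::printf("macs = %.0f (%.3f GOPs)\n", macs, 1e-9f * macs);  // 0.268 GOPs
  return 0;
}
```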
......
......@@ -35,6 +35,17 @@ class ElementwiseOp : public OpLite {
std::string DebugString() const override { return "elementwise_op"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter* ch) {
auto output_dims = param_.Out->dims();
ch->input_shape = "X" + ch->DimToStr(param_.X->dims()) + "Y" +
ch->DimToStr(param_.Y->dims());
ch->output_shape = ch->DimToStr(output_dims);
ch->remark = "axis" + std::to_string(param_.axis);
ch->macs = 1.0f * param_.Out->numel();
}
#endif
private:
mutable operators::ElementwiseParam param_;
};
......
......@@ -43,6 +43,17 @@ class FcOpLite : public OpLite {
std::string DebugString() const override { return "fc"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
auto m = param_.input->dims().count(0, param_.in_num_col_dims);
ch->input_shape = ch->DimToStr(param_.input->dims());
ch->filter_shape = ch->DimToStr(param_.w->dims());
ch->output_shape = ch->DimToStr(param_.output->dims());
ch->remark = (param_.bias ? "Bias" : "") + param_.activation_type;
ch->macs = m * param_.w->dims()[0] * param_.w->dims()[1] * 3.0f;
}
#endif
private:
mutable FcParam param_;
};
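
Here m is the row count after flattening the first in_num_col_dims input dimensions (DDim::count(0, k) is the product of the first k extents), and the 3.0f factor is this op's heuristic weight. A standalone sketch with assumed shapes:

```cpp
// Sketch of the fc row-count flattening (assumed shapes; count(0, k) is
// the product of the first k extents).
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int64_t> in_dims = {4, 8, 3, 3};  // input, in_num_col_dims = 1
  int in_num_col_dims = 1;
  int64_t m = 1;
  for (int i = 0; i < in_num_col_dims; ++i) m *= in_dims[i];  // m = 4
  int64_t w0 = 72, w1 = 100;  // W flattened to in-features x out-features
  std::printf("m = %lld, macs = %.0f\n",
              static_cast<long long>(m), m * w0 * w1 * 3.0f);  // 86400
  return 0;
}
```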
......
......@@ -38,6 +38,15 @@ class IncrementOp : public OpLite {
std::string DebugString() const override { return "increment"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.X->dims());
ch->output_shape = ch->DimToStr(param_.Out->dims());
ch->remark = "step" + std::to_string(param_.step);
ch->macs = param_.X->numel() * 1.0f;
}
#endif
private:
mutable IncrementParam param_;
};
......
......@@ -36,8 +36,22 @@ class InstanceNormOp : public OpLite {
bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "instance_norm"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.x->dims());
ch->output_shape = ch->DimToStr(param_.out->dims());
// ch->remark = "";
auto x_dims = param_.x->dims();
auto nc = x_dims[0] * x_dims[1];
auto hw = x_dims[2] * x_dims[3];
auto nchw = x_dims.production();
ch->macs = 5.f * nchw + 3.f * (nc + hw);
}
#endif
private:
mutable InstanceNormParam param_;
};
......
......@@ -36,8 +36,18 @@ class InterpolateOp : public OpLite {
bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "interpolate"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.X->dims());
ch->output_shape = ch->DimToStr(param_.Out->dims());
ch->remark = param_.interp_method;
ch->macs = param_.Out->numel() * 14.f;
}
#endif
private:
mutable InterpolateParam param_;
};
......
......@@ -38,6 +38,15 @@ class LayerNormOp : public OpLite {
std::string DebugString() const override { return "layer_norm"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.X->dims());
ch->output_shape = ch->DimToStr(param_.Y->dims());
ch->remark = "begin_norm_axis" + std::to_string(param_.begin_norm_axis);
ch->macs = param_.Y->numel() * 7.f;
}
#endif
private:
mutable LayerNormParam param_;
};
......
......@@ -38,6 +38,16 @@ class BinaryLogicalOp : public OpLite {
std::string DebugString() const override { return "binary logical"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = "X" + ch->DimToStr(param_.X->dims()) + "Y" +
ch->DimToStr(param_.Y->dims());
ch->output_shape = ch->DimToStr(param_.Out->dims());
// ch->remark = "";
ch->macs = param_.Out->numel() * 3.f;
}
#endif
private:
mutable LogicalParam param_;
};
......@@ -57,6 +67,16 @@ class UnaryLogicalOp : public OpLite {
std::string DebugString() const override { return "binary logical"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = "X" + ch->DimToStr(param_.X->dims()) + "Y" +
ch->DimToStr(param_.Y->dims());
ch->output_shape = ch->DimToStr(param_.Out->dims());
// ch->remark = "";
ch->macs = param_.Out->numel() * 3.f;
}
#endif
private:
mutable LogicalParam param_;
};
......
......@@ -33,8 +33,18 @@ class LrnOpLite : public OpLite {
bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "lrn"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.X->dims());
ch->output_shape = ch->DimToStr(param_.Out->dims());
ch->remark = "n" + std::to_string(param_.n) + param_.norm_region;
ch->macs = param_.Out->numel() * param_.k * 2.f;
}
#endif
private:
mutable LrnParam param_;
};
......
......@@ -41,6 +41,31 @@ class MatMulOpLite : public OpLite {
std::string DebugString() const override { return "matmul"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.X->dims());
ch->filter_shape = ch->DimToStr(param_.Y->dims());
ch->output_shape = ch->DimToStr(param_.Out->dims());
ch->remark = "alpha" + std::to_string(param_.alpha) + "trans_x" +
std::to_string(param_.transpose_X) + "trans_y" +
std::to_string(param_.transpose_Y);
auto x_dims = param_.X->dims();
auto y_dims = param_.Y->dims();
auto m = x_dims[x_dims.size() - 2];
auto k = x_dims[x_dims.size() - 1];
auto n = y_dims[y_dims.size() - 1];
if (param_.transpose_X) {
m = x_dims[x_dims.size() - 1];
k = x_dims[x_dims.size() - 2];
}
if (param_.transpose_Y) {
n = y_dims[y_dims.size() - 2];
}
ch->macs = 3.f * m * n * k;
}
#endif
private:
mutable MatMulParam param_;
};
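
The transpose flags decide which of the two trailing dimensions of X and Y act as M, K, and N. A standalone check of that selection with assumed shapes (plain vectors stand in for lite::DDim):

```cpp
// Standalone check of the M/K/N selection under transpose flags.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int64_t> x_dims = {8, 16};   // X: 8x16
  std::vector<int64_t> y_dims = {32, 16};  // Y: 32x16, used with trans_y
  bool trans_x = false, trans_y = true;
  int64_t m = trans_x ? x_dims.back() : x_dims[x_dims.size() - 2];
  int64_t k = trans_x ? x_dims[x_dims.size() - 2] : x_dims.back();
  int64_t n = trans_y ? y_dims[y_dims.size() - 2] : y_dims.back();
  // X(8x16) * Y^T(16x32) -> Out(8x32); macs uses the 3.f factor above.
  std::printf("m=%lld k=%lld n=%lld macs=%.0f\n",
              static_cast<long long>(m), static_cast<long long>(k),
              static_cast<long long>(n), 3.f * m * n * k);
  return 0;
}
```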
......
......@@ -35,6 +35,15 @@ class MeanOp : public OpLite {
std::string DebugString() const override { return "mean"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.X->dims());
ch->output_shape = ch->DimToStr(param_.Out->dims());
// ch->remark = "";
ch->macs = param_.X->numel() * 1.f;
}
#endif
private:
mutable operators::MeanParam param_;
};
......
......@@ -63,6 +63,20 @@ class MulOpLite : public OpLite {
std::string DebugString() const override { return "mul"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.x->dims());
ch->filter_shape = ch->DimToStr(param_.y->dims());
ch->output_shape = ch->DimToStr(param_.output->dims());
// ch->remark = "";
auto x_dims = param_.x->dims();
auto y_dims = param_.y->dims();
auto x_mat_dims = x_dims.Flatten2D(param_.x_num_col_dims);
auto y_mat_dims = y_dims.Flatten2D(param_.y_num_col_dims);
ch->macs = 1.f * x_mat_dims[0] * x_mat_dims[1] * y_mat_dims[1];
}
#endif
private:
mutable MulParam param_;
};
......
......@@ -35,8 +35,18 @@ class NegativeOpLite : public OpLite {
bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "negative"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.X->dims());
ch->output_shape = ch->DimToStr(param_.Out->dims());
// ch->remark = "";
ch->macs = 1.f * param_.Out->numel();
}
#endif
private:
mutable NegativeParam param_;
};
......
......@@ -36,8 +36,18 @@ class PowerOp : public OpLite {
bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "power"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.X->dims());
ch->output_shape = ch->DimToStr(param_.Out->dims());
// ch->remark = "";
ch->macs = param_.Out->numel() * 3.0f;
}
#endif
private:
mutable PowerParam param_;
};
......
......@@ -32,8 +32,29 @@ class ReduceMaxOp : public OpLite {
bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "reduce_max"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.X->dims());
ch->output_shape = ch->DimToStr(param_.Out->dims());
ch->remark = "keep_dim" + std::to_string(param_.keep_dim);
auto dims = param_.dim;
auto in_sum = param_.X->numel();
if (dims.size() == 0 || dims.size() == 1) {
ch->macs = 1.f * in_sum;
} else if (dims.size() == 2) {
ch->macs = 2.f * in_sum;
} else {
LOG(FATAL) << "This dims size of ReduceMaxParm: " << dims.size()
<< " doesn't support";
ch->macs = 0.f;
}
}
#endif
private:
mutable ReduceMaxParam param_;
};
......
......@@ -26,14 +26,41 @@ namespace operators {
class ReduceMeanOp : public OpLite {
public:
ReduceMeanOp() {}
explicit ReduceMeanOp(const std::string &op_type) : OpLite(op_type) {}
bool CheckShape() const override;
bool InferShapeImpl() const override;
bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "reduce_mean"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.X->dims());
ch->output_shape = ch->DimToStr(param_.Out->dims());
ch->remark = "keep_dim" + std::to_string(param_.keep_dim);
auto dims = param_.dim;
auto in_sum = param_.X->numel();
if (dims.size() == 0) {
ch->macs = 1.f * in_sum;
} else if (dims.size() == 1) {
ch->macs = 2.f * in_sum;
} else if (dims.size() == 2) {
ch->macs = 4.f * in_sum;
} else {
LOG(FATAL) << "This dims size of ReduceMean: " << dims.size()
<< " doesn't support";
ch->macs = 0.f;
}
}
#endif
private:
mutable ReduceMeanParam param_;
};
......
......@@ -37,6 +37,27 @@ class ReduceProdOpLite : public OpLite {
std::string DebugString() const override { return "reduce_prod"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.x->dims());
ch->output_shape = ch->DimToStr(param_.output->dims());
ch->remark = "keep_dim" + std::to_string(param_.keep_dim) + "reduce_all" +
std::to_string(param_.reduce_all);
auto dims = param_.dim;
auto in_sum = param_.x->numel();
if (dims.size() == 0 || dims.size() == 1) {
ch->macs = 1.f * in_sum;
} else if (dims.size() == 2) {
ch->macs = 2.f * in_sum;
} else {
LOG(FATAL) << "This dims size of ReduceProd: " << dims.size()
<< " doesn't support";
ch->macs = 0.f;
}
}
#endif
private:
mutable ReduceParam param_;
};
......
......@@ -18,6 +18,9 @@
#include "lite/core/op_lite.h"
#include "lite/core/scope.h"
#include "lite/utils/all.h"
#ifdef LITE_WITH_PROFILE
#include "lite/api/paddle_place.h"
#endif
namespace paddle {
namespace lite {
......@@ -35,8 +38,61 @@ class ReluOp : public OpLite {
bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "relu"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
auto input_dims = param_.X->dims();
auto output_dims = param_.Out->dims();
ch->input_shape = ch->DimToStr(input_dims);
ch->output_shape = ch->DimToStr(output_dims);
ch->remark = ActivationTypeToStr(param_.active_type);
switch (param_.active_type) {
case lite_api::ActivationType::kRelu:
ch->macs = param_.X->numel();
break;
case lite_api::ActivationType::kRelu6:
ch->macs = param_.X->numel() * 2.0;
break;
case lite_api::ActivationType::kLeakyRelu:
ch->macs = param_.X->numel() * 2.0;
break;
case lite_api::ActivationType::kPRelu:
ch->macs = param_.X->numel() * 2.0;
break;
case lite_api::ActivationType::kSwish:
ch->macs = param_.X->numel() * 4.0;
break;
case lite_api::ActivationType::kSigmoid:
ch->macs = param_.X->numel() * 3.0;
break;
case lite_api::ActivationType::kTanh:
ch->macs = param_.X->numel() * 5.0;
break;
case lite_api::ActivationType::kExp:
ch->macs = param_.X->numel();
break;
case lite_api::ActivationType::kAbs:
ch->macs = param_.X->numel();
break;
case lite_api::ActivationType::kHardSwish:
ch->macs = param_.X->numel() * 5.0;
break;
case lite_api::ActivationType::kReciprocal:
ch->macs = param_.X->numel();
break;
case lite_api::ActivationType::kIndentity:
break;
default:
LOG(FATAL) << "This Type of Activation:"
<< static_cast<int>(param_.active_type)
<< ActivationTypeToStr(param_.active_type)
<< " doesn't support";
}
}
#endif
private:
mutable ActivationParam param_;
};
......
......@@ -35,8 +35,19 @@ class ScaleOp : public OpLite {
bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "scale"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.x->dims());
ch->output_shape = ch->DimToStr(param_.output->dims());
ch->remark =
param_.activation_type + "alpha" + std::to_string(param_.alpha);
ch->macs = param_.x->numel() * 1.f;
}
#endif
private:
mutable ScaleParam param_;
};
......
......@@ -27,17 +27,48 @@ class SearchAlignedMatMulOpLite : public OpLite {
public:
SearchAlignedMatMulOpLite() {}
explicit SearchAlignedMatMulOpLite(const std::string &type) : OpLite(type) {}
explicit SearchAlignedMatMulOpLite(const std::string& type) : OpLite(type) {}
bool CheckShape() const override;
bool InferShapeImpl() const override;
void AttachKernel(KernelBase* kernel) override { kernel->SetParam(param_); }
bool AttachImpl(const cpp::OpDesc& op_desc, lite::Scope* scope) override;
std::string DebugString() const override { return "search_aligned_mat_mul"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter* ch) {
ch->input_shape = ch->DimToStr(param_.X->dims());
ch->filter_shape = ch->DimToStr(param_.Y->dims());
ch->output_shape = ch->DimToStr(param_.Out->dims());
ch->remark = "alpha" + std::to_string(param_.alpha) + "trans_x" +
std::to_string(param_.transpose_X) + "trans_y" +
std::to_string(param_.transpose_Y);
const auto x_dims = param_.X->dims();
const auto y_dims = param_.Y->dims();
const auto& x_lod = param_.X->lod();
const auto& y_lod = param_.Y->lod();
const auto& x_lod_0 = x_lod[0];
const auto& y_lod_0 = y_lod[0];
int x_inner_size = x_dims[1];
int y_inner_size = y_dims[1];
int x_batch_size = x_lod_0[1];
int y_batch_size = y_lod_0[1];
int M = param_.transpose_X ? x_inner_size : x_batch_size;
int N = param_.transpose_Y ? y_batch_size : y_inner_size;
int X_K = param_.transpose_X ? x_batch_size : x_inner_size;
int Y_K = param_.transpose_Y ? y_inner_size : y_batch_size;
CHECK_EQ(X_K, Y_K) << "K of Input(X) and Input(Y) is not equal";
int K = X_K;
ch->macs = 2.0 * M * N * K;
}
#endif
private:
mutable MatMulParam param_;
};
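
For the LoD-aligned case, the per-sequence row counts come from the level-0 LoD offsets (x_lod_0[1] is the length of the first, equally sized sequence) rather than from the tensor dims. A sketch with a hypothetical LoD, mirroring the M/N/K selection above:

```cpp
// Sketch of the LoD-driven shape math (hypothetical LoD; assumes the
// equally aligned sequences this op requires).
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // X: LoD {0, 10, 20} over a 20x128 tensor -> each sequence is 10x128.
  std::vector<uint64_t> x_lod0 = {0, 10, 20};
  int64_t x_batch = x_lod0[1];  // rows per sequence
  int64_t x_inner = 128;        // x_dims[1]
  // Y: LoD {0, 10, 20} over a 20x64 tensor -> each sequence is 10x64.
  int64_t y_batch = 10, y_inner = 64;
  bool trans_x = true, trans_y = false;
  int64_t M = trans_x ? x_inner : x_batch;  // 128
  int64_t K = trans_x ? x_batch : x_inner;  // 10 (must equal Y's K)
  int64_t N = trans_y ? y_batch : y_inner;  // 64
  std::printf("M=%lld N=%lld K=%lld macs=%.0f\n",
              static_cast<long long>(M), static_cast<long long>(N),
              static_cast<long long>(K), 2.f * M * N * K);
  return 0;
}
```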
......
......@@ -35,8 +35,21 @@ class SearchFcOpLite : public OpLite {
bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "search_fc"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.X->dims());
ch->filter_shape = ch->DimToStr(param_.W->dims());
ch->output_shape = ch->DimToStr(param_.Out->dims());
ch->remark = "out_size" + std::to_string(param_.out_size);
auto x_dims = param_.X->dims();
auto w_dims = param_.W->dims();
ch->macs = 2.f * x_dims[0] * x_dims[1] * w_dims[0];
}
#endif
private:
mutable SearchFcParam param_;
};
......
......@@ -36,8 +36,21 @@ class SearchSeqFcOpLite : public OpLite {
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
bool AttachImpl(const cpp::OpDesc &op_desc, lite::Scope *scope) override;
std::string DebugString() const override { return "search_seq_fc"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
ch->input_shape = ch->DimToStr(param_.x->dims());
ch->filter_shape = ch->DimToStr(param_.w->dims());
ch->output_shape = ch->DimToStr(param_.out->dims());
ch->remark = "out_size" + std::to_string(param_.out_size);
auto x_dims = param_.x->dims();
auto w_dims = param_.w->dims();
ch->macs = 2.f * x_dims[0] * x_dims[1] * w_dims[0];
}
#endif
private:
mutable SearchSeqFcParam param_;
};
......
......@@ -36,8 +36,20 @@ class SearchSeqSoftmaxOp : public OpLite {
bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
std::string DebugString() const override { return "search_seq_softmax_op"; }
#ifdef LITE_WITH_PROFILE
void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
auto input_dims = param_.x->dims();
auto output_dims = param_.output->dims();
ch->input_shape = ch->DimToStr(input_dims);
ch->output_shape = ch->DimToStr(output_dims);
ch->remark = "axis" + std::to_string(param_.axis);
ch->macs = 4.f * param_.x->numel();
}
#endif
private:
mutable SoftmaxParam param_;
};
......