Commit aff26fa1 authored by weihaoji

[XPU] stylize func names as camel case

Parent 92dc2ec6
@@ -26,7 +26,7 @@ namespace lite {
 namespace kernels {
 namespace xpu {
 
-inline xdnn::Activation_t get_gru_act_type(const std::string& type) {
+inline xdnn::Activation_t GetGruActType(const std::string& type) {
   std::map<std::string, xdnn::Activation_t> act_type_map = {
       {"sigmoid", xdnn::Activation_t::SIGMOID},
       {"tanh", xdnn::Activation_t::TANH},
@@ -36,6 +36,7 @@ inline xdnn::Activation_t get_gru_act_type(const std::string& type) {
     return it->second;
   } else {
     LOG(FATAL) << "unsupported activation type: " << type;
+    return xdnn::Activation_t(xdnn::Activation_t::act_enum(0));
   }
 }
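Besides the rename, this hunk adds a dummy return after `LOG(FATAL)`: the macro never returns at runtime, but the compiler's return-path analysis cannot see that, so the extra statement silences the missing-return warning. Below is a standalone sketch of the same lookup-with-fatal-fallback pattern; `ActType` and `GetActType` are hypothetical stand-ins, since `xdnn::Activation_t` is defined in the XPU SDK headers and not shown in this diff.

```cpp
// Standalone sketch of the lookup pattern (stand-in names, not the real API).
#include <cstdlib>
#include <iostream>
#include <map>
#include <string>

enum class ActType { kSigmoid, kTanh };  // hypothetical stand-in for xdnn::Activation_t

inline ActType GetActType(const std::string& type) {
  static const std::map<std::string, ActType> act_type_map = {
      {"sigmoid", ActType::kSigmoid},
      {"tanh", ActType::kTanh},
  };
  auto it = act_type_map.find(type);
  if (it != act_type_map.end()) {
    return it->second;
  }
  // Mirrors LOG(FATAL): report the bad input and abort.
  std::cerr << "unsupported activation type: " << type << std::endl;
  std::abort();  // [[noreturn]], so no dummy return is needed here; the
                 // kernel adds one because LOG(FATAL) is opaque to the
                 // compiler's return-path analysis.
}

int main() { return GetActType("tanh") == ActType::kTanh ? 0 : 1; }
```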
@@ -93,10 +94,10 @@ void GruCompute::PrepareForRun() {
                std::max(weight_c_max_cpu[2], weight_c_max_cpu[3]));
 }
 
-void GruCompute::prepare_layout(const paddle::lite::LoD& lods,
-                                int* offset_xpu,
-                                int* new_offset_xpu,
-                                int* idx_sorted_by_width_data_xpu) {
+void GruCompute::PrepareLayout(const paddle::lite::LoD& lods,
+                               int* offset_xpu,
+                               int* new_offset_xpu,
+                               int* idx_sorted_by_width_data_xpu) {
   const auto& lod = lods[0];
   for (auto i = 0; i < lod.size(); i++) {
     offset_cpu[i] = lod[i];
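Most of `PrepareLayout`'s body is elided from this hunk. As context for the rename, here is a hedged sketch of the sequence-to-batch bookkeeping such a routine typically performs; everything below is an assumption inferred from the visible parameter names and the LoD convention (`lod[0]` holds cumulative sequence offsets, e.g. `{0, 4, 7, 9}` for three sequences of lengths 4, 3, 2), not the actual kernel code.

```cpp
// Hedged sketch only: the real PrepareLayout writes into XPU buffers
// (offset_xpu, new_offset_xpu, idx_sorted_by_width_data_xpu); this version
// uses std::vector to keep the example self-contained.
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

using LoD = std::vector<std::vector<uint64_t>>;

void PrepareLayoutSketch(const LoD& lods,
                         std::vector<int>* offset,
                         std::vector<int>* idx_sorted_by_width) {
  const auto& lod = lods[0];
  offset->assign(lod.begin(), lod.end());  // cumulative offsets, as in the loop above

  const int seq_num = static_cast<int>(lod.size()) - 1;
  idx_sorted_by_width->resize(seq_num);
  std::iota(idx_sorted_by_width->begin(), idx_sorted_by_width->end(), 0);
  // Longest sequence first, so each batched time step covers a contiguous
  // prefix of the sorted sequences.
  std::sort(idx_sorted_by_width->begin(),
            idx_sorted_by_width->end(),
            [&lod](int a, int b) {
              return lod[a + 1] - lod[a] > lod[b + 1] - lod[b];
            });
}

int main() {
  LoD lods = {{0, 4, 7, 9}};  // three sequences, lengths 4, 3, 2
  std::vector<int> offset, idx;
  PrepareLayoutSketch(lods, &offset, &idx);
  // idx == {0, 1, 2} here since the lengths are already descending.
  return 0;
}
```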
@@ -162,8 +163,7 @@ void GruCompute::Run() {
   // prepare seq_info
   auto lods = input->lod();
   const auto& lod = lods[0];
-  prepare_layout(
-      lods, offset_xpu, new_offset_xpu, idx_sorted_by_width_data_xpu);
+  PrepareLayout(lods, offset_xpu, new_offset_xpu, idx_sorted_by_width_data_xpu);
   int max_width = seq_info[0].length;
   // sequence to batch
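`max_width` is the length of the longest sequence after sorting, and the "sequence to batch" step that follows packs time step `t` of every still-active sequence into contiguous rows. Assuming `new_offset` accumulates per-step batch sizes (an inference from the names; the diff does not show how `new_offset_cpu` is filled), a small worked example:

```cpp
// Worked example under the stated assumption about new_offset's meaning.
#include <iostream>
#include <vector>

int main() {
  std::vector<int> sorted_lens = {4, 3, 2};  // sequence lengths, longest first
  int max_width = sorted_lens[0];            // matches seq_info[0].length
  std::vector<int> new_offset = {0};
  for (int t = 0; t < max_width; ++t) {
    int active = 0;  // sequences that still have a step t
    for (int len : sorted_lens) active += (len > t);
    new_offset.push_back(new_offset.back() + active);
  }
  // Time step t occupies rows [new_offset[t], new_offset[t + 1]) of the
  // batched buffer.
  for (int v : new_offset) std::cout << v << ' ';  // prints: 0 3 6 8 9
  std::cout << '\n';
  return 0;
}
```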
@@ -237,8 +237,8 @@ void GruCompute::Run() {
           new_offset_cpu[batch_idx + 1] - new_offset_cpu[batch_idx],
           frame_size,
           origin_mode,
-          get_gru_act_type(param.gate_activation),
-          get_gru_act_type(param.activation),
+          GetGruActType(param.gate_activation),
+          GetGruActType(param.activation),
           x,
           xpu_h0,
           param.weight->data<float>(),
...
@@ -29,10 +29,10 @@ class GruCompute : public KernelLite<TARGET(kXPU), PRECISION(kFloat)> {
   void PrepareForRun() override;
 
-  void prepare_layout(const paddle::lite::LoD& lods,
-                      int* offset_xpu,
-                      int* new_offset_xpu,
-                      int* idx_sorted_by_width_data_xpu);
+  void PrepareLayout(const paddle::lite::LoD& lods,
+                     int* offset_xpu,
+                     int* new_offset_xpu,
+                     int* idx_sorted_by_width_data_xpu);
 
   void Run() override;
...
@@ -47,6 +47,7 @@ bool SumOpLite::AttachImpl(const cpp::OpDesc &op_desc, lite::Scope *scope) {
   param_.x.clear();
   for (auto var : inputs) {
+    CHECK(scope->FindVar(var));
     param_.x.push_back(scope->FindVar(var)->GetMutable<lite::Tensor>());
   }
   CHECK(scope->FindVar(out));
...
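The hunk above guards the `FindVar` result before `GetMutable` dereferences it: `FindVar` returns a null pointer when the variable is missing, and the `CHECK` turns a silent crash into an immediate failure that names the offending expression. A standalone illustration of the pattern follows; the `Scope` and `Variable` types below are minimal stand-ins, not Paddle Lite's real classes, and `assert` stands in for the `CHECK` macro from the framework's logging utilities.

```cpp
// Standalone illustration of the guard added above (stand-in types).
#include <cassert>
#include <map>
#include <string>

struct Variable { /* holds a tensor in the real code */ };

struct Scope {
  std::map<std::string, Variable> vars;
  Variable* FindVar(const std::string& name) {
    auto it = vars.find(name);
    return it == vars.end() ? nullptr : &it->second;
  }
};

int main() {
  Scope scope;
  scope.vars["x0"] = Variable{};
  // Without the guard, FindVar("missing")->... would dereference a null
  // pointer with no diagnostic; the added check fails fast instead.
  assert(scope.FindVar("x0") != nullptr);           // passes
  // assert(scope.FindVar("missing") != nullptr);   // would abort here
  return 0;
}
```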