Commit 664bb9a8 authored by zhangwen31

[op][arm] mod: remove power_op, rename all `power` to `pow` test=develop

Parent 8205860e
@@ -159,8 +159,7 @@
| not_equal | Y |   |   |   |   |   |   |   |   |   |
| one_hot | Y |   |   |   |   |   |   |   |   |   |
| pixel_shuffle | Y |   |   | Y | Y |   |   |   |   |   |
-| pow |   |   |   |   |   |   |   |   |   |   |
-| power |   |   |   | Y |   |   |   |   |   |   |
+| pow |   |   |   | Y |   |   |   |   |   |   |
| print | Y |   |   |   |   |   |   |   |   |   |
| read_from_array | Y |   |   |   |   |   |   |   |   |   |
| reciprocal |   |   |   | Y |   |   |   |   |   |   |
......
@@ -101,7 +101,7 @@ if (NOT HAS_ARM_MATH_LIB_DIR)
activation.cc
yolo_box.cc
dropout.cc
-power.cc
+pow.cc
interpolate.cc
argmax.cc
axpy.cc
......
@@ -15,6 +15,7 @@
#pragma once
#include <arm_neon.h>
#include <algorithm>
+#include <cmath>
@@ -48,7 +49,7 @@
#include "lite/backends/arm/math/packed_sgemm_c4.h"
#include "lite/backends/arm/math/pad2d.h"
#include "lite/backends/arm/math/pooling.h"
#include "lite/backends/arm/math/power.h"
#include "lite/backends/arm/math/pow.h"
#include "lite/backends/arm/math/prior_box.h"
#include "lite/backends/arm/math/reduce_max.h"
#include "lite/backends/arm/math/reduce_mean.h"
......
@@ -12,7 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/backends/arm/math/power.h"
#include "lite/backends/arm/math/pow.h"
#include "lite/backends/arm/math/funcs.h"
namespace paddle {
@@ -26,13 +27,13 @@ void power<float>(const float* din,
const int num,
float scale_,
float shift_,
-float power_) {
+float factor_) {
int cnt = num >> 4;
int remain = num % 16;
bool _do_power = true;
bool _do_scale = true;
bool _do_shift = true;
-if (fabsf(power_ - 1.f) < 1e-6f) {
+if (fabsf(factor_ - 1.f) < 1e-6f) {
_do_power = false;
}
if (fabsf(scale_ - 1.f) < 1e-6f) {
@@ -45,7 +46,7 @@ void power<float>(const float* din,
const float* ptr_in = din;
float32x4_t vscale = vdupq_n_f32(scale_);
float32x4_t vshift = vdupq_n_f32(shift_);
-float32x4_t vpower = vdupq_n_f32(power_);
+float32x4_t vpower = vdupq_n_f32(factor_);
#pragma omp parallel for
for (int nums = 0; nums < cnt; ++nums) {
float32x4_t vr0 = vld1q_f32(ptr_in);
@@ -84,7 +85,7 @@ void power<float>(const float* din,
ptr_out += 4;
}
for (int j = 0; j < remain; ++j) {
-ptr_out[0] = std::pow((ptr_in[0] * scale_ + shift_), power_);
+ptr_out[0] = std::pow((ptr_in[0] * scale_ + shift_), factor_);
ptr_in++;
ptr_out++;
}
......
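Taken together, the pow.cc hunks compute out[i] = pow(in[i] * scale_ + shift_, factor_): the vectorized loop consumes 16 floats per iteration (cnt = num >> 4, remain = num % 16), the _do_power/_do_scale/_do_shift flags disable whichever stages are identities, and the tail falls back to std::pow. A minimal scalar sketch of the same semantics, assuming the shift test mirrors the two comparisons visible in the diff (`pow_ref` is a hypothetical name, not part of the patch):

```cpp
#include <cmath>

// Scalar reference for the vectorized kernel:
// out[i] = pow(in[i] * scale + shift, factor), with each stage
// skipped when its parameter is the identity, matching the
// _do_power/_do_scale/_do_shift flags in the diff.
void pow_ref(const float* din, float* dout, int num,
             float scale, float shift, float factor) {
  const bool do_power = std::fabs(factor - 1.f) >= 1e-6f;
  const bool do_scale = std::fabs(scale - 1.f) >= 1e-6f;
  const bool do_shift = std::fabs(shift) >= 1e-6f;  // assumed, by analogy
  for (int i = 0; i < num; ++i) {
    float v = din[i];
    if (do_scale) v *= scale;
    if (do_shift) v += shift;
    dout[i] = do_power ? std::pow(v, factor) : v;
  }
}
```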
@@ -20,12 +20,12 @@ namespace arm {
namespace math {
template <typename T>
-void power(const T* din,
+void pow(const T* din,
T* dout,
const int num,
float scale_,
float shift_,
-float power_);
+float factor_);
} /* namespace math */
} /* namespace arm */
......
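The header keeps only the renamed template declaration; the float specialization lives in pow.cc above. A hedged usage sketch (the buffer setup and the `cube_example` wrapper are illustrative, not from the patch):

```cpp
#include <vector>
#include "lite/backends/arm/math/pow.h"  // the declaration shown above

// Raise 32 values to the third power: scale = 1 and shift = 0 are
// identities, so out[i] = (2 * 1 + 0)^3 = 8.
void cube_example() {
  std::vector<float> in(32, 2.f), out(32);
  paddle::lite::arm::math::pow<float>(
      in.data(), out.data(), static_cast<int>(in.size()), 1.f, 0.f, 3.f);
}
```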
@@ -29,9 +29,7 @@ void PowCompute::Run() {
float shift = 0.0;
float power = param.factor;
-// fixme: update lite::arm::math::power if necessary, for scale and shift is
-// not used
-lite::arm::math::power(
+lite::arm::math::pow(
x_data, output_data, x_dims.production(), scale, shift, power);
}
......
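Since shift is fixed at 0.0 here (and, per the removed fixme, scale is likewise unused, presumably initialized to 1.0 just above the hunk), PowCompute reduces to an elementwise power. An equivalent scalar fragment, assuming the surrounding Run() context and the diff's x_data/output_data/param names:

```cpp
// Inside PowCompute::Run(): only the pow stage is active,
// so every output element is the input raised to param.factor.
for (int64_t i = 0; i < x_dims.production(); ++i) {
  output_data[i] = std::pow(x_data[i], param.factor);
}
```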
@@ -60,7 +60,6 @@ add_operator(sign_op extra SRCS sign_op.cc DEPS ${op_DEPS})
add_operator(negative_op extra SRCS negative_op.cc DEPS ${op_DEPS})
add_operator(crop_op extra SRCS crop_op.cc DEPS ${op_DEPS})
add_operator(assign_op extra SRCS assign_op.cc DEPS ${op_DEPS})
-add_operator(power_op extra SRCS power_op.cc DEPS ${op_DEPS})
add_operator(group_norm_op extra SRCS group_norm_op.cc DEPS ${op_DEPS})
add_operator(norm_op extra SRCS norm_op.cc DEPS ${op_DEPS})
......
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include "lite/operators/power_op.h"
-#include "lite/core/op_lite.h"
-#include "lite/core/op_registry.h"
-#include "lite/core/tensor.h"
-namespace paddle {
-namespace lite {
-namespace operators {
-bool PowerOp::CheckShape() const {
-  CHECK_OR_FALSE(param_.X);
-  CHECK_OR_FALSE(param_.Out);
-  return true;
-}
-bool PowerOp::InferShapeImpl() const {
-  param_.Out->Resize(param_.X->dims());
-  return true;
-}
-bool PowerOp::AttachImpl(const cpp::OpDesc &op_desc, lite::Scope *scope) {
-  auto X = op_desc.Input("X").front();
-  auto Out = op_desc.Output("Out").front();
-  param_.X = scope->FindVar(X)->GetMutable<lite::Tensor>();
-  param_.Out = scope->FindVar(Out)->GetMutable<lite::Tensor>();
-  param_.scale = op_desc.GetAttr<float>("scale");
-  param_.shift = op_desc.GetAttr<float>("shift");
-  param_.power = op_desc.GetAttr<float>("power");
-  CHECK(param_.X);
-  CHECK(param_.Out);
-  return true;
-}
-} /* namespace operators */
-} /* namespace lite */
-} /* namespace paddle */
-REGISTER_LITE_OP(power, paddle::lite::operators::PowerOp);
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#pragma once
-#include <string>
-#include <vector>
-#include "lite/core/op_lite.h"
-#include "lite/core/scope.h"
-#include "lite/utils/all.h"
-namespace paddle {
-namespace lite {
-namespace operators {
-/**
- * @deprecated There is NO power op in paddle fluid
- */
-class PowerOp : public OpLite {
- public:
-  PowerOp() {}
-  explicit PowerOp(const std::string &op_type) : OpLite(op_type) {}
-  bool CheckShape() const override;
-  bool InferShapeImpl() const override;
-  bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
-  void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
-  std::string DebugString() const override { return "power"; }
-#ifdef LITE_WITH_PROFILE
-  void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
-    ch->input_shape = ch->DimToStr(param_.X->dims());
-    ch->output_shape = ch->DimToStr(param_.Out->dims());
-    // ch->remark = "";
-    ch->macs = param_.Out->numel() * 3.0f;
-  }
-#endif
- private:
-  mutable PowerParam param_;
-};
-} /* namespace operators */
-} /* namespace lite */
-} /* namespace paddle */