Commit 727cc8dc authored by zhangyang

change format

Parent 17160531
......
@@ -51,17 +51,16 @@ class FusionConvBNMatcher : public framework::FusionOpMatcher {
 template <typename DeviceType, typename T>
 class FusionConvBNOp : public framework::OperatorWithKernel<
-                            DeviceType, FusionConvBNParam,
-                            operators::ConvBNKernel<DeviceType, T>> {
+                           DeviceType, FusionConvBNParam,
+                           operators::ConvBNKernel<DeviceType, T>> {
  public:
   FusionConvBNOp(const string &type, const VariableNameMap &inputs,
-                  const VariableNameMap &outputs,
-                  const framework::AttributeMap &attrs,
-                  std::shared_ptr<framework::Scope> scope)
-      : framework::OperatorWithKernel<
-            DeviceType, FusionConvBNParam,
-            operators::ConvBNKernel<DeviceType, T>>(type, inputs, outputs,
-                                                    attrs, scope) {}
+                 const VariableNameMap &outputs,
+                 const framework::AttributeMap &attrs,
+                 std::shared_ptr<framework::Scope> scope)
+      : framework::OperatorWithKernel<DeviceType, FusionConvBNParam,
+                                      operators::ConvBNKernel<DeviceType, T>>(
+            type, inputs, outputs, attrs, scope) {}
   void InferShape() const override;
......
......
@@ -32,8 +32,7 @@ using framework::DDim;
 using framework::OpKernelBase;
 
 template <typename DeviceType, typename T>
-class ConvBNKernel
-    : public OpKernelBase<DeviceType, FusionConvBNParam> {
+class ConvBNKernel : public OpKernelBase<DeviceType, FusionConvBNParam> {
  public:
   void Compute(const FusionConvBNParam &param) const;
   bool Init(FusionConvBNParam *param);
......
......
@@ -49,8 +49,7 @@ bool ConvBNKernel<FPGA, float>::Init(FusionConvBNParam *param) {
   for (int i = 0; i < channel; i++) {
     new_scale_ptr[i] = bn_scale_ptr[i] /
                        static_cast<float>(pow((bn_var_ptr[i] + epsilon), 0.5));
-    new_bias_ptr[i] =
-        bn_bias_ptr[i] + (0 - bn_mean_ptr[i]) * new_scale_ptr[i];
+    new_bias_ptr[i] = bn_bias_ptr[i] + (0 - bn_mean_ptr[i]) * new_scale_ptr[i];
     bs_ptr[i * 2] = new_scale_ptr[i];
     bs_ptr[i * 2 + 1] = new_bias_ptr[i];
   }
......
@@ -84,8 +83,7 @@ bool ConvBNKernel<FPGA, float>::Init(FusionConvBNParam *param) {
 }
 
 template <>
-void ConvBNKernel<FPGA, float>::Compute(
-    const FusionConvBNParam &param) const {
+void ConvBNKernel<FPGA, float>::Compute(const FusionConvBNParam &param) const {
   fpga::ComputeFpgaConv(param.FpgaArgs());
 }
 template class ConvBNKernel<FPGA, float>;
......
......
@@ -46,8 +46,7 @@ bool ConvBNReluKernel<FPGA, float>::Init(FusionConvBNReluParam *param) {
   for (int i = 0; i < channel; i++) {
     new_scale_ptr[i] = bn_scale_ptr[i] /
                        static_cast<float>(pow((bn_var_ptr[i] + epsilon), 0.5));
-    new_bias_ptr[i] =
-        bn_bias_ptr[i] + (0 - bn_mean_ptr[i]) * new_scale_ptr[i];
+    new_bias_ptr[i] = bn_bias_ptr[i] + (0 - bn_mean_ptr[i]) * new_scale_ptr[i];
     bs_ptr[i * 2] = new_scale_ptr[i];
     bs_ptr[i * 2 + 1] = new_bias_ptr[i];
   }
......
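
For reference, the loop reformatted in both FPGA kernels above folds the batch-norm parameters into a single per-channel scale/bias pair (new_scale = gamma / sqrt(var + eps), new_bias = beta - mean * new_scale) and packs the pairs interleaved into bs_ptr. Below is a minimal standalone sketch of just that folding step; FoldBatchNorm is a hypothetical helper name, and the buffer allocation and FPGA setup done by the real kernels are omitted.

#include <cmath>
#include <vector>

// Hypothetical helper: fold batch-norm (gamma, beta, mean, var, eps) into
// interleaved {scale, bias} pairs, matching how the kernels fill bs_ptr.
std::vector<float> FoldBatchNorm(const std::vector<float> &bn_scale,
                                 const std::vector<float> &bn_bias,
                                 const std::vector<float> &bn_mean,
                                 const std::vector<float> &bn_var,
                                 float epsilon) {
  std::vector<float> bs(bn_scale.size() * 2);
  for (size_t i = 0; i < bn_scale.size(); i++) {
    // Same arithmetic as the diff: scale = gamma / sqrt(var + eps),
    // bias = beta - mean * scale.
    float new_scale = bn_scale[i] / std::sqrt(bn_var[i] + epsilon);
    float new_bias = bn_bias[i] - bn_mean[i] * new_scale;
    bs[i * 2] = new_scale;     // consumed as the per-channel scale
    bs[i * 2 + 1] = new_bias;  // consumed as the per-channel bias
  }
  return bs;
}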