Commit 92be0525 authored by zhaojiaying01

adjust code style

Parent d22e273b
@@ -32,12 +32,7 @@ void ConvAddReluCompute(const FusionConvAddReluParam &param) {
   Tensor bias = *param.Bias();
   int axis = param.Axis();
   Tensor *output = param.Output();
-  // math::expand_bias(bias, axis, output->dims());
-  float *output_data = output->data<float>();
   float *biase_data = bias.data<float>();
-  // for (int k = 0; k < output->numel(); ++k) {
-  //   output_data[k] = biase_data[k];
-  // }
   int groups = param.Groups();
   std::vector<int> strides = param.Strides();
...
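The three deleted comment lines were a dead manual bias broadcast, and `output_data` existed only to serve it; `biase_data` survives because the fused matmul call further down still consumes it. Note the dead loop indexed `biase_data` by the flat output index, which is only valid when the bias has as many elements as the whole output. For contrast, a hypothetical per-channel broadcast for an NCHW output would look roughly like this (a sketch only; none of these loops appear in the diff):

    // Sketch: tile one bias value per output channel across each H*W plane.
    const int num_channels = bias.numel();
    const int plane = output->dims()[2] * output->dims()[3];  // H * W
    for (int n = 0; n < output->dims()[0]; ++n) {
      for (int ch = 0; ch < num_channels; ++ch) {
        for (int k = 0; k < plane; ++k) {
          output_data[(n * num_channels + ch) * plane + k] = biase_data[ch];
        }
      }
    }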
@@ -30,7 +30,6 @@ inline void ConvBasic(const ConvParam &param) {
   Tensor filter = *param.Filter();
   Tensor *output = param.Output();
   output->mutable_data<float>();
-  float *bias_data = output->mutable_data<float>();
   int groups = param.Groups();
   std::vector<int> strides = param.Strides();
   std::vector<int> paddings = param.Paddings();
@@ -107,7 +106,7 @@ inline void ConvBasic(const ConvParam &param) {
       Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
       math::matmul<float>(filter_slice, false, col_matrix, false,
                           static_cast<float>(1), &out_slice,
-                          static_cast<float>(0), false, bias_data);
+                          static_cast<float>(0));
     }
   }
 }
...
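Here and in `MulCompute` below, the trailing `false, bias_data` arguments are dropped, so the calls rely on the ReLU flag and bias pointer being defaulted trailing parameters of `math::matmul`. A sketch of the declaration this assumes (the actual header is not part of this diff):

    // Assumed shape of the math::matmul declaration; with relu and bias
    // defaulted, callers without a fused bias can simply omit them instead
    // of passing a pointer they never meant to use.
    template <typename T>
    void matmul(const framework::Tensor &matrix_a, bool trans_a,
                const framework::Tensor &matrix_b, bool trans_b, T alpha,
                framework::Tensor *matrix_out, T beta,
                bool relu = false, float *bias = nullptr);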
@@ -55,9 +55,9 @@ void FusionFcCompute(const FusionFcParam &param) {
     memory::Copy(out_data + i * classes, input_z_data, sizeof(float) * classes);
   }
-  // for (int i = 0; i < out->numel(); i++) {
-  //   DLOG << out_data[i];
-  // }
+  for (int i = 0; i < out->numel(); i++) {
+    DLOG << out_data[i];
+  }
   math::matmul<float>(x_matrix, false, y_matrix, false, static_cast<float>(1),
                       out, static_cast<float>(1), false, bias_data);
   PADDLE_MOBILE_ENFORCE(out_dim.size() == 2, " out_dim.size must be 2.");
...
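The alpha/beta pair follows the usual GEMM convention, out = alpha * (x_matrix * y_matrix) + beta * out. `FusionFcCompute` first tiles the bias row `input_z_data` across every row of `out` with `memory::Copy`, then calls matmul with alpha = 1 and beta = 1 so the product accumulates on top of the values already in `out`. `MulCompute` below passes beta = 0 instead, so the product simply overwrites `out` and no bias pointer is needed, which is why its dangling `bias_data` could be deleted.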
@@ -59,7 +59,6 @@ void MulCompute(const MulParam &param) {
   const Tensor *input_y = param.InputY();
   Tensor *out = param.Out();
   out->mutable_data<float>();
-  float *bias_data = out->mutable_data<float>();
   const Tensor x_matrix =
       input_x->dims().size() > 2
           ? framework::ReshapeToMatrix(*input_x, param.XNumColDims())
@@ -73,7 +72,7 @@ void MulCompute(const MulParam &param) {
     out->Resize({x_matrix.dims()[0], y_matrix.dims()[1]});
   }
   math::matmul<float>(x_matrix, false, y_matrix, false, static_cast<float>(1),
-                      out, static_cast<float>(0), false, bias_data);
+                      out, static_cast<float>(0));
   if (out_dim.size() != 2) {
     out->Resize(out_dim);
   }
...
@@ -373,9 +373,9 @@ void InnerKernel(int mc, int nc, float alpha, const float *a, const float *b,
 #endif
     }
   }
   if (alpha != 1) {
     WriteWithAlphaBeta(mc, nc, c, C, ldc);
     return;
   }
   if (beta == 0) {
...
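`InnerKernel`'s write-back is gated on the same scalars: any alpha other than 1 takes the general scaled path, and only alpha == 1 falls through to the cheaper beta-specialized writes. A scalar model of what the `WriteWithAlphaBeta(mc, nc, c, C, ldc)` call is assumed to compute (hypothetical; the real routine is a NEON kernel and presumably picks up alpha, beta, and the packed buffer's leading dimension from surrounding state rather than taking them as arguments):

    // Scalar model: write the mc x nc packed panel c (leading dimension NC)
    // back into the output C as C = alpha * c + beta * C.
    void WriteWithAlphaBetaScalar(int mc, int nc, float alpha, float beta,
                                  const float *c, int NC, float *C, int ldc) {
      for (int i = 0; i < mc; ++i) {
        for (int j = 0; j < nc; ++j) {
          C[i * ldc + j] = alpha * c[i * NC + j] + beta * C[i * ldc + j];
        }
      }
    }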