Commit 358f8f36 authored by Houjiang Chen, committed by GitHub

Merge pull request #1384 from smilejames/develop

optimize norm_op
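For context, the kernel touched by this diff computes, for each (pre, post) position of the input flattened by GetDims into a (pre, n, post) view around `axis`, norm = sqrt(epsilon + the sum of x*x over the n entries along the axis), and out = x / norm. A minimal reference sketch of that definition follows; the function name and signature are illustrative (plain float buffers rather than framework::Tensor), and `epsilon` is assumed to be read from the op's parameters in the collapsed part of the diff.

    #include <cmath>

    // Reference definition of the norm op over a (pre, n, post) view:
    //   norm[p][j]    = sqrt(epsilon + sum over c of x[p][c][j]^2)
    //   out[p][c][j]  = x[p][c][j] / norm[p][j]
    // Names and signature are illustrative, not the paddle-mobile kernel itself.
    void norm_reference(const float *x, float *out, float *norm_out,
                        int pre, int n, int post, float epsilon) {
      for (int p = 0; p < pre; ++p) {
        for (int j = 0; j < post; ++j) {
          float sum = epsilon;
          for (int c = 0; c < n; ++c) {
            const float v = x[(p * n + c) * post + j];
            sum += v * v;
          }
          norm_out[p * post + j] = std::sqrt(sum);
        }
      }
      for (int p = 0; p < pre; ++p) {
        for (int c = 0; c < n; ++c) {
          for (int j = 0; j < post; ++j) {
            out[(p * n + c) * post + j] =
                x[(p * n + c) * post + j] / norm_out[p * post + j];
          }
        }
      }
    }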
...
@@ -41,7 +41,6 @@ void NormCompute(const NormParam<CPU> &param) {
   int axis = param.Axis();
   const framework::Tensor *input = param.InputX();
-  framework::Tensor square;
   framework::Tensor *norm = param.OutputNorm();
   framework::Tensor *out = param.Out();
@@ -52,58 +51,51 @@ void NormCompute(const NormParam<CPU> &param) {
   int pre, n, post;
   GetDims(x_dims, axis, &pre, &n, &post);

-  square.Resize(input->dims());
-
   const float *input_ptr = input->data<float>();
-  float *square_ptr = square.mutable_data<float>();
   float *norm_ptr = norm->mutable_data<float>();
   float *out_ptr = out->mutable_data<float>();

-  const float *in_tmp = input_ptr;
-  float *square_tmp = square_ptr;
-  for (int i = 0; i < input->numel(); ++i) {
-    float element = *in_tmp;
-    *square_tmp = element * element;
-    square_tmp++;
-    in_tmp++;
-  }
-
-  //  const float *norm_tmp = norm_ptr;
-  //  for (int i = 0; i < norm->numel(); ++i) {
-  //    *norm_tmp = 0;
-  //    norm_tmp++;
-  //  }
-
-  square_tmp = square_ptr;
-  float *norm_tmp = norm_ptr;
-  for (int i = 0; i < pre; ++i) {
-    for (int j = 0; j < post; ++j) {
-      for (int k = 0; k < n; ++k) {
-        if (k == 0) {
-          *norm_tmp = *square_tmp;
-        } else {
-          *norm_tmp += *(square_tmp + k * post);
-        }
-      }
-      float sum = *norm_tmp + epsilon;
-      *norm_tmp = sqrtf(sum);
-      norm_tmp++;
-      square_tmp++;
-    }
-  }
-
-  in_tmp = input_ptr;
-  norm_tmp = norm_ptr;
-  float *out_tmp = out_ptr;
-  for (int i = 0; i < pre; ++i) {
-    for (int k = 0; k < n; ++k) {
-      for (int j = 0; j < post; ++j) {
-        *out_tmp = *in_tmp / *norm_tmp;
-        in_tmp++;
-        norm_tmp++;
-        out_tmp++;
-      }
-      norm_tmp = norm_ptr + i * post;
-    }
-  }
+  for (int p = 0; p < pre; ++p) {
+    const float *in_tmp = input_ptr + p * n * post;
+    float *norm_tmp = norm_ptr + p * post;
+
+    // in_ch = 0; norm = epsilon + x * x
+    for (int i = 0; i < post; ++i) {
+      *norm_tmp = epsilon;
+      *norm_tmp += (*in_tmp) * (*in_tmp);
+      norm_tmp++;
+      in_tmp++;
+    }
+
+    // in_ch >= 1; norm += x * x
+    for (int c = 1; c < n; ++c) {
+      norm_tmp = norm_ptr + p * post;
+      for (int i = 0; i < post; ++i) {
+        *norm_tmp += (*in_tmp) * (*in_tmp);
+        norm_tmp++;
+        in_tmp++;
+      }
+    }
+
+    // norm = sqart(norm)
+    norm_tmp = norm_ptr + p * post;
+    for (int i = 0; i < post; ++i) {
+      *norm_tmp = sqrtf(*norm_tmp);
+      norm_tmp++;
+    }
+
+    // out = input / norm
+    in_tmp = input_ptr + p * n * post;
+    float *out_tmp = out_ptr + p * n * post;
+    for (int c = 0; c < n; ++c) {
+      norm_tmp = norm_ptr + p * post;
+      for (int j = 0; j < post; ++j) {
+        *out_tmp = *in_tmp / *norm_tmp;
+        in_tmp++;
+        norm_tmp++;
+        out_tmp++;
+      }
+    }
+  }
 }
...
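The change removes the intermediate `square` tensor: instead of squaring the whole input into a scratch buffer and then reducing it, the new code accumulates epsilon + x*x directly into `norm` one `pre` slice at a time, then takes the square root in place and divides, which avoids an input-sized allocation and one extra pass over memory. Below is a standalone sketch that transcribes the new loop structure into a free function and checks it against a direct recomputation of the definition; the function name, test sizes, and epsilon value are all illustrative, not part of the paddle-mobile API.

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    // Transcription of the optimized pointer-walking loops from this diff.
    static void norm_optimized(const float *input_ptr, float *out_ptr,
                               float *norm_ptr, int pre, int n, int post,
                               float epsilon) {
      for (int p = 0; p < pre; ++p) {
        const float *in_tmp = input_ptr + p * n * post;
        float *norm_tmp = norm_ptr + p * post;
        // channel 0: norm = epsilon + x * x
        for (int i = 0; i < post; ++i) {
          *norm_tmp = epsilon + (*in_tmp) * (*in_tmp);
          ++norm_tmp;
          ++in_tmp;
        }
        // channels >= 1: norm += x * x
        for (int c = 1; c < n; ++c) {
          norm_tmp = norm_ptr + p * post;
          for (int i = 0; i < post; ++i) {
            *norm_tmp += (*in_tmp) * (*in_tmp);
            ++norm_tmp;
            ++in_tmp;
          }
        }
        // norm = sqrt(norm)
        norm_tmp = norm_ptr + p * post;
        for (int i = 0; i < post; ++i) {
          *norm_tmp = std::sqrt(*norm_tmp);
          ++norm_tmp;
        }
        // out = input / norm
        in_tmp = input_ptr + p * n * post;
        float *out_tmp = out_ptr + p * n * post;
        for (int c = 0; c < n; ++c) {
          norm_tmp = norm_ptr + p * post;
          for (int j = 0; j < post; ++j) {
            *out_tmp = *in_tmp / *norm_tmp;
            ++in_tmp;
            ++norm_tmp;
            ++out_tmp;
          }
        }
      }
    }

    int main() {
      const int pre = 2, n = 3, post = 4;
      const float epsilon = 1e-10f;
      std::vector<float> x(pre * n * post), out(x.size());
      std::vector<float> norm(pre * post);
      for (size_t i = 0; i < x.size(); ++i) {
        x[i] = 0.1f * static_cast<float>(i) - 1.0f;
      }
      norm_optimized(x.data(), out.data(), norm.data(), pre, n, post, epsilon);

      // Compare against the definition: out = x / sqrt(epsilon + sum_c x^2).
      float max_diff = 0.0f;
      for (int p = 0; p < pre; ++p) {
        for (int c = 0; c < n; ++c) {
          for (int j = 0; j < post; ++j) {
            float sum = epsilon;
            for (int k = 0; k < n; ++k) {
              const float v = x[(p * n + k) * post + j];
              sum += v * v;
            }
            const float expected = x[(p * n + c) * post + j] / std::sqrt(sum);
            max_diff = std::max(
                max_diff, std::fabs(expected - out[(p * n + c) * post + j]));
          }
        }
      }
      std::printf("max |expected - optimized| = %g\n", max_diff);
      return 0;
    }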