Commit 7b0cfd82 authored by liuruilong

format files

Parent f8347e36
@@ -108,7 +108,7 @@ class OpRegistry {
      __op_registrar_##op_type##_##device_name(#op_type);      \
  int TouchOpRegistrar_##op_type##_##device_name() {           \
    __op_registrar_##op_type##_##device_name.Touch();          \
    printf(" registering !! \n");                              \
    return 0;                                                  \
  }
......
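The macro in this hunk follows the usual static-registrar idiom: a namespace-scope registrar object records the operator at load time, and the generated Touch function gives other translation units a symbol to reference so the linker does not drop the registration. Below is a minimal, self-contained sketch of that idiom; the names are illustrative only, not the exact expansion of paddle-mobile's registration macro.

#include <cstdio>

// Hypothetical stand-in for the real operator-registrar type.
struct DemoOpRegistrar {
  explicit DemoOpRegistrar(const char *op_type) {
    // A real registrar would insert an op creator into a global registry here.
    printf(" registering %s\n", op_type);
  }
  void Touch() {}  // empty on purpose; calling it only forces the symbol to be kept
};

// Roughly what the macro expands to for one op/device pair:
static DemoOpRegistrar __op_registrar_conv_cpu("conv");
int TouchOpRegistrar_conv_cpu() {
  __op_registrar_conv_cpu.Touch();
  return 0;
}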
@@ -355,8 +355,9 @@ std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
    const auto &pInfo = profile[i];
    uint64_t timeCost = pInfo.runEnd - pInfo.runBegin;
    _tp[ops[i]->Type()] += timeCost;
-   // fprintf(pf, "%d\t%s\t%d\t%llu\t%llu\t%llu\n", i, ops[i]->Type().c_str(),
-   //         pInfo.tid, pInfo.runBegin, pInfo.runEnd, timeCost);
+   // fprintf(pf, "%d\t%s\t%d\t%llu\t%llu\t%llu\n", i,
+   // ops[i]->Type().c_str(),
+   // pInfo.tid, pInfo.runBegin, pInfo.runEnd, timeCost);
  }
  // fclose(pf);
......
@@ -75,7 +75,7 @@ void PaddleMobile<Dtype, P>::Clear() {
}
template <typename Dtype, Precision P>
-PaddleMobile<Dtype, P>::~PaddleMobile(){
+PaddleMobile<Dtype, P>::~PaddleMobile() {
  executor_ = nullptr;
  loader_ = nullptr;
}
......
@@ -61,6 +61,7 @@ class PaddleMobile {
  void Clear();
  ~PaddleMobile();

 private:
  std::shared_ptr<Loader<Dtype, P>> loader_;
  std::shared_ptr<Executor<Dtype, P>> executor_;
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
......
@@ -29,5 +29,3 @@ REGISTER_OPERATOR_MALI_GPU(fetch, ops::FetchOp);
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
@@ -44,7 +44,6 @@ class FetchOp : public framework::OperatorBase<DeviceType> {
  FetchParam param_;
};
} // namespace operators
} // namespace paddle_mobile
......
@@ -48,8 +48,7 @@ void FusionConvAddOp<Dtype, T>::InferShape() const {
#ifdef PADDLE_MOBILE_CPU
#ifndef CONV_ADD_REGISTER
-framework::FusionOpRegistrar convadd_registrar(
-    new FusionConvAddMatcher());
+framework::FusionOpRegistrar convadd_registrar(new FusionConvAddMatcher());
#define CONV_ADD_REGISTER
#endif
......
@@ -59,4 +59,3 @@ USE_OP_CPU(im2sequence);
#endif
#endif
@@ -231,37 +231,35 @@ void BatchnormCompute(const BatchNormParam &param) {
  }
#endif
  float *inv_std_ptr = new float[C];
  for (int i = 0; i < C; i++) {
    inv_std_ptr[i] =
        1 / static_cast<float>(pow((variance_ptr[i] + epsilon), 0.5));
  }

  Tensor new_scale;
  auto new_scale_ptr = new_scale.mutable_data<float>(framework::make_ddim({C}));
  Tensor new_bias;
  auto new_bias_ptr = new_bias.mutable_data<float>(framework::make_ddim({C}));

  /// ((x - est_mean) * (inv_var) * scale + bias equal to
  /// (x * inv_var * scale) + (bias - est_mean * inv_var * scale)
  for (int i = 0; i < C; i++) {
    new_scale_ptr[i] = inv_std_ptr[i] * scale_ptr[i];
    new_bias_ptr[i] = bias_ptr[i] - mean_ptr[i] * inv_std_ptr[i] * scale_ptr[i];
    {
      for (int n = 0; n < N; n++) {
        for (int h = 0; h < H; h++) {
          int tmp_index = n * stride0 + i * stride1 + h * stride2;
          for (int w = 0; w < W; w++) {
            int index = tmp_index + w;
            out_ptr[index] =
                input_x_ptr[index] * new_scale_ptr[i] + new_bias_ptr[i];
          }
        }
      }
    }
  }
  delete[] inv_std_ptr;
}
} // namespace operators
......
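The batch-norm kernel above first folds the saved statistics into a per-channel scale and bias, so the innermost loops reduce to one multiply-add per element. A standalone sketch of that folding step follows; the vector-based signature and function name are assumptions for illustration, since the real kernel operates on Tensor pointers.

#include <cmath>
#include <vector>

// Fold mean/variance/gamma/beta into new_scale and new_bias so that
//   (x - mean) / sqrt(var + eps) * gamma + beta  ==  x * new_scale + new_bias
void FoldBatchNorm(const std::vector<float> &mean,
                   const std::vector<float> &variance,
                   const std::vector<float> &gamma,
                   const std::vector<float> &beta, float epsilon,
                   std::vector<float> *new_scale, std::vector<float> *new_bias) {
  const size_t C = mean.size();
  new_scale->resize(C);
  new_bias->resize(C);
  for (size_t c = 0; c < C; ++c) {
    const float inv_std = 1.0f / std::sqrt(variance[c] + epsilon);
    (*new_scale)[c] = gamma[c] * inv_std;
    (*new_bias)[c] = beta[c] - mean[c] * gamma[c] * inv_std;
  }
}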
@@ -129,14 +129,13 @@ void PackMatrixB_(int k, int n, int n_tail, const float *B, int ldb,
    }
#else
    for (i = 0; i < k; ++i) {
      Bij = &B(i, j);
      *buffer++ = *Bij;
      *buffer++ = *(Bij + 1);
      *buffer++ = *(Bij + 2);
      *buffer++ = *(Bij + 3);
    }
#endif
  }
  if (n_tail != 0) {
    for (i = 0; i < k; ++i) {
......
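PackMatrixB_ above copies a 4-wide column panel of B into a contiguous buffer so the GEMM micro-kernel can stream it linearly. A plain-C++ sketch of that non-NEON branch is given below; it assumes row-major storage with leading dimension ldb (the B(i, j) macro in the real code resolves to such an access), and the helper name is hypothetical.

// Pack the 4 columns starting at column j of a k x n row-major matrix B.
void PackBPanel4(int k, int ldb, const float *B, int j, float *buffer) {
  for (int i = 0; i < k; ++i) {
    const float *Bij = B + i * ldb + j;  // address of B(i, j)
    *buffer++ = Bij[0];
    *buffer++ = Bij[1];
    *buffer++ = Bij[2];
    *buffer++ = Bij[3];
  }
}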
@@ -102,7 +102,7 @@ void Pool2x2Avg(vector<int> strides, vector<int> paddings, const Tensor *input,
#if __ARM_NEON
#ifdef ARMV7
  const int batch_size = input->dims()[0];
  const int input_height = input->dims()[2];
@@ -173,7 +173,7 @@ const int batch_size = input->dims()[0];
  }
#else
-  //TODO(): to imp other asm
+  // TODO(): to imp other asm
#endif
......
@@ -57,7 +57,7 @@ class PoolFunctor<CPU, PoolProcess, T> {
  T *output_data = output->mutable_data<T>();
  for (int i = 0; i < batch_size; i++) {
-#pragma omp parallel for  // <TRICKY-CLANG-FORMAT-PRAGMA-FIX>
+#pragma omp parallel for
    for (int c = 0; c < output_channels; ++c) {
      for (int ph = 0; ph < output_height; ++ph) {
        int hstart = ph * stride_height - padding_height;
......
@@ -50,7 +50,6 @@ class TransposeOp : public framework::OperatorWithKernel<
} // namespace operators
} // namespace paddle_mobile
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(transpose);
#endif
......
@@ -17,7 +17,7 @@ limitations under the License. */
int main(void) {
#ifdef PADDLE_MOBILE_USE_OPENMP
-#pragma omp parallel num_threads(2)  // <TRICKY-CLANG-FORMAT-PRAGMA-FIX>
+#pragma omp parallel num_threads(2)
  {
    // int thread_id = omp_get_thread_num();
    // int nthreads = omp_get_num_threads();
......