Commit 7b0cfd82 authored by liuruilong

format files

Parent f8347e36
@@ -355,7 +355,8 @@ std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
const auto &pInfo = profile[i];
uint64_t timeCost = pInfo.runEnd - pInfo.runBegin;
_tp[ops[i]->Type()] += timeCost;
-// fprintf(pf, "%d\t%s\t%d\t%llu\t%llu\t%llu\n", i, ops[i]->Type().c_str(),
+// fprintf(pf, "%d\t%s\t%d\t%llu\t%llu\t%llu\n", i,
+// ops[i]->Type().c_str(),
// pInfo.tid, pInfo.runBegin, pInfo.runEnd, timeCost);
}
// fclose(pf);
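A side note on the profiling hunk above: a minimal, standalone sketch of the per-op-type time accumulation it reformats. The map, op types, and timestamps below are illustrative stand-ins, not the actual `Executor` internals.

```cpp
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Toy stand-in for one profiling record (runBegin/runEnd as in the hunk).
struct ProfInfo {
  uint64_t runBegin;
  uint64_t runEnd;
};

int main() {
  // Hypothetical op types and timestamps; in the real executor these come
  // from the op list and the per-op profile records.
  std::vector<std::string> op_types = {"conv2d", "relu", "conv2d"};
  std::vector<ProfInfo> profile = {{0, 120}, {120, 150}, {150, 300}};

  // Mirrors `_tp[ops[i]->Type()] += timeCost;` from the hunk above.
  std::map<std::string, uint64_t> tp;
  for (size_t i = 0; i < profile.size(); ++i) {
    uint64_t time_cost = profile[i].runEnd - profile[i].runBegin;
    tp[op_types[i]] += time_cost;
  }

  for (const auto &kv : tp) {
    printf("%s\t%llu\n", kv.first.c_str(),
           static_cast<unsigned long long>(kv.second));
  }
  return 0;
}
```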
@@ -75,7 +75,7 @@ void PaddleMobile<Dtype, P>::Clear() {
}
template <typename Dtype, Precision P>
-PaddleMobile<Dtype, P>::~PaddleMobile(){
+PaddleMobile<Dtype, P>::~PaddleMobile() {
executor_ = nullptr;
loader_ = nullptr;
}
@@ -61,6 +61,7 @@ class PaddleMobile {
void Clear();
~PaddleMobile();
private:
std::shared_ptr<Loader<Dtype, P>> loader_;
std::shared_ptr<Executor<Dtype, P>> executor_;
@@ -29,5 +29,3 @@ REGISTER_OPERATOR_MALI_GPU(fetch, ops::FetchOp);
#endif
#ifdef PADDLE_MOBILE_FPGA
#endif
@@ -44,7 +44,6 @@ class FetchOp : public framework::OperatorBase<DeviceType> {
FetchParam param_;
};
} // namespace operators
} // namespace paddle_mobile
@@ -48,8 +48,7 @@ void FusionConvAddOp<Dtype, T>::InferShape() const {
#ifdef PADDLE_MOBILE_CPU
#ifndef CONV_ADD_REGISTER
-framework::FusionOpRegistrar convadd_registrar(
-new FusionConvAddMatcher());
+framework::FusionOpRegistrar convadd_registrar(new FusionConvAddMatcher());
#define CONV_ADD_REGISTER
#endif
@@ -59,4 +59,3 @@ USE_OP_CPU(im2sequence);
#endif
#endif
@@ -237,8 +237,7 @@ void BatchnormCompute(const BatchNormParam &param) {
}
Tensor new_scale;
-auto new_scale_ptr =
-new_scale.mutable_data<float>(framework::make_ddim({C}));
+auto new_scale_ptr = new_scale.mutable_data<float>(framework::make_ddim({C}));
Tensor new_bias;
auto new_bias_ptr = new_bias.mutable_data<float>(framework::make_ddim({C}));
@@ -246,8 +245,7 @@ void BatchnormCompute(const BatchNormParam &param) {
/// (x * inv_var * scale) + (bias - est_mean * inv_var * scale)
for (int i = 0; i < C; i++) {
new_scale_ptr[i] = inv_std_ptr[i] * scale_ptr[i];
-new_bias_ptr[i] =
-bias_ptr[i] - mean_ptr[i] * inv_std_ptr[i] * scale_ptr[i];
+new_bias_ptr[i] = bias_ptr[i] - mean_ptr[i] * inv_std_ptr[i] * scale_ptr[i];
{
for (int n = 0; n < N; n++) {
for (int h = 0; h < H; h++) {
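For context on the two BatchnormCompute hunks above, here is a minimal standalone sketch of the folded batch-norm transform they reformat, following the comment `(x * inv_var * scale) + (bias - est_mean * inv_var * scale)`. The toy shapes, values, and epsilon are assumptions, not the kernel's actual inputs.

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  // Toy single-image input in CHW layout: C = 2 channels, 2 values each.
  const int C = 2, HW = 2;
  std::vector<float> x = {1.f, 2.f, 3.f, 4.f};
  std::vector<float> mean = {1.5f, 3.5f}, var = {0.25f, 0.25f};
  std::vector<float> scale = {1.f, 2.f}, bias = {0.f, 1.f};
  const float eps = 1e-5f;  // assumed epsilon

  // Fold mean/var/scale/bias into one per-channel scale and bias, as in
  // BatchnormCompute: new_scale = inv_std * scale,
  //                   new_bias  = bias - mean * inv_std * scale.
  std::vector<float> new_scale(C), new_bias(C);
  for (int c = 0; c < C; ++c) {
    float inv_std = 1.f / std::sqrt(var[c] + eps);
    new_scale[c] = inv_std * scale[c];
    new_bias[c] = bias[c] - mean[c] * new_scale[c];
  }

  // Apply y = x * new_scale + new_bias per channel.
  for (int c = 0; c < C; ++c) {
    for (int i = 0; i < HW; ++i) {
      float y = x[c * HW + i] * new_scale[c] + new_bias[c];
      printf("c=%d i=%d y=%f\n", c, i, y);
    }
  }
  return 0;
}
```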
@@ -136,7 +136,6 @@ void PackMatrixB_(int k, int n, int n_tail, const float *B, int ldb,
*buffer++ = *(Bij + 3);
}
#endif
}
if (n_tail != 0) {
for (i = 0; i < k; ++i) {
@@ -102,7 +102,7 @@ void Pool2x2Avg(vector<int> strides, vector<int> paddings, const Tensor *input,
#if __ARM_NEON
#ifdef ARMV7
-const int batch_size = input->dims()[0];
+const int batch_size = input->dims()[0];
const int input_height = input->dims()[2];
@@ -173,7 +173,7 @@ const int batch_size = input->dims()[0];
}
#else
-//TODO(): to imp other asm
+// TODO(): to imp other asm
#endif
@@ -57,7 +57,7 @@ class PoolFunctor<CPU, PoolProcess, T> {
T *output_data = output->mutable_data<T>();
for (int i = 0; i < batch_size; i++) {
-#pragma omp parallel for
+// <TRICKY-CLANG-FORMAT-PRAGMA-FIX> #pragma omp parallel for
for (int c = 0; c < output_channels; ++c) {
for (int ph = 0; ph < output_height; ++ph) {
int hstart = ph * stride_height - padding_height;
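On the PoolFunctor hunk above: the `<TRICKY-CLANG-FORMAT-PRAGMA-FIX>` comment appears to be a workaround so clang-format does not disturb the OpenMP pragma; when active, the pragma simply parallelizes the channel loop. A minimal sketch with a toy loop body instead of the real pooling kernel follows; all names and shapes are illustrative, and it assumes an OpenMP-enabled build (e.g. -fopenmp), otherwise the pragma is ignored and the loop runs serially.

```cpp
#include <cstdio>
#include <vector>

int main() {
  const int output_channels = 4, output_height = 3, output_width = 3;
  std::vector<float> output(output_channels * output_height * output_width);

  // Channels are independent, so the outer loop can be parallelized,
  // which is what the (commented-out) pragma in the hunk intends.
#pragma omp parallel for
  for (int c = 0; c < output_channels; ++c) {
    for (int ph = 0; ph < output_height; ++ph) {
      for (int pw = 0; pw < output_width; ++pw) {
        output[(c * output_height + ph) * output_width + pw] =
            static_cast<float>(c + ph + pw);
      }
    }
  }

  printf("output[0] = %f\n", output[0]);
  return 0;
}
```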
@@ -50,7 +50,6 @@ class TransposeOp : public framework::OperatorWithKernel<
} // namespace operators
} // namespace paddle_mobile
#ifdef PADDLE_MOBILE_CPU
USE_OP_CPU(transpose);
#endif
@@ -17,7 +17,7 @@ limitations under the License. */
int main(void) {
#ifdef PADDLE_MOBILE_USE_OPENMP
-#pragma omp parallel num_threads(2)
+// <TRICKY-CLANG-FORMAT-PRAGMA-FIX> #pragma omp parallel num_threads(2)
{
// int thread_id = omp_get_thread_num();
// int nthreads = omp_get_num_threads();
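The last hunk is the OpenMP smoke test. Below is a self-contained version with the commented-out thread queries restored and a printf added here for visibility; the sketch assumes the build defines PADDLE_MOBILE_USE_OPENMP and links OpenMP.

```cpp
#include <cstdio>
#ifdef PADDLE_MOBILE_USE_OPENMP
#include <omp.h>
#endif

int main(void) {
#ifdef PADDLE_MOBILE_USE_OPENMP
#pragma omp parallel num_threads(2)
  {
    int thread_id = omp_get_thread_num();  // restored from the commented line
    int nthreads = omp_get_num_threads();  // restored from the commented line
    printf("hello from thread %d of %d\n", thread_id, nthreads);
  }
#endif
  return 0;
}
```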