From 1a9008c420cf95e38d49959c313705ebf3d3ff8c Mon Sep 17 00:00:00 2001
From: peizhilin
Date: Wed, 14 Nov 2018 17:09:21 +0800
Subject: [PATCH] code style fix test=develop

---
 .../framework/ir/attention_lstm_fuse_pass.cc  |  6 +-
 paddle/fluid/framework/ir/node.cc             |  4 +-
 paddle/fluid/framework/ir/node.h              |  4 +-
 paddle/fluid/framework/ir/pass.h              | 36 ++++++------
 paddle/fluid/framework/operator.cc            |  6 +-
 paddle/fluid/inference/api/helper.h           |  2 +-
 .../fluid/operators/elementwise_op_function.h |  4 +-
 paddle/fluid/operators/grid_sampler_op.h      |  4 +-
 paddle/fluid/platform/init.cc                 |  6 +-
 paddle/fluid/platform/port.h                  | 56 +++++++++----------
 paddle/fluid/platform/variant.h               |  2 +-
 paddle/fluid/pybind/pybind.cc                 |  4 +-
 12 files changed, 67 insertions(+), 67 deletions(-)

diff --git a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
index 64d585c222b..c436dd414d0 100644
--- a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
@@ -213,10 +213,10 @@ void PrepareLSTMWeight(const LoDTensor& W_forget_w0,
   float* out_data = out->mutable_data<float>(platform::CPUPlace());
   std::array<const float*, 4> tensors{
       W_forget_w0.data<float>(), W_input_w0.data<float>(),
-       W_output_w0.data<float>(), W_cell_w0.data<float>()};
+      W_output_w0.data<float>(), W_cell_w0.data<float>()};
   std::array<const float*, 4> tensors1{
       W_forget_w1.data<float>(), W_input_w1.data<float>(),
-       W_output_w1.data<float>(), W_cell_w1.data<float>()};
+      W_output_w1.data<float>(), W_cell_w1.data<float>()};
 
   for (int row = 0; row < D; row++) {
     for (int col = 0; col < 4; col++) {
@@ -240,7 +240,7 @@ void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input,
                      LoDTensor* out) {
   std::array<const float*, 4> tensors{
       B_forget.data<float>(), B_input.data<float>(), B_output.data<float>(),
-       B_cell.data<float>()};
+      B_cell.data<float>()};
 
   PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1);
   int D = B_forget.dims()[0];
diff --git a/paddle/fluid/framework/ir/node.cc b/paddle/fluid/framework/ir/node.cc
index f34ce62b1e7..50d91130889 100644
--- a/paddle/fluid/framework/ir/node.cc
+++ b/paddle/fluid/framework/ir/node.cc
@@ -19,9 +19,9 @@ namespace framework {
 namespace ir {
 // msvc15 don't support constexpr in correct way.
 #if !defined(_WIN32)
-  constexpr char Node::kControlDepVarName[];
+constexpr char Node::kControlDepVarName[];
 #else
-  const char Node::kControlDepVarName[] = "__control_var";
+const char Node::kControlDepVarName[] = "__control_var";
 #endif
 
 std::unique_ptr<Node> CreateNodeForTest(const std::string& name,
diff --git a/paddle/fluid/framework/ir/node.h b/paddle/fluid/framework/ir/node.h
index 21dd43bc1db..d2a393b3f19 100644
--- a/paddle/fluid/framework/ir/node.h
+++ b/paddle/fluid/framework/ir/node.h
@@ -56,9 +56,9 @@ class Node {
   enum class Type { kOperation, kVariable };
 #if !defined(_WIN32)  // msvc not support constexpr correctly.
-    static constexpr char kControlDepVarName[] = "__control_var";
+  static constexpr char kControlDepVarName[] = "__control_var";
 #else
-    static const char kControlDepVarName[];
+  static const char kControlDepVarName[];
 #endif
 
   Type NodeType() const { return type_; }
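Note on the node.cc/node.h hunks above: before C++17, a `static constexpr` array member still needs an out-of-line definition at namespace scope once it is ODR-used, and MSVC 2015 rejects `constexpr` on that definition, hence the plain `const char[]` fallback that these hunks re-indent. A minimal standalone sketch of the same pattern (the `Example` class is hypothetical, not part of this patch):

    // example.cc - pre-C++17 static constexpr member pattern (sketch).
    #include <cstdio>

    class Example {
     public:
    #if !defined(_WIN32)
      // In-class initializer; the definition below provides the storage.
      static constexpr char kName[] = "__control_var";
    #else
      // MSVC 2015 path: declare only, define and initialize out of line.
      static const char kName[];
    #endif
    };

    #if !defined(_WIN32)
    constexpr char Example::kName[];  // required before C++17 when ODR-used
    #else
    const char Example::kName[] = "__control_var";
    #endif

    int main() { std::printf("%s\n", Example::kName); }
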
diff --git a/paddle/fluid/framework/ir/pass.h b/paddle/fluid/framework/ir/pass.h
index e8dd48a5351..615b539695d 100644
--- a/paddle/fluid/framework/ir/pass.h
+++ b/paddle/fluid/framework/ir/pass.h
@@ -197,26 +197,26 @@ struct PassRegistrar : public Registrar {
       msg)
 
 // Register a new pass that can be applied on the IR.
-#define REGISTER_PASS(pass_type, pass_class)                  \
-  STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(                        \
-      __reg_pass__##pass_type,                                \
-      "REGISTER_PASS must be called in global namespace");    \
-  static ::paddle::framework::ir::PassRegistrar<pass_class>   \
-      __pass_registrar_##pass_type##__(#pass_type);           \
-  int TouchPassRegistrar_##pass_type() {                      \
-    __pass_registrar_##pass_type##__.Touch();                 \
-    return 0;                                                 \
-  }                                                           \
-  static ::paddle::framework::ir::PassRegistrar<pass_class>   \
-      &__pass_tmp_registrar_##pass_type##__ UNUSED =          \
+#define REGISTER_PASS(pass_type, pass_class)                \
+  STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(                      \
+      __reg_pass__##pass_type,                              \
+      "REGISTER_PASS must be called in global namespace");  \
+  static ::paddle::framework::ir::PassRegistrar<pass_class> \
+      __pass_registrar_##pass_type##__(#pass_type);         \
+  int TouchPassRegistrar_##pass_type() {                    \
+    __pass_registrar_##pass_type##__.Touch();               \
+    return 0;                                               \
+  }                                                         \
+  static ::paddle::framework::ir::PassRegistrar<pass_class> \
+      &__pass_tmp_registrar_##pass_type##__ UNUSED =        \
           __pass_registrar_##pass_type##__
 
-#define USE_PASS(pass_type)                             \
-  STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(                  \
-      __use_pass_itself_##pass_type,                    \
-      "USE_PASS must be called in global namespace");   \
-  extern int TouchPassRegistrar_##pass_type();          \
-  static int use_pass_itself_##pass_type##_ UNUSED =    \
+#define USE_PASS(pass_type)                           \
+  STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(                \
+      __use_pass_itself_##pass_type,                  \
+      "USE_PASS must be called in global namespace"); \
+  extern int TouchPassRegistrar_##pass_type();        \
+  static int use_pass_itself_##pass_type##_ UNUSED =  \
       TouchPassRegistrar_##pass_type()
 
 }  // namespace ir
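Note on the pass.h hunks above: the reformatted macros implement a standard static-registration idiom. `REGISTER_PASS` plants a global registrar object plus a `Touch` function, and `USE_PASS` references that function so the linker cannot drop the registrar when the pass sits in a static library. Usage is roughly as follows; `my_fuse_pass` and `MyFusePass` are hypothetical names, and the unique_ptr-based `ApplyImpl` signature is an assumption based on this era of the codebase:

    // my_fuse_pass.cc - defines and registers the pass (sketch).
    #include "paddle/fluid/framework/ir/pass.h"

    class MyFusePass : public paddle::framework::ir::Pass {
     protected:
      std::unique_ptr<paddle::framework::ir::Graph> ApplyImpl(
          std::unique_ptr<paddle::framework::ir::Graph> graph) const override {
        // ... mutate the graph in place ...
        return graph;
      }
    };
    REGISTER_PASS(my_fuse_pass, MyFusePass);

    // consumer.cc - forces the registration object to be linked in.
    USE_PASS(my_fuse_pass);
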
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 36fe5724ea0..6bd744edc22 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -150,9 +150,9 @@ void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
 #endif
   }
 
-  // The profile has a process-wide mutex, results in serious performance issue
-  // in concurrency scenerio. Here use an `if` to fix this issue.
-  // Please not remove the `if`, ask @Superjomn if there are any concern.
+// The profile has a process-wide mutex, results in serious performance issue
+// in concurrency scenerio. Here use an `if` to fix this issue.
+// Please not remove the `if`, ask @Superjomn if there are any concern.
 #ifndef _WIN32
   if (platform::IsProfileEnabled()) {
     platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
diff --git a/paddle/fluid/inference/api/helper.h b/paddle/fluid/inference/api/helper.h
index ba72fba8be8..6f9d6631210 100644
--- a/paddle/fluid/inference/api/helper.h
+++ b/paddle/fluid/inference/api/helper.h
@@ -20,9 +20,9 @@
 #else
 #endif
 
-#include <numeric>
 #include <algorithm>
 #include <chrono>  // NOLINT
+#include <numeric>
 #include <sstream>
 #include <string>
 #include <vector>
diff --git a/paddle/fluid/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise_op_function.h
index d7444bcfe0b..7bb6934e149 100644
--- a/paddle/fluid/operators/elementwise_op_function.h
+++ b/paddle/fluid/operators/elementwise_op_function.h
@@ -112,7 +112,7 @@ class RowwiseTransformIterator<T, platform::CPUDeviceContext>
   }
 
   RowwiseTransformIterator<T, platform::CPUDeviceContext> &operator+(int n) {
-    while(n-- > 0) {
+    while (n-- > 0) {
       ++i_;
       if (UNLIKELY(i_ == n_)) {
         i_ = 0;
@@ -161,7 +161,7 @@ class MidWiseTransformIterator<T, platform::CPUDeviceContext>
   }
 
   MidWiseTransformIterator<T, platform::CPUDeviceContext> &operator+(int n) {
-    while(n-- > 0) {
+    while (n-- > 0) {
       ++j_;
       if (UNLIKELY(j_ == post_)) {
         ++i_;
diff --git a/paddle/fluid/operators/grid_sampler_op.h b/paddle/fluid/operators/grid_sampler_op.h
index 00fba457bba..08a6043eb07 100644
--- a/paddle/fluid/operators/grid_sampler_op.h
+++ b/paddle/fluid/operators/grid_sampler_op.h
@@ -67,10 +67,10 @@ static void CalcGridLocations(const platform::CPUDeviceContext& ctx,
   Tensor half_ymax;
   half_xmax.mutable_data<T>({n, h, w}, ctx.GetPlace());
   auto half_xmax_t =
-    EigenTensor<T, 3>::From(half_xmax).setConstant(0.5 * x_max);
+      EigenTensor<T, 3>::From(half_xmax).setConstant(0.5 * x_max);
   half_ymax.mutable_data<T>({n, h, w}, ctx.GetPlace());
   auto half_ymax_t =
-    EigenTensor<T, 3>::From(half_ymax).setConstant(0.5 * y_max);
+      EigenTensor<T, 3>::From(half_ymax).setConstant(0.5 * y_max);
 
   // scale grid to [0, h-1/w-1]
   auto grid_x_t = EigenTensor<T, 3>::From(grid_x);
diff --git a/paddle/fluid/platform/init.cc b/paddle/fluid/platform/init.cc
index 61560676455..84d1b852cbe 100644
--- a/paddle/fluid/platform/init.cc
+++ b/paddle/fluid/platform/init.cc
@@ -115,9 +115,9 @@ void InitDevices(bool init_p2p, const std::vector<int> devices) {
 
 // windows has no support for openblas multi-thread
 #ifdef _WIN32
-    if (FLAGS_paddle_num_threads > 1) {
-      FLAGS_paddle_num_threads = 1;
-    }
+  if (FLAGS_paddle_num_threads > 1) {
+    FLAGS_paddle_num_threads = 1;
+  }
 #endif
 
 #ifndef PADDLE_WITH_MKLDNN
diff --git a/paddle/fluid/platform/port.h b/paddle/fluid/platform/port.h
index d3a6e285492..8823e97b0b6 100644
--- a/paddle/fluid/platform/port.h
+++ b/paddle/fluid/platform/port.h
@@ -24,38 +24,38 @@
 #include "glog/logging.h"
 
 #if !defined(_WIN32)
-  #include <dlfcn.h>     // dladdr
-  #include <execinfo.h>  // backtrace
-  #include <pthread.h>
-  #include <numeric>     // std::accumulate
+#include <dlfcn.h>     // dladdr
+#include <execinfo.h>  // backtrace
+#include <pthread.h>
+#include <numeric>     // std::accumulate
#else
-  #include <windows.h>
-  #include <io.h>  // _popen, _pclose
-  #include <stdio.h>
-  #include <numeric>  // std::accumulate in msvc
-  #ifndef S_ISDIR  // windows port for sys/stat.h
-  #define S_ISDIR(mode) (((mode)&S_IFMT) == S_IFDIR)
-  #endif  // S_ISDIR
+#include <io.h>  // _popen, _pclose
+#include <stdio.h>
+#include <windows.h>
+#include <numeric>  // std::accumulate in msvc
+#ifndef S_ISDIR  // windows port for sys/stat.h
+#define S_ISDIR(mode) (((mode)&S_IFMT) == S_IFDIR)
+#endif  // S_ISDIR
 
-  static void *dlsym(void *handle, const char *symbol_name) {
-    FARPROC found_symbol;
-    found_symbol = GetProcAddress((HMODULE)handle, symbol_name);
+static void *dlsym(void *handle, const char *symbol_name) {
+  FARPROC found_symbol;
+  found_symbol = GetProcAddress((HMODULE)handle, symbol_name);
 
-    if (found_symbol == NULL) {
-      throw std::runtime_error(std::string(symbol_name) + " not found.");
-    }
-    return reinterpret_cast<void *>(found_symbol);
-  }
+  if (found_symbol == NULL) {
+    throw std::runtime_error(std::string(symbol_name) + " not found.");
+  }
+  return reinterpret_cast<void *>(found_symbol);
+}
 
-  static void *dlopen(const char *filename, int flag) {
-    std::string file_name(filename);
-    file_name.replace(0, file_name.size() - 1, '/', '\\');
-    HMODULE hModule = LoadLibrary(file_name.c_str());
-    if (!hModule) {
-      throw std::runtime_error(file_name + " not found.");
-    }
-    return reinterpret_cast<void *>(hModule);
-  }
+static void *dlopen(const char *filename, int flag) {
+  std::string file_name(filename);
+  file_name.replace(0, file_name.size() - 1, '/', '\\');
+  HMODULE hModule = LoadLibrary(file_name.c_str());
+  if (!hModule) {
+    throw std::runtime_error(file_name + " not found.");
+  }
+  return reinterpret_cast<void *>(hModule);
+}
 
 #endif  // !_WIN32
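Note on the port.h hunk above: the reformatted Windows branch emulates the POSIX dynamic-loading calls on top of `LoadLibrary`/`GetProcAddress`, so call sites can stay platform-neutral. Unlike POSIX `dlopen`/`dlsym`, these shims throw `std::runtime_error` on failure instead of returning NULL. A sketch of a call site; the library path, symbol name, and `CallInit` wrapper are hypothetical:

    // load_symbol.cc - consuming the dlopen/dlsym shims from port.h (sketch).
    #include "paddle/fluid/platform/port.h"

    typedef int (*init_fn)();

    int CallInit() {
      // The flag argument is ignored by the Windows shim; a POSIX build
      // would pass RTLD_LAZY or RTLD_NOW here instead of 0.
      void *handle = dlopen("libplugin.so", 0);
      init_fn init = reinterpret_cast<init_fn>(dlsym(handle, "plugin_init"));
      return init();  // both shims have already thrown if lookup failed
    }
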
diff --git a/paddle/fluid/platform/variant.h b/paddle/fluid/platform/variant.h
index 1b10db8669f..42bff087d2b 100644
--- a/paddle/fluid/platform/variant.h
+++ b/paddle/fluid/platform/variant.h
@@ -46,7 +46,7 @@ limitations under the License. */
 // some platform-independent defintion
 #if defined(_WIN32)
 #define UNUSED
-#define __builtin_expect(EXP, C)     (EXP)
+#define __builtin_expect(EXP, C) (EXP)
 #else
 #define UNUSED __attribute__((unused))
 #endif
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 6cba3395bf7..592c40cf1ce 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -352,7 +352,7 @@ All parameter, weight, gradient are variables in Paddle.
            [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
            py::return_value_policy::reference)
 #if (defined(PADDLE_WITH_CUDA) && !defined(_WIN32))
-       .def("get_communicator",
+      .def("get_communicator",
            [](Variable &self) -> platform::Communicator * {
              return self.GetMutable<platform::Communicator>();
            },
@@ -364,7 +364,7 @@ All parameter, weight, gradient are variables in Paddle.
            },
            py::return_value_policy::reference)
 #endif
-;
+      ;
 
 #if !defined(_WIN32)
   py::class_<framework::ReaderHolder>(m, "Reader", "")
--
GitLab
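Note on the variant.h hunk: `__builtin_expect` is a GCC/Clang builtin with no MSVC equivalent, so the stub simply evaluates the expression and drops the branch-prediction hint, which keeps wrappers such as an `UNLIKELY` macro compilable on Windows. A standalone sketch of that pattern (not code from this patch):

    // expect.cc - branch-hint fallback pattern (sketch).
    #include <cstdio>

    #if defined(_WIN32)
    #define __builtin_expect(EXP, C) (EXP)  // hint becomes a no-op on MSVC
    #endif

    #define UNLIKELY(cond) __builtin_expect(static_cast<bool>(cond), 0)

    int Divide(int a, int b) {
      if (UNLIKELY(b == 0)) {  // hint: the error path is rarely taken
        std::fprintf(stderr, "division by zero\n");
        return 0;
      }
      return a / b;
    }

    int main() { return Divide(10, 2) == 5 ? 0 : 1; }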