diff --git a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
index 64d585c222b0ca013044be6440b882de08544b43..c436dd414d01ab61d143427fe7ecd34a82f11f8d 100644
--- a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
@@ -213,10 +213,10 @@ void PrepareLSTMWeight(const LoDTensor& W_forget_w0,
   float* out_data = out->mutable_data<float>(platform::CPUPlace());
   std::array<const float*, 4> tensors{
       W_forget_w0.data<float>(), W_input_w0.data<float>(),
-          W_output_w0.data<float>(), W_cell_w0.data<float>()};
+      W_output_w0.data<float>(), W_cell_w0.data<float>()};
   std::array<const float*, 4> tensors1{
       W_forget_w1.data<float>(), W_input_w1.data<float>(),
-          W_output_w1.data<float>(), W_cell_w1.data<float>()};
+      W_output_w1.data<float>(), W_cell_w1.data<float>()};
 
   for (int row = 0; row < D; row++) {
     for (int col = 0; col < 4; col++) {
@@ -240,7 +240,7 @@ void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input,
                      LoDTensor* out) {
   std::array<const float*, 4> tensors{
       B_forget.data<float>(), B_input.data<float>(), B_output.data<float>(),
-          B_cell.data<float>()};
+      B_cell.data<float>()};
 
   PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1);
   int D = B_forget.dims()[0];
diff --git a/paddle/fluid/framework/ir/node.cc b/paddle/fluid/framework/ir/node.cc
index f34ce62b1e79b82285d130d4320d5c11113fac1a..50d9113088903aa7681d6c6af5cc65f846d32787 100644
--- a/paddle/fluid/framework/ir/node.cc
+++ b/paddle/fluid/framework/ir/node.cc
@@ -19,9 +19,9 @@ namespace framework {
 namespace ir {
 // msvc15 don't support constexpr in correct way.
 #if !defined(_WIN32)
-  constexpr char Node::kControlDepVarName[];
+constexpr char Node::kControlDepVarName[];
 #else
-  const char Node::kControlDepVarName[] = "__control_var";
+const char Node::kControlDepVarName[] = "__control_var";
 #endif
 
 std::unique_ptr<Node> CreateNodeForTest(const std::string& name,
diff --git a/paddle/fluid/framework/ir/node.h b/paddle/fluid/framework/ir/node.h
index 21dd43bc1db95110548017eccb40facc6c025c16..d2a393b3f19e9aab79098757dae663d030b0fa2b 100644
--- a/paddle/fluid/framework/ir/node.h
+++ b/paddle/fluid/framework/ir/node.h
@@ -56,9 +56,9 @@ class Node {
   enum class Type { kOperation, kVariable };
 
 #if !defined(_WIN32)  // msvc not support constexpr correctly.
-    static constexpr char kControlDepVarName[] = "__control_var";
+  static constexpr char kControlDepVarName[] = "__control_var";
 #else
-    static const char kControlDepVarName[];
+  static const char kControlDepVarName[];
 #endif
 
   Type NodeType() const { return type_; }
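For context on the node.h/node.cc pair above: pre-C++17, an odr-used constexpr static data member still needs an out-of-line definition at namespace scope, while MSVC 2015 mishandles constexpr here, hence the #if split. A minimal, self-contained sketch of the same idiom (struct and member names are hypothetical, not Paddle code):

#include <iostream>

struct S {
#if !defined(_WIN32)
  // Initializer lives in-class; the .cc provides the bare definition.
  static constexpr char kName[] = "__control_var";
#else
  // MSVC 2015 fallback: declare here, define and initialize in the .cc.
  static const char kName[];
#endif
};

#if !defined(_WIN32)
constexpr char S::kName[];  // definition only, no initializer repeated
#else
const char S::kName[] = "__control_var";
#endif

int main() { std::cout << S::kName << "\n"; }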
diff --git a/paddle/fluid/framework/ir/pass.h b/paddle/fluid/framework/ir/pass.h
index e8dd48a535126e7f6b880239efd8f15ecd0dea12..615b539695de8c3f9a256d17d4d49e61902da394 100644
--- a/paddle/fluid/framework/ir/pass.h
+++ b/paddle/fluid/framework/ir/pass.h
@@ -197,26 +197,26 @@ struct PassRegistrar : public Registrar {
       msg)
 
 // Register a new pass that can be applied on the IR.
-#define REGISTER_PASS(pass_type, pass_class)                 \
-  STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(                       \
-      __reg_pass__##pass_type,                               \
-      "REGISTER_PASS must be called in global namespace");   \
-  static ::paddle::framework::ir::PassRegistrar<pass_class>  \
-      __pass_registrar_##pass_type##__(#pass_type);          \
-  int TouchPassRegistrar_##pass_type() {                     \
-    __pass_registrar_##pass_type##__.Touch();                \
-    return 0;                                                \
-  }                                                          \
-  static ::paddle::framework::ir::PassRegistrar<pass_class>  \
-      &__pass_tmp_registrar_##pass_type##__ UNUSED =         \
+#define REGISTER_PASS(pass_type, pass_class)                \
+  STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(                      \
+      __reg_pass__##pass_type,                              \
+      "REGISTER_PASS must be called in global namespace");  \
+  static ::paddle::framework::ir::PassRegistrar<pass_class> \
+      __pass_registrar_##pass_type##__(#pass_type);         \
+  int TouchPassRegistrar_##pass_type() {                    \
+    __pass_registrar_##pass_type##__.Touch();               \
+    return 0;                                               \
+  }                                                         \
+  static ::paddle::framework::ir::PassRegistrar<pass_class> \
+      &__pass_tmp_registrar_##pass_type##__ UNUSED =        \
           __pass_registrar_##pass_type##__
 
-#define USE_PASS(pass_type)                            \
-  STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(                 \
-      __use_pass_itself_##pass_type,                   \
-      "USE_PASS must be called in global namespace");  \
-  extern int TouchPassRegistrar_##pass_type();         \
-  static int use_pass_itself_##pass_type##_ UNUSED =   \
+#define USE_PASS(pass_type)                           \
+  STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(                \
+      __use_pass_itself_##pass_type,                  \
+      "USE_PASS must be called in global namespace"); \
+  extern int TouchPassRegistrar_##pass_type();        \
+  static int use_pass_itself_##pass_type##_ UNUSED =  \
       TouchPassRegistrar_##pass_type()
 
 }  // namespace ir
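The two macros above cooperate across translation units: REGISTER_PASS instantiates a PassRegistrar<pass_class> in the file that defines the pass, and USE_PASS references TouchPassRegistrar_* so the linker cannot drop that object file. A usage sketch, assuming the unique_ptr-based ApplyImpl signature at this revision (the pass name and class are hypothetical):

// my_fuse_pass.cc (hypothetical)
#include "paddle/fluid/framework/ir/pass.h"

namespace paddle {
namespace framework {
namespace ir {

class MyFusePass : public Pass {
 protected:
  std::unique_ptr<Graph> ApplyImpl(std::unique_ptr<Graph> graph) const override {
    return graph;  // no-op: a real pass would mutate the graph here
  }
};

}  // namespace ir
}  // namespace framework
}  // namespace paddle

REGISTER_PASS(my_fuse_pass, paddle::framework::ir::MyFusePass);

// In whichever binary applies the pass:
// USE_PASS(my_fuse_pass);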
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 36fe5724ea04d271fb7921d9f72f1ad6ab95e081..6bd744edc22e6a90ce64e9d699e7f3c5c60d4908 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -150,9 +150,9 @@ void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
 #endif
   }
 
-  // The profile has a process-wide mutex, results in serious performance issue
-  // in concurrency scenerio. Here use an `if` to fix this issue.
-  // Please not remove the `if`, ask @Superjomn if there are any concern.
+// The profiler has a process-wide mutex, which causes a serious performance
+// issue in concurrent scenarios. The `if` below works around it.
+// Please do not remove the `if`; ask @Superjomn if there are any concerns.
 #ifndef _WIN32
   if (platform::IsProfileEnabled()) {
     platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
diff --git a/paddle/fluid/inference/api/helper.h b/paddle/fluid/inference/api/helper.h
index ba72fba8be8dcd1b122f21630123d3e7dee92faf..6f9d663121004470d57c17b8154d725fdf2b9689 100644
--- a/paddle/fluid/inference/api/helper.h
+++ b/paddle/fluid/inference/api/helper.h
@@ -20,9 +20,9 @@
 #else
 #endif
 
-#include <numeric>
 #include <algorithm>
 #include <chrono>  // NOLINT
+#include <numeric>
 #include <sstream>
 #include <string>
 #include <vector>
diff --git a/paddle/fluid/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise_op_function.h
index d7444bcfe0b876b17a52a38628cbc526af50cb31..7bb6934e1496cc989eee8ba82f56959522803bfb 100644
--- a/paddle/fluid/operators/elementwise_op_function.h
+++ b/paddle/fluid/operators/elementwise_op_function.h
@@ -112,7 +112,7 @@ class RowwiseTransformIterator<T, platform::CPUDeviceContext>
   }
 
   RowwiseTransformIterator<T, platform::CPUDeviceContext> &operator+(int n) {
-    while(n-- > 0) {
+    while (n-- > 0) {
       ++i_;
       if (UNLIKELY(i_ == n_)) {
         i_ = 0;
@@ -161,7 +161,7 @@ class MidWiseTransformIterator<T, platform::CPUDeviceContext>
   }
 
   MidWiseTransformIterator<T, platform::CPUDeviceContext> &operator+(int n) {
-    while(n-- > 0) {
+    while (n-- > 0) {
       ++j_;
       if (UNLIKELY(j_ == post_)) {
         ++i_;
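For intuition on the two loops being reformatted: operator+(int n) advances a broadcast index by repeated increment with wrap-around, i.e. modular arithmetic over the broadcast width. A standalone sketch of the rowwise case (types and names hypothetical, independent of the Paddle iterators):

#include <cassert>

// A length-`width` vector is broadcast across each row of a larger
// tensor, so the effective element index advances modulo `width`.
struct RowwiseIdx {
  explicit RowwiseIdx(int width) : width_(width) {}
  RowwiseIdx &operator+(int n) {
    while (n-- > 0) {
      ++i_;
      if (i_ == width_) i_ = 0;  // the UNLIKELY(...) wrap above
    }
    return *this;
  }
  int i_{0};
  int width_;
};

int main() {
  RowwiseIdx it(3);
  it + 5;              // five steps over a width-3 vector
  assert(it.i_ == 2);  // 5 % 3 == 2
  return 0;
}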
diff --git a/paddle/fluid/operators/grid_sampler_op.h b/paddle/fluid/operators/grid_sampler_op.h
index 00fba457bbacde0ec9d02d2a5688e3976088c829..08a6043eb07a6e44d46428ee195f6cb28c2ee77c 100644
--- a/paddle/fluid/operators/grid_sampler_op.h
+++ b/paddle/fluid/operators/grid_sampler_op.h
@@ -67,10 +67,10 @@ static void CalcGridLocations(const platform::CPUDeviceContext& ctx,
   Tensor half_ymax;
   half_xmax.mutable_data<T>({n, h, w}, ctx.GetPlace());
   auto half_xmax_t =
-        EigenTensor<T, 3>::From(half_xmax).setConstant(0.5 * x_max);
+      EigenTensor<T, 3>::From(half_xmax).setConstant(0.5 * x_max);
   half_ymax.mutable_data<T>({n, h, w}, ctx.GetPlace());
   auto half_ymax_t =
-        EigenTensor<T, 3>::From(half_ymax).setConstant(0.5 * y_max);
+      EigenTensor<T, 3>::From(half_ymax).setConstant(0.5 * y_max);
 
   // scale grid to [0, h-1/w-1]
   auto grid_x_t = EigenTensor<T, 3>::From(grid_x);
diff --git a/paddle/fluid/platform/init.cc b/paddle/fluid/platform/init.cc
index 6156067645522d31a93856fdb2b046a1ca45a480..84d1b852cbe5a334ddfc27d404e879b178e341fe 100644
--- a/paddle/fluid/platform/init.cc
+++ b/paddle/fluid/platform/init.cc
@@ -115,9 +115,9 @@ void InitDevices(bool init_p2p, const std::vector<int> devices) {
 
 // windows has no support for openblas multi-thread
 #ifdef _WIN32
-    if (FLAGS_paddle_num_threads > 1) {
-      FLAGS_paddle_num_threads = 1;
-    }
+  if (FLAGS_paddle_num_threads > 1) {
+    FLAGS_paddle_num_threads = 1;
+  }
 #endif
 
 #ifndef PADDLE_WITH_MKLDNN
diff --git a/paddle/fluid/platform/port.h b/paddle/fluid/platform/port.h
index d3a6e2854926f175e17b6f367d3ccbb4e7bfb04f..8823e97b0b696556b32724acd096e8fc79a49f53 100644
--- a/paddle/fluid/platform/port.h
+++ b/paddle/fluid/platform/port.h
@@ -24,36 +24,38 @@
 #include "glog/logging.h"
 
 #if !defined(_WIN32)
-  #include <dlfcn.h>     // dladdr
-  #include <execinfo.h>  // backtrace
-  #include <sys/stat.h>
-  #include <numeric>     // std::accumulate
+#include <dlfcn.h>     // dladdr
+#include <execinfo.h>  // backtrace
+#include <sys/stat.h>
+#include <numeric>  // std::accumulate
 #else
-  #include <windows.h>
-  #include <io.h>       // _popen, _pclose
-  #include <stdio.h>
-  #include <numeric>    // std::accumulate in msvc
-  #ifndef S_ISDIR       // windows port for sys/stat.h
-  #define S_ISDIR(mode) (((mode)&S_IFMT) == S_IFDIR)
-  #endif                // S_ISDIR
-
-  static void *dlsym(void *handle, const char *symbol_name) {
-    FARPROC found_symbol;
-    found_symbol = GetProcAddress((HMODULE)handle, symbol_name);
-
-    if (found_symbol == NULL) {
-      throw std::runtime_error(std::string(symbol_name) + " not found.");
-    }
-    return reinterpret_cast<void *>(found_symbol);
-  }
+#include <io.h>       // _popen, _pclose
+#include <stdio.h>
+#include <windows.h>
+#include <algorithm>  // std::replace
+#include <numeric>    // std::accumulate in msvc
+#ifndef S_ISDIR  // windows port for sys/stat.h
+#define S_ISDIR(mode) (((mode)&S_IFMT) == S_IFDIR)
+#endif  // S_ISDIR
+
+static void *dlsym(void *handle, const char *symbol_name) {
+  FARPROC found_symbol;
+  found_symbol = GetProcAddress((HMODULE)handle, symbol_name);
+
+  if (found_symbol == NULL) {
+    throw std::runtime_error(std::string(symbol_name) + " not found.");
+  }
+  return reinterpret_cast<void *>(found_symbol);
+}
 
-  static void *dlopen(const char *filename, int flag) {
-    std::string file_name(filename);
-    file_name.replace(0, file_name.size() - 1, '/', '\\');
-    HMODULE hModule = LoadLibrary(file_name.c_str());
-    if (!hModule) {
-      throw std::runtime_error(file_name + " not found.");
-    }
-    return reinterpret_cast<void *>(hModule);
-  }
+static void *dlopen(const char *filename, int flag) {
+  std::string file_name(filename);
+  // convert to a Windows path (std::replace, not std::string::replace)
+  std::replace(file_name.begin(), file_name.end(), '/', '\\');
+  HMODULE hModule = LoadLibrary(file_name.c_str());
+  if (!hModule) {
+    throw std::runtime_error(file_name + " not found.");
+  }
+  return reinterpret_cast<void *>(hModule);
+}
 #endif  // !_WIN32
diff --git a/paddle/fluid/platform/variant.h b/paddle/fluid/platform/variant.h
index 1b10db8669f3c47169ee267090b54640b8b3b7dc..42bff087d2bda90889a106bc5f4fb32bccaa8a9b 100644
--- a/paddle/fluid/platform/variant.h
+++ b/paddle/fluid/platform/variant.h
@@ -46,7 +46,7 @@ limitations under the License. */
 // some platform-independent defintion
 #if defined(_WIN32)
 #define UNUSED
-#define __builtin_expect(EXP, C)      (EXP)
+#define __builtin_expect(EXP, C) (EXP)
 #else
 #define UNUSED __attribute__((unused))
 #endif
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 6cba3395bf725532aa9b2946fe243e354176999f..592c40cf1ce07e28f3fe4a1ca0b11d249600e5f1 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -352,7 +352,7 @@ All parameter, weight, gradient are variables in Paddle.
            [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
            py::return_value_policy::reference)
 #if (defined(PADDLE_WITH_CUDA) && !defined(_WIN32))
-    .def("get_communicator",
+      .def("get_communicator",
            [](Variable &self) -> platform::Communicator * {
              return self.GetMutable<platform::Communicator>();
            },
@@ -364,7 +364,7 @@ All parameter, weight, gradient are variables in Paddle.
            },
            py::return_value_policy::reference)
 #endif
-;
+      ;
 
 #if !defined(_WIN32)
   py::class_<framework::ReaderHolder>(m, "Reader", "")
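Finally, a usage sketch for the port.h shims above. On Linux, dlopen/dlsym come from <dlfcn.h> and return NULL on failure; on Windows they resolve to the LoadLibrary/GetProcAddress wrappers, which throw std::runtime_error instead. The plugin names, flag handling, and exported symbol below are hypothetical:

#include "paddle/fluid/platform/port.h"

typedef int (*add_fn)(int, int);

int CallPluginAdd(int a, int b) {
#if !defined(_WIN32)
  // A robust caller would check lib != nullptr on the POSIX path.
  void *lib = dlopen("libmath_plugin.so", RTLD_LAZY);
#else
  void *lib = dlopen("math_plugin.dll", 0);  // the shim ignores `flag`
#endif
  add_fn add = reinterpret_cast<add_fn>(dlsym(lib, "add"));
  return add(a, b);
}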