Commit 1a9008c4 authored by peizhilin

code style fix

test=develop
Parent: b2a770cf
@@ -213,10 +213,10 @@ void PrepareLSTMWeight(const LoDTensor& W_forget_w0,
   float* out_data = out->mutable_data<float>(platform::CPUPlace());
   std::array<const float*, 4> tensors{
       W_forget_w0.data<float>(), W_input_w0.data<float>(),
       W_output_w0.data<float>(), W_cell_w0.data<float>()};
   std::array<const float*, 4> tensors1{
       W_forget_w1.data<float>(), W_input_w1.data<float>(),
       W_output_w1.data<float>(), W_cell_w1.data<float>()};
   for (int row = 0; row < D; row++) {
     for (int col = 0; col < 4; col++) {
@@ -240,7 +240,7 @@ void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input,
                      LoDTensor* out) {
   std::array<const float*, 4> tensors{
       B_forget.data<float>(), B_input.data<float>(), B_output.data<float>(),
       B_cell.data<float>()};
   PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1);
   int D = B_forget.dims()[0];
...
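Note: the elided loop body interleaves the four gate weights into the fused weight matrix. A self-contained sketch of that copy pattern, with a hypothetical helper name and an assumed row-major `[D, 4 * D]` output layout (not the Paddle function itself):

```cpp
#include <array>
#include <cstring>

// Hypothetical helper (not the Paddle code) mirroring the elided loop body:
// each gate weight is a [D, D] block, and row `row` of the fused
// [D, 4 * D] output holds the four gates' rows side by side.
void PackGateWeights(const std::array<const float*, 4>& gates, int D,
                     float* out_data) {
  for (int row = 0; row < D; row++) {
    for (int col = 0; col < 4; col++) {
      float* dst = out_data + 4 * D * row + D * col;  // block (row, col)
      const float* src = gates[col] + D * row;        // gate col, row `row`
      std::memcpy(dst, src, D * sizeof(float));
    }
  }
}
```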
@@ -19,9 +19,9 @@ namespace framework {
 namespace ir {
 // msvc15 doesn't support constexpr correctly.
 #if !defined(_WIN32)
 constexpr char Node::kControlDepVarName[];
 #else
 const char Node::kControlDepVarName[] = "__control_var";
 #endif
 std::unique_ptr<Node> CreateNodeForTest(const std::string& name,
...
@@ -56,9 +56,9 @@ class Node {
   enum class Type { kOperation, kVariable };
 #if !defined(_WIN32)  // msvc does not support constexpr correctly.
   static constexpr char kControlDepVarName[] = "__control_var";
 #else
   static const char kControlDepVarName[];
 #endif
   Type NodeType() const { return type_; }
...
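Note: the two hunks above work around the same pre-C++17 wrinkle: an odr-used `static constexpr` array member needs exactly one out-of-class definition, and msvc15 mishandled that form, so the header declares and the source defines per compiler. A minimal standalone sketch of the pattern (not Paddle code):

```cpp
#include <cstring>
#include <iostream>

struct Node {
#if !defined(_WIN32)
  static constexpr char kControlDepVarName[] = "__control_var";
#else
  static const char kControlDepVarName[];  // initializer lives in the .cc
#endif
};

// Exactly one out-of-class definition (normally in the .cc file):
#if !defined(_WIN32)
constexpr char Node::kControlDepVarName[];  // no initializer repeated here
#else
const char Node::kControlDepVarName[] = "__control_var";
#endif

int main() {
  // Passing the array to strlen odr-uses it, which is what requires the
  // out-of-class definition before C++17 made constexpr members
  // implicitly inline.
  std::cout << std::strlen(Node::kControlDepVarName) << std::endl;
}
```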
@@ -197,26 +197,26 @@ struct PassRegistrar : public Registrar {
       msg)

 // Register a new pass that can be applied on the IR.
 #define REGISTER_PASS(pass_type, pass_class)                \
   STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(                      \
       __reg_pass__##pass_type,                              \
       "REGISTER_PASS must be called in global namespace");  \
   static ::paddle::framework::ir::PassRegistrar<pass_class> \
       __pass_registrar_##pass_type##__(#pass_type);         \
   int TouchPassRegistrar_##pass_type() {                    \
     __pass_registrar_##pass_type##__.Touch();               \
     return 0;                                               \
   }                                                         \
   static ::paddle::framework::ir::PassRegistrar<pass_class> \
       &__pass_tmp_registrar_##pass_type##__ UNUSED =        \
           __pass_registrar_##pass_type##__

 #define USE_PASS(pass_type)                            \
   STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(                 \
       __use_pass_itself_##pass_type,                   \
       "USE_PASS must be called in global namespace");  \
   extern int TouchPassRegistrar_##pass_type();         \
   static int use_pass_itself_##pass_type##_ UNUSED =   \
       TouchPassRegistrar_##pass_type()

 }  // namespace ir
...
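Note: these macros implement the usual static-registrar idiom: `REGISTER_PASS` plants a file-scope registrar object whose constructor registers the pass at load time, and `USE_PASS` references the generated `TouchPassRegistrar_*` function so the linker cannot discard the registering object file. A stripped-down sketch with hypothetical names, assuming a plain factory map rather than Paddle's actual registry machinery:

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Pass {
  virtual ~Pass() = default;
  virtual void Run() = 0;
};

// Global factory map standing in for Paddle's pass registry.
std::map<std::string, std::function<std::unique_ptr<Pass>()>>& Registry() {
  static std::map<std::string, std::function<std::unique_ptr<Pass>()>> r;
  return r;
}

template <typename PassClass>
struct PassRegistrar {
  explicit PassRegistrar(const char* name) {
    Registry()[name] = [] { return std::unique_ptr<Pass>(new PassClass); };
  }
  void Touch() {}  // does nothing; referencing it pins this object file
};

struct MyPass : Pass {
  void Run() override { std::cout << "my_pass ran\n"; }
};

// Roughly what REGISTER_PASS(my_pass, MyPass) expands to:
static PassRegistrar<MyPass> my_pass_registrar("my_pass");
int TouchPassRegistrar_my_pass() {
  my_pass_registrar.Touch();
  return 0;
}

// Roughly what USE_PASS(my_pass) expands to (normally in another file):
static int use_pass_itself_my_pass = TouchPassRegistrar_my_pass();

int main() { Registry()["my_pass"]()->Run(); }
```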
@@ -150,9 +150,9 @@ void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
 #endif
   }
   // The profiler holds a process-wide mutex, which causes a serious
   // performance problem in concurrent scenarios, so an `if` guards the
   // profiled path. Please do not remove the `if`; ask @Superjomn if there
   // are any concerns.
 #ifndef _WIN32
   if (platform::IsProfileEnabled()) {
     platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
...
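Note: the comment explains the shape of the fix: the profiler's record path takes a process-wide lock, so it is entered only when profiling is switched on. A minimal sketch of that check-before-lock pattern, with hypothetical names rather than Paddle's profiler API:

```cpp
#include <atomic>
#include <iostream>
#include <mutex>
#include <string>
#include <vector>

std::atomic<bool> g_profile_enabled{false};
std::mutex g_profile_mu;  // stands in for the profiler's process-wide mutex
std::vector<std::string> g_events;

bool IsProfileEnabled() {
  return g_profile_enabled.load(std::memory_order_relaxed);
}

void RecordEvent(const std::string& name) {
  std::lock_guard<std::mutex> lock(g_profile_mu);  // serializes every caller
  g_events.push_back(name);
}

void RunOp(const std::string& op_type) {
  if (IsProfileEnabled()) {  // cheap atomic read on the hot path
    RecordEvent(op_type);    // the mutex is touched only while profiling
  }
  // ... the kernel launch itself would go here ...
}

int main() {
  RunOp("mul");  // profiling off: no lock taken
  g_profile_enabled = true;
  RunOp("mul");  // profiling on: one event recorded under the lock
  std::cout << g_events.size() << std::endl;  // prints 1
}
```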
@@ -20,9 +20,9 @@
 #else
 #endif
-#include <iterator>
 #include <algorithm>
 #include <chrono>  // NOLINT
+#include <iterator>
 #include <numeric>
 #include <sstream>
 #include <string>
...
@@ -112,7 +112,7 @@ class RowwiseTransformIterator<T, platform::CPUDeviceContext>
   }
   RowwiseTransformIterator<T, platform::CPUDeviceContext> &operator+(int n) {
-    while(n-- > 0) {
+    while (n-- > 0) {
       ++i_;
       if (UNLIKELY(i_ == n_)) {
         i_ = 0;
@@ -161,7 +161,7 @@ class MidWiseTransformIterator<T, platform::CPUDeviceContext>
   }
   MidWiseTransformIterator<T, platform::CPUDeviceContext> &operator+(int n) {
-    while(n-- > 0) {
+    while (n-- > 0) {
       ++j_;
       if (UNLIKELY(j_ == post_)) {
         ++i_;
...
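Note: both `operator+` overloads advance a broadcast index with wrap-around, so a smaller tensor can be swept repeatedly across a larger one; the style fix only adds the space after `while`. A standalone sketch of the wrap-around advance (not the Paddle class):

```cpp
#include <cassert>

// Stand-in for the iterators' broadcast index: walk a row of width n_
// over and over, wrapping back to column 0 at the end of each row.
class CyclicIndex {
 public:
  explicit CyclicIndex(int n) : i_(0), n_(n) {}
  CyclicIndex& operator+(int n) {  // mutating, as in the Paddle iterators
    while (n-- > 0) {              // the style fix: space after `while`
      ++i_;
      if (i_ == n_) {  // stepped past the last column
        i_ = 0;
      }
    }
    return *this;
  }
  int value() const { return i_; }

 private:
  int i_;  // current column
  int n_;  // row width
};

int main() {
  CyclicIndex it(3);
  it + 4;  // 0 -> 1 -> 2 -> 0 -> 1
  assert(it.value() == 1);
  return 0;
}
```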
@@ -67,10 +67,10 @@ static void CalcGridLocations(const platform::CPUDeviceContext& ctx,
   Tensor half_ymax;
   half_xmax.mutable_data<T>({n, h, w}, ctx.GetPlace());
   auto half_xmax_t =
       EigenTensor<T, 3>::From(half_xmax).setConstant(0.5 * x_max);
   half_ymax.mutable_data<T>({n, h, w}, ctx.GetPlace());
   auto half_ymax_t =
       EigenTensor<T, 3>::From(half_ymax).setConstant(0.5 * y_max);
   // scale grid to [0, h-1/w-1]
   auto grid_x_t = EigenTensor<T, 3>::From(grid_x);
...
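Note: the kernel materializes a constant tensor of `0.5 * max` and rescales grid coordinates from [-1, 1] to [0, max] as `x * half + half`. A hedged sketch of that step using Eigen's unsupported Tensor module directly, with illustrative shape and values rather than Paddle's `EigenTensor` wrapper:

```cpp
#include <unsupported/Eigen/CXX11/Tensor>

#include <iostream>

int main() {
  const int n = 1, h = 2, w = 2;  // illustrative shape
  const float x_max = 7.0f;       // illustrative coordinate maximum
  Eigen::Tensor<float, 3> grid_x(n, h, w);
  grid_x.setValues({{{-1.0f, 0.0f}, {0.5f, 1.0f}}});  // values in [-1, 1]

  Eigen::Tensor<float, 3> half_xmax(n, h, w);
  half_xmax.setConstant(0.5f * x_max);

  // (x + 1) * x_max / 2, written as x * half + half like the kernel above:
  Eigen::Tensor<float, 3> scaled = grid_x * half_xmax + half_xmax;
  std::cout << scaled(0, 1, 1) << std::endl;  // 1.0 maps to x_max: prints 7
}
```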
@@ -115,9 +115,9 @@ void InitDevices(bool init_p2p, const std::vector<int> devices) {
 // windows has no support for openblas multi-thread
 #ifdef _WIN32
   if (FLAGS_paddle_num_threads > 1) {
     FLAGS_paddle_num_threads = 1;
   }
 #endif
 #ifndef PADDLE_WITH_MKLDNN
...
@@ -24,38 +24,38 @@
 #include "glog/logging.h"
 #if !defined(_WIN32)
 #include <dlfcn.h>     // dladdr
 #include <execinfo.h>  // backtrace
 #include <sys/stat.h>
 #include <algorithm>  // std::accumulate
 #else
-#include <stdio.h>
-#include <io.h>  // _popen, _pclose
+#include <io.h>  // _popen, _pclose
+#include <stdio.h>
 #include <windows.h>
 #include <numeric>  // std::accumulate in msvc
 #ifndef S_ISDIR  // windows port for sys/stat.h
 #define S_ISDIR(mode) (((mode)&S_IFMT) == S_IFDIR)
 #endif  // S_ISDIR

 static void *dlsym(void *handle, const char *symbol_name) {
   FARPROC found_symbol;
   found_symbol = GetProcAddress((HMODULE)handle, symbol_name);
   if (found_symbol == NULL) {
     throw std::runtime_error(std::string(symbol_name) + " not found.");
-}
-return reinterpret_cast<void *>(found_symbol);
   }
+  return reinterpret_cast<void *>(found_symbol);
+}

 static void *dlopen(const char *filename, int flag) {
   std::string file_name(filename);
   file_name.replace(0, file_name.size() - 1, '/', '\\');
   HMODULE hModule = LoadLibrary(file_name.c_str());
   if (!hModule) {
     throw std::runtime_error(file_name + " not found.");
-}
-return reinterpret_cast<void *>(hModule);
   }
+  return reinterpret_cast<void *>(hModule);
+}

 #endif  // !_WIN32
...
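Note: the `#else` branch gives Windows stand-ins for `dlopen`/`dlsym` on top of `LoadLibrary`/`GetProcAddress`, so dynamic-loading call sites can stay platform-neutral. A hedged usage sketch; the library and symbol names are illustrative, and on POSIX the real `<dlfcn.h>` calls are used:

```cpp
#if !defined(_WIN32)
#include <dlfcn.h>  // the real dlopen/dlsym on POSIX
#endif
#include <iostream>
#include <stdexcept>

typedef double (*unary_fn)(double);

int main() {
  try {
#if defined(_WIN32)
    void* lib = dlopen("ucrtbase.dll", 0);       // shimmed LoadLibrary path
#else
    void* lib = dlopen("libm.so.6", RTLD_LAZY);  // real POSIX call
    if (lib == nullptr) throw std::runtime_error("libm.so.6 not found.");
#endif
    // The shim throws on a missing symbol; POSIX dlsym returns NULL instead.
    unary_fn cosine = reinterpret_cast<unary_fn>(dlsym(lib, "cos"));
    std::cout << cosine(0.0) << std::endl;  // prints 1
  } catch (const std::exception& e) {
    std::cerr << e.what() << std::endl;
  }
  return 0;
}
```

One caveat if the shim is reused elsewhere: `std::string::replace(pos, len, n, c)` is the fill overload, so the separator-fixing line above does not substitute `/` with `\`; the usual idiom is `std::replace(s.begin(), s.end(), '/', '\\')` from `<algorithm>`.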
@@ -46,7 +46,7 @@ limitations under the License. */
 // some platform-independent definitions
 #if defined(_WIN32)
 #define UNUSED
 #define __builtin_expect(EXP, C) (EXP)
 #else
 #define UNUSED __attribute__((unused))
 #endif
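Note: on MSVC the builtin is stubbed out to the bare expression, which keeps hint macros compiling everywhere at the cost of losing the branch hint. A minimal sketch of how the two stubs are typically consumed (the `UNLIKELY` and `helper` names here are hypothetical):

```cpp
#include <cstdio>

#if defined(_WIN32)
#define UNUSED
#define __builtin_expect(EXP, C) (EXP)  // hint dropped, expression kept
#else
#define UNUSED __attribute__((unused))
#endif

// The usual consumer of the stub: an UNLIKELY-style branch hint.
#define UNLIKELY(condition) __builtin_expect(static_cast<bool>(condition), 0)

static int helper() { return 42; }
static int unused_value UNUSED = helper();  // silences -Wunused on GCC/Clang

int main(int argc, char** argv) {
  (void)argv;
  if (UNLIKELY(argc > 64)) {  // compiles identically on MSVC, minus the hint
    std::printf("unusually many arguments\n");
  }
  return 0;
}
```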
@@ -352,7 +352,7 @@ All parameter, weight, gradient are variables in Paddle.
        [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
        py::return_value_policy::reference)
 #if (defined(PADDLE_WITH_CUDA) && !defined(_WIN32))
       .def("get_communicator",
            [](Variable &self) -> platform::Communicator * {
              return self.GetMutable<platform::Communicator>();
            },
@@ -364,7 +364,7 @@ All parameter, weight, gradient are variables in Paddle.
        },
        py::return_value_policy::reference)
 #endif
       ;
 #if !defined(_WIN32)
   py::class_<framework::ReaderHolder>(m, "Reader", "")
...
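Note: the lone `;` is what terminates the pybind11 method chain when the guarded `.def` block is compiled out. A minimal sketch of the same pattern, assuming pybind11 and a hypothetical module and member:

```cpp
#include <pybind11/pybind11.h>

struct Variable {
  int value = 0;
};

PYBIND11_MODULE(example, m) {
  pybind11::class_<Variable>(m, "Variable")
      .def(pybind11::init<>())
#if !defined(_WIN32)
      // Guarded binding, analogous to get_communicator above:
      .def("get_value", [](Variable& self) { return self.value; })
#endif
      ;  // closes the .def chain whether or not the guarded block compiled
}
```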