Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 1a9008c4
Authored Nov 14, 2018 by peizhilin

    code style fix
    test=develop

Parent: b2a770cf

Showing 12 changed files with 67 additions and 67 deletions (+67 -67)
Changed files:
  paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc   +3   -3
  paddle/fluid/framework/ir/node.cc                        +2   -2
  paddle/fluid/framework/ir/node.h                         +2   -2
  paddle/fluid/framework/ir/pass.h                         +18  -18
  paddle/fluid/framework/operator.cc                       +3   -3
  paddle/fluid/inference/api/helper.h                      +1   -1
  paddle/fluid/operators/elementwise_op_function.h         +2   -2
  paddle/fluid/operators/grid_sampler_op.h                 +2   -2
  paddle/fluid/platform/init.cc                            +3   -3
  paddle/fluid/platform/port.h                             +28  -28
  paddle/fluid/platform/variant.h                          +1   -1
  paddle/fluid/pybind/pybind.cc                            +2   -2
paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
@@ -213,10 +213,10 @@ void PrepareLSTMWeight(const LoDTensor& W_forget_w0,
  float* out_data = out->mutable_data<float>(platform::CPUPlace());
  std::array<const float*, 4> tensors{
      W_forget_w0.data<float>(), W_input_w0.data<float>(),
      W_output_w0.data<float>(), W_cell_w0.data<float>()};
  std::array<const float*, 4> tensors1{
      W_forget_w1.data<float>(), W_input_w1.data<float>(),
      W_output_w1.data<float>(), W_cell_w1.data<float>()};

  for (int row = 0; row < D; row++) {
    for (int col = 0; col < 4; col++) {

@@ -240,7 +240,7 @@ void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input,
                     LoDTensor* out) {
  std::array<const float*, 4> tensors{
      B_forget.data<float>(), B_input.data<float>(), B_output.data<float>(),
      B_cell.data<float>()};

  PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1);
  int D = B_forget.dims()[0];
paddle/fluid/framework/ir/node.cc
@@ -19,9 +19,9 @@ namespace framework {
namespace ir {

// msvc15 don't support constexpr in correct way.
#if !defined(_WIN32)
constexpr char Node::kControlDepVarName[];
#else
const char Node::kControlDepVarName[] = "__control_var";
#endif

std::unique_ptr<Node> CreateNodeForTest(const std::string& name,
paddle/fluid/framework/ir/node.h
@@ -56,9 +56,9 @@ class Node {
  enum class Type { kOperation, kVariable };
#if !defined(_WIN32)  // msvc not support constexpr correctly.
  static constexpr char kControlDepVarName[] = "__control_var";
#else
  static const char kControlDepVarName[];
#endif

  Type NodeType() const { return type_; }
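
The guard above can be illustrated in isolation. The following is a minimal, self-contained sketch (MyNode and kName are hypothetical names, not part of this commit) of the same pattern: in-class constexpr initialization where the compiler handles it, and an out-of-line const definition as the MSVC 2015 fallback.

// my_node.h -- hypothetical header mirroring the node.h pattern above
struct MyNode {
#if !defined(_WIN32)  // msvc15 does not handle constexpr static members correctly
  static constexpr char kName[] = "__control_var";
#else
  static const char kName[];
#endif
};

// my_node.cc -- hypothetical source mirroring the node.cc pattern above
#if !defined(_WIN32)
constexpr char MyNode::kName[];  // out-of-line definition, needed pre-C++17 when kName is ODR-used
#else
const char MyNode::kName[] = "__control_var";
#endif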
paddle/fluid/framework/ir/pass.h
@@ -197,26 +197,26 @@ struct PassRegistrar : public Registrar {
      msg)

// Register a new pass that can be applied on the IR.
#define REGISTER_PASS(pass_type, pass_class)                    \
  STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(                          \
      __reg_pass__##pass_type,                                  \
      "REGISTER_PASS must be called in global namespace");      \
  static ::paddle::framework::ir::PassRegistrar<pass_class>     \
      __pass_registrar_##pass_type##__(#pass_type);             \
  int TouchPassRegistrar_##pass_type() {                        \
    __pass_registrar_##pass_type##__.Touch();                   \
    return 0;                                                   \
  }                                                             \
  static ::paddle::framework::ir::PassRegistrar<pass_class>     \
      &__pass_tmp_registrar_##pass_type##__ UNUSED =            \
      __pass_registrar_##pass_type##__

#define USE_PASS(pass_type)                                     \
  STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(                          \
      __use_pass_itself_##pass_type,                            \
      "USE_PASS must be called in global namespace");           \
  extern int TouchPassRegistrar_##pass_type();                  \
  static int use_pass_itself_##pass_type##_ UNUSED =            \
      TouchPassRegistrar_##pass_type()

}  // namespace ir
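
For context, the two macros above are the registration entry points for IR passes. A hedged usage sketch follows; my_fuse_pass and MyFusePass are illustrative names only, and MyFusePass is assumed to be a class derived from paddle::framework::ir::Pass.

// Hypothetical usage of REGISTER_PASS / USE_PASS; names are made up.
// In my_fuse_pass.cc, at global namespace scope (the static assert above enforces this):
REGISTER_PASS(my_fuse_pass, MyFusePass);

// In a target that needs the pass linked in (e.g. a test binary):
USE_PASS(my_fuse_pass);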
paddle/fluid/framework/operator.cc
@@ -150,9 +150,9 @@ void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
#endif
  }

  // The profile has a process-wide mutex, results in serious performance issue
  // in concurrency scenerio. Here use an `if` to fix this issue.
  // Please not remove the `if`, ask @Superjomn if there are any concern.
#ifndef _WIN32
  if (platform::IsProfileEnabled()) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
paddle/fluid/inference/api/helper.h
@@ -20,9 +20,9 @@
#else
#endif
-#include <iterator>
#include <algorithm>
#include <chrono>  // NOLINT
+#include <iterator>
#include <numeric>
#include <sstream>
#include <string>
paddle/fluid/operators/elementwise_op_function.h
@@ -112,7 +112,7 @@ class RowwiseTransformIterator<T, platform::CPUDeviceContext>
  }

  RowwiseTransformIterator<T, platform::CPUDeviceContext> &operator+(int n) {
    while (n-- > 0) {
      ++i_;
      if (UNLIKELY(i_ == n_)) {
        i_ = 0;

@@ -161,7 +161,7 @@ class MidWiseTransformIterator<T, platform::CPUDeviceContext>
  }

  MidWiseTransformIterator<T, platform::CPUDeviceContext> &operator+(int n) {
    while (n-- > 0) {
      ++j_;
      if (UNLIKELY(j_ == post_)) {
        ++i_;
paddle/fluid/operators/grid_sampler_op.h
@@ -67,10 +67,10 @@ static void CalcGridLocations(const platform::CPUDeviceContext& ctx,
  Tensor half_ymax;
  half_xmax.mutable_data<T>({n, h, w}, ctx.GetPlace());
  auto half_xmax_t =
      EigenTensor<T, 3>::From(half_xmax).setConstant(0.5 * x_max);
  half_ymax.mutable_data<T>({n, h, w}, ctx.GetPlace());
  auto half_ymax_t =
      EigenTensor<T, 3>::From(half_ymax).setConstant(0.5 * y_max);

  // scale grid to [0, h-1/w-1]
  auto grid_x_t = EigenTensor<T, 3>::From(grid_x);
paddle/fluid/platform/init.cc
@@ -115,9 +115,9 @@ void InitDevices(bool init_p2p, const std::vector<int> devices) {
// windows has no support for openblas multi-thread
#ifdef _WIN32
  if (FLAGS_paddle_num_threads > 1) {
    FLAGS_paddle_num_threads = 1;
  }
#endif

#ifndef PADDLE_WITH_MKLDNN
paddle/fluid/platform/port.h
@@ -24,38 +24,38 @@
#include "glog/logging.h"

#if !defined(_WIN32)
#include <dlfcn.h>     // dladdr
#include <execinfo.h>  // backtrace
#include <sys/stat.h>
#include <algorithm>   // std::accumulate
#else
-#include <stdio.h>
#include <io.h>  // _popen, _pclose
+#include <stdio.h>
#include <windows.h>
#include <numeric>  // std::accumulate in msvc
#ifndef S_ISDIR  // windows port for sys/stat.h
#define S_ISDIR(mode) (((mode)&S_IFMT) == S_IFDIR)
#endif  // S_ISDIR

static void* dlsym(void* handle, const char* symbol_name) {
  FARPROC found_symbol;
  found_symbol = GetProcAddress((HMODULE)handle, symbol_name);

  if (found_symbol == NULL) {
    throw std::runtime_error(std::string(symbol_name) + " not found.");
  }
  return reinterpret_cast<void*>(found_symbol);
}

static void* dlopen(const char* filename, int flag) {
  std::string file_name(filename);
  file_name.replace(0, file_name.size() - 1, '/', '\\');
  HMODULE hModule = LoadLibrary(file_name.c_str());
  if (!hModule) {
    throw std::runtime_error(file_name + " not found.");
  }
  return reinterpret_cast<void*>(hModule);
}

#endif  // !_WIN32
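
With the shims above in place, dynamic-symbol lookup can be written once for both platforms. A minimal sketch, assuming an illustrative helper name, library path, and symbol (none of which are part of this commit); note that the shimmed dlopen/dlsym throw on failure while the POSIX versions return NULL.

#include <stdexcept>
#include <string>
#if !defined(_WIN32)
#include <dlfcn.h>  // real dlopen/dlsym
#else
#include "paddle/fluid/platform/port.h"  // shimmed dlopen/dlsym shown above
#endif

// Hypothetical helper: resolves `symbol` from the shared library at `lib_path`.
static void* LoadSymbol(const char* lib_path, const char* symbol) {
#if !defined(_WIN32)
  void* handle = dlopen(lib_path, RTLD_NOW);
#else
  void* handle = dlopen(lib_path, 0);  // flag is ignored by the shim above
#endif
  if (handle == nullptr) {
    throw std::runtime_error(std::string(lib_path) + " could not be loaded");
  }
  return dlsym(handle, symbol);  // may return NULL (POSIX) or throw (shim)
}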
paddle/fluid/platform/variant.h
@@ -46,7 +46,7 @@ limitations under the License. */
// some platform-independent defintion
#if defined(_WIN32)
#define UNUSED
#define __builtin_expect(EXP, C) (EXP)
#else
#define UNUSED __attribute__((unused))
#endif
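
The Windows fallback above lets code written against __builtin_expect compile with MSVC, at the cost of dropping the branch-prediction hint. A minimal, self-contained sketch (the MY_UNLIKELY wrapper is hypothetical, not from this commit):

#include <cstdio>

#if !defined(_WIN32)
#define MY_UNLIKELY(x) __builtin_expect(static_cast<bool>(x), 0)
#else
#define MY_UNLIKELY(x) (x)  // same trick as the #define above: the hint is simply dropped
#endif

int main() {
  int error_code = 0;
  if (MY_UNLIKELY(error_code != 0)) {  // tells GCC/Clang this branch is rarely taken
    std::printf("unexpected error: %d\n", error_code);
    return 1;
  }
  return 0;
}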
paddle/fluid/pybind/pybind.cc
@@ -352,7 +352,7 @@ All parameter, weight, gradient are variables in Paddle.
           [](Variable &self) { return self.GetMutable<LoDTensorArray>(); },
           py::return_value_policy::reference)
#if (defined(PADDLE_WITH_CUDA) && !defined(_WIN32))
      .def("get_communicator",
           [](Variable &self) -> platform::Communicator * {
             return self.GetMutable<platform::Communicator>();
           },

@@ -364,7 +364,7 @@ All parameter, weight, gradient are variables in Paddle.
           },
           py::return_value_policy::reference)
#endif
      ;
#if !defined(_WIN32)
  py::class_<framework::ReaderHolder>(m, "Reader", "")