Commit 587cca7e

Authored Aug 23, 2018 by tangwei12

code optimize

Parent 125340ad
Showing 5 changed files with 76 additions and 32 deletions
paddle/fluid/operators/fill_constant_op.cc              +20 -7
paddle/fluid/operators/uniform_random_op.cc              +1 -1
paddle/fluid/operators/uniform_random_op.cu              +1 -1
python/paddle/fluid/tests/unittests/dist_simnet_bow.py  +43 -20
python/paddle/fluid/tests/unittests/test_dist_base.py   +11 -3
paddle/fluid/operators/fill_constant_op.cc

@@ -15,7 +15,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/math_function.h"
-#include "paddle/fluid/platform/device_context.h"
 
 namespace paddle {
 namespace operators {
@@ -41,19 +40,33 @@ class FillConstantOp : public framework::OperatorBase {
         static_cast<framework::proto::VarType::Type>(Attr<int>("dtype"));
     auto value = Attr<float>("value");
     auto force_cpu = Attr<bool>("force_cpu");
-    auto &out =
-        *scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>();
-    out.Resize(framework::make_ddim(Attr<std::vector<int>>("shape")));
+
+    framework::Tensor *tensor = nullptr;
+
+    auto &out_var = *scope.FindVar(Output("Out"));
+
+    if (out_var.IsType<framework::LoDTensor>()) {
+      tensor = out_var.GetMutable<framework::LoDTensor>();
+      tensor->Resize(framework::make_ddim(Attr<std::vector<int>>("shape")));
+    } else if (out_var.IsType<framework::SelectedRows>()) {
+      tensor = out_var.GetMutable<framework::SelectedRows>()->mutable_value();
+      tensor->Resize(framework::make_ddim(Attr<std::vector<int>>("shape")));
+    } else {
+      PADDLE_THROW(
+          "fill constant op's output only"
+          "supports SelectedRows and LoDTensor");
+    }
+
     if (force_cpu) {
       auto cpu = platform::CPUPlace();
-      out.mutable_data(cpu, framework::ToTypeIndex(data_type));
+      tensor->mutable_data(cpu, framework::ToTypeIndex(data_type));
     } else {
-      out.mutable_data(dev_place, framework::ToTypeIndex(data_type));
+      tensor->mutable_data(dev_place, framework::ToTypeIndex(data_type));
     }
 
     platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
     auto &dev_ctx = *pool.Get(dev_place);
-    math::set_constant(dev_ctx, &out, value);
+    math::set_constant(dev_ctx, tensor, value);
   }
 };
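The new branch matters for distributed training, where a sparse parameter can reach the parameter server as a SelectedRows variable and still needs to be filled with a constant; before this change the op unconditionally cast its output to LoDTensor. A minimal sketch of driving that path from Python, assuming the 2018-era Fluid program-desc API (the variable name and shape here are illustrative, not from the commit):

import paddle.fluid as fluid
import paddle.fluid.core as core

# Declare the fill_constant output as SELECTED_ROWS instead of the
# usual LOD_TENSOR, exercising the new else-if branch above.
program = fluid.Program()
block = program.global_block()
out = block.create_var(
    name="selected_rows_out",  # hypothetical name
    type=core.VarDesc.VarType.SELECTED_ROWS,
    dtype="float32")
block.append_op(
    type="fill_constant",
    outputs={"Out": out},
    attrs={
        "shape": [4, 8],  # shape of the underlying value tensor
        "dtype": core.VarDesc.VarType.FP32,
        "value": 1.0,
        "force_cpu": False,
    })

exe = fluid.Executor(fluid.CPUPlace())
# Previously this would have tripped the unconditional
# GetMutable<framework::LoDTensor>() cast.
exe.run(program)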
paddle/fluid/operators/uniform_random_op.cc

@@ -37,7 +37,7 @@ class CPUUniformRandomKernel : public framework::OpKernel<T> {
     } else {
       PADDLE_THROW(
           "uniform_random_op's output only"
-          "supports SelectedRows and Tensor");
+          "supports SelectedRows and LoDTensor");
     }
     T *data = tensor->mutable_data<T>(ctx.GetPlace());
     unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
paddle/fluid/operators/uniform_random_op.cu

@@ -54,7 +54,7 @@ class GPUUniformRandomKernel : public framework::OpKernel<T> {
     } else {
       PADDLE_THROW(
           "uniform_random_op's output only"
-          "supports SelectedRows and Tensor");
+          "supports SelectedRows and LoDTensor");
     }
     T *data = tensor->mutable_data<T>(context.GetPlace());
     unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
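In both kernels the message is built from two adjacent string literals, which the C++ compiler merges into one; only the second literal changes, "Tensor" becoming "LoDTensor". Python merges adjacent literals the same way, which makes the seam easy to demonstrate (including the missing space between "only" and "supports" that survives the merge in the source message):

# Adjacent string literals concatenate in both C/C++ and Python, so the
# two-literal PADDLE_THROW message becomes a single string at the seam:
msg = ("uniform_random_op's output only"
       "supports SelectedRows and LoDTensor")
print(msg)  # uniform_random_op's output onlysupports SelectedRows and LoDTensor
assert "onlysupports" in msg  # the seam has no space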
python/paddle/fluid/tests/unittests/dist_simnet_bow.py

@@ -91,16 +91,21 @@ def train_network(batch_size, is_distributed=False, is_sparse=False):
         is_distributed=is_distributed,
         size=[dict_dim, emb_dim],
         param_attr=fluid.ParamAttr(
-            name="__emb__", learning_rate=emb_lr),
+            initializer=fluid.initializer.Constant(value=0.01),
+            name="__emb__",
+            learning_rate=emb_lr),
         is_sparse=is_sparse)
     ## vsum
     q_sum = fluid.layers.sequence_pool(input=q_emb, pool_type='sum')
     q_ss = fluid.layers.softsign(q_sum)
     ## fc layer after conv
-    q_fc = fluid.layers.fc(input=q_ss,
-                           size=hid_dim,
-                           param_attr=fluid.ParamAttr(
-                               name="__q_fc__", learning_rate=base_lr))
+    q_fc = fluid.layers.fc(
+        input=q_ss,
+        size=hid_dim,
+        param_attr=fluid.ParamAttr(
+            initializer=fluid.initializer.Constant(value=0.01),
+            name="__q_fc__",
+            learning_rate=base_lr))
     # label data
     label = fluid.layers.data(name="label", shape=[1], dtype="int64")
     # pt
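This hunk and the two below give every embedding and fc layer an initializer=fluid.initializer.Constant(value=0.01). Pinning each parameter to a fixed value removes random initialization as a source of divergence when the test compares local and distributed runs of the same network. A self-contained sketch of the pattern, assuming the 2018-era fluid API (dimensions and names here are illustrative):

import paddle.fluid as fluid

dict_dim, emb_dim = 100, 8  # hypothetical sizes

ids = fluid.layers.data(name="ids", shape=[1], dtype="int64", lod_level=1)
emb = fluid.layers.embedding(
    input=ids,
    size=[dict_dim, emb_dim],
    param_attr=fluid.ParamAttr(
        name="__emb__",
        # Every run starts from identical weights, so local and
        # distributed losses can be compared step for step.
        initializer=fluid.initializer.Constant(value=0.01)))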
@@ -112,17 +117,22 @@ def train_network(batch_size, is_distributed=False, is_sparse=False):
         is_distributed=is_distributed,
         size=[dict_dim, emb_dim],
         param_attr=fluid.ParamAttr(
-            name="__emb__", learning_rate=emb_lr),
+            initializer=fluid.initializer.Constant(value=0.01),
+            name="__emb__",
+            learning_rate=emb_lr),
         is_sparse=is_sparse)
     ## vsum
     pt_sum = fluid.layers.sequence_pool(input=pt_emb, pool_type='sum')
     pt_ss = fluid.layers.softsign(pt_sum)
     ## fc layer
-    pt_fc = fluid.layers.fc(input=pt_ss,
-                            size=hid_dim,
-                            param_attr=fluid.ParamAttr(
-                                name="__fc__", learning_rate=base_lr),
-                            bias_attr=fluid.ParamAttr(name="__fc_b__"))
+    pt_fc = fluid.layers.fc(
+        input=pt_ss,
+        size=hid_dim,
+        param_attr=fluid.ParamAttr(
+            initializer=fluid.initializer.Constant(value=0.01),
+            name="__fc__",
+            learning_rate=base_lr),
+        bias_attr=fluid.ParamAttr(name="__fc_b__"))
     # nt
     nt = fluid.layers.data(
         name="neg_title_ids", shape=[1], dtype="int64", lod_level=1)
@@ -132,17 +142,22 @@ def train_network(batch_size, is_distributed=False, is_sparse=False):
         is_distributed=is_distributed,
         size=[dict_dim, emb_dim],
         param_attr=fluid.ParamAttr(
-            name="__emb__", learning_rate=emb_lr),
+            initializer=fluid.initializer.Constant(value=0.01),
+            name="__emb__",
+            learning_rate=emb_lr),
         is_sparse=is_sparse)
     ## vsum
     nt_sum = fluid.layers.sequence_pool(input=nt_emb, pool_type='sum')
     nt_ss = fluid.layers.softsign(nt_sum)
     ## fc layer
-    nt_fc = fluid.layers.fc(input=nt_ss,
-                            size=hid_dim,
-                            param_attr=fluid.ParamAttr(
-                                name="__fc__", learning_rate=base_lr),
-                            bias_attr=fluid.ParamAttr(name="__fc_b__"))
+    nt_fc = fluid.layers.fc(
+        input=nt_ss,
+        size=hid_dim,
+        param_attr=fluid.ParamAttr(
+            initializer=fluid.initializer.Constant(value=0.01),
+            name="__fc__",
+            learning_rate=base_lr),
+        bias_attr=fluid.ParamAttr(name="__fc_b__"))
     cos_q_pt = fluid.layers.cos_sim(q_fc, pt_fc)
     cos_q_nt = fluid.layers.cos_sim(q_fc, nt_fc)
     # loss
@@ -163,7 +178,6 @@ def get_one_data(file_list):
         with open(file, "r") as fin:
             for i in fin:
                 contents.append(i.strip())
-            random.shuffle(contents)
             for index, q in enumerate(contents):
                 try:
                     one_data = [[int(j) for j in i.split(" ")]
@@ -205,7 +219,8 @@ def get_train_reader(batch_size):
 class TestDistSimnetBow2x2(TestDistRunnerBase):
     def get_model(self, batch_size=2):
         # Train program
-        avg_cost, acc, predict = train_network(batch_size, False, False)
+        avg_cost, acc, predict = \
+            train_network(batch_size, bool(int(os.environ["IS_DISTRIBUTED"])),
+                          bool(int(os.environ["IS_SPARSE"])))
         inference_program = fluid.default_main_program().clone()
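get_model no longer hard-codes train_network(batch_size, False, False); the is_distributed and is_sparse arguments now come from environment variables, so one test script can cover all four variants. The parsing is just bool(int(...)), sketched here:

import os

# The launcher exports "0"/"1"; bool(int(...)) turns them into flags.
os.environ["IS_DISTRIBUTED"] = "0"
os.environ["IS_SPARSE"] = "1"

is_distributed = bool(int(os.environ["IS_DISTRIBUTED"]))  # False
is_sparse = bool(int(os.environ["IS_SPARSE"]))            # True
print(is_distributed, is_sparse)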
@@ -219,7 +234,15 @@ class TestDistSimnetBow2x2(TestDistRunnerBase):
 
 if __name__ == "__main__":
-    paddle.dataset.common.download(DATA_URL, 'simnet', DATA_MD5, "train")
     import os
     os.environ['CPU_NUM'] = '1'
+    paddle.dataset.common.download(DATA_URL, 'simnet', DATA_MD5, "train")
+
+    os.environ["IS_DISTRIBUTED"] = '0'
+    os.environ["IS_SPARSE"] = '0'
     runtime_main(TestDistSimnetBow2x2)
+
+    # os.environ["IS_DISTRIBUTED"] = '0'
+    # os.environ["IS_SPARSE"] = '1'
+    # runtime_main(TestDistSimnetBow2x2)
python/paddle/fluid/tests/unittests/test_dist_base.py

@@ -155,7 +155,7 @@ class TestDistBase(unittest.TestCase):
         self._sync_mode = True
         self._setup_config()
 
-    def start_pserver(self, model_file, check_error_log):
+    def start_pserver(self, model_file, check_error_log, required_envs):
         sync_mode_str = "TRUE" if self._sync_mode else "FALSE"
         ps0_ep, ps1_ep = self._ps_endpoints.split(",")
         ps0_cmd = "%s %s pserver %s 0 %s %d TRUE %s" % \
@@ -168,15 +168,23 @@ class TestDistBase(unittest.TestCase):
         ps0_pipe = subprocess.PIPE
         ps1_pipe = subprocess.PIPE
         if check_error_log:
+            required_envs["GLOG_v"] = "7"
+            required_envs["GLOG_logtostderr"] = "1"
+
             print("ps0_cmd:", ps0_cmd)
             print("ps1_cmd:", ps1_cmd)
             ps0_pipe = open("/tmp/ps0_err.log", "wb")
             ps1_pipe = open("/tmp/ps1_err.log", "wb")
-        ps0_proc = subprocess.Popen(
-            ps0_cmd.split(" "), stdout=subprocess.PIPE, stderr=ps0_pipe)
-        ps1_proc = subprocess.Popen(
-            ps1_cmd.split(" "), stdout=subprocess.PIPE, stderr=ps1_pipe)
+
+        ps0_proc = subprocess.Popen(
+            ps0_cmd.split(" "),
+            stdout=subprocess.PIPE,
+            stderr=ps0_pipe,
+            env=required_envs)
+        ps1_proc = subprocess.Popen(
+            ps1_cmd.split(" "),
+            stdout=subprocess.PIPE,
+            stderr=ps1_pipe,
+            env=required_envs)
 
         if not check_error_log:
             return ps0_proc, ps1_proc, None, None
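Note that env=required_envs hands the pserver child exactly that mapping as its entire environment rather than adding to the parent's, so required_envs must already contain everything the child needs (PATH, PYTHONPATH, plus the GLOG_* switches set above). A small standalone sketch of the pattern:

import os
import subprocess

# Start from a copy of the parent environment so PATH etc. survive;
# env= replaces the child's environment wholesale.
required_envs = dict(os.environ)
required_envs["GLOG_v"] = "7"
required_envs["GLOG_logtostderr"] = "1"

proc = subprocess.Popen(
    ["python", "-c", "import os; print(os.environ['GLOG_v'])"],
    stdout=subprocess.PIPE,
    env=required_envs)
out, _ = proc.communicate()
print(out)  # b'7\n'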