Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleDetection
提交
865a714e
P
PaddleDetection
项目概览
PaddlePaddle
/
PaddleDetection
大约 1 年 前同步成功
通知
695
Star
11112
Fork
2696
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
184
列表
看板
标记
里程碑
合并请求
40
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleDetection
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
184
Issue
184
列表
看板
标记
里程碑
合并请求
40
合并请求
40
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
865a714e
编写于
1月 31, 2018
作者:
Y
Yu Yang
提交者:
GitHub
1月 31, 2018
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #7970 from reyoung/feature/test_w2v_parallel.do
Make word2vec uses parallel.do when CI
上级
ed942263
f1e32e24
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
177 additions
and
81 deletions
+177
-81
paddle/operators/sum_op.h
paddle/operators/sum_op.h
+38
-11
python/paddle/v2/fluid/tests/book/test_word2vec.py
python/paddle/v2/fluid/tests/book/test_word2vec.py
+139
-70
未找到文件。
paddle/operators/sum_op.h
浏览文件 @
865a714e
...
@@ -68,7 +68,32 @@ class SumKernel : public framework::OpKernel<T> {
...
@@ -68,7 +68,32 @@ class SumKernel : public framework::OpKernel<T> {
}
}
}
}
}
else
if
(
out_var
->
IsType
<
framework
::
SelectedRows
>
())
{
}
else
if
(
out_var
->
IsType
<
framework
::
SelectedRows
>
())
{
PADDLE_ENFORCE
(
!
in_place
,
"SelectedRows not support inplace sum now"
);
std
::
unique_ptr
<
framework
::
SelectedRows
>
in0
;
if
(
in_place
)
{
// If is in_place, we store the input[0] to in0
auto
&
in_sel0
=
in_vars
[
0
]
->
Get
<
SelectedRows
>
();
auto
&
rows
=
in_sel0
.
rows
();
#ifdef PADDLE_WITH_CUDA
std
::
vector
<
int64_t
>
rows_in_cpu
;
rows_in_cpu
.
reserve
(
rows
.
size
());
for
(
auto
item
:
rows
)
{
rows_in_cpu
.
push_back
(
item
);
}
in0
.
reset
(
new
framework
::
SelectedRows
(
rows_in_cpu
,
in_sel0
.
height
()));
#else
in0
.
reset
(
new
framework
::
SelectedRows
(
rows
,
in_sel0
.
height
()));
#endif
in0
->
mutable_value
()
->
ShareDataWith
(
in_sel0
.
value
());
}
auto
get_selected_row
=
[
&
](
size_t
i
)
->
const
SelectedRows
&
{
if
(
i
==
0
&&
in0
)
{
return
*
in0
.
get
();
}
else
{
return
in_vars
[
i
]
->
Get
<
SelectedRows
>
();
}
};
auto
*
out
=
context
.
Output
<
SelectedRows
>
(
"Out"
);
auto
*
out
=
context
.
Output
<
SelectedRows
>
(
"Out"
);
out
->
mutable_rows
()
->
clear
();
out
->
mutable_rows
()
->
clear
();
auto
*
out_value
=
out
->
mutable_value
();
auto
*
out_value
=
out
->
mutable_value
();
...
@@ -76,24 +101,26 @@ class SumKernel : public framework::OpKernel<T> {
...
@@ -76,24 +101,26 @@ class SumKernel : public framework::OpKernel<T> {
// Runtime InferShape
// Runtime InferShape
size_t
first_dim
=
0
;
size_t
first_dim
=
0
;
for
(
int
i
=
0
;
i
<
N
;
i
++
)
{
for
(
int
i
=
0
;
i
<
N
;
i
++
)
{
first_dim
+=
in_vars
[
i
]
->
Get
<
SelectedRows
>
().
rows
().
size
();
auto
&
sel_row
=
get_selected_row
(
i
);
first_dim
+=
sel_row
.
rows
().
size
();
}
}
auto
in_dim
=
in_vars
[
0
]
->
Get
<
SelectedRows
>
().
value
().
dims
();
auto
in_dim
=
auto
in_dim_vec
=
framework
::
vectorize
(
in_dim
);
framework
::
vectorize
(
get_selected_row
(
N
-
1
).
value
().
dims
()
);
in_dim
_vec
[
0
]
=
static_cast
<
int64_t
>
(
first_dim
);
in_dim
[
0
]
=
static_cast
<
int64_t
>
(
first_dim
);
out_value
->
Resize
(
framework
::
make_ddim
(
in_dim
_vec
));
out_value
->
Resize
(
framework
::
make_ddim
(
in_dim
));
out_value
->
mutable_data
<
T
>
(
context
.
GetPlace
());
out_value
->
mutable_data
<
T
>
(
context
.
GetPlace
());
math
::
SelectedRowsAddTo
<
DeviceContext
,
T
>
functor
;
math
::
SelectedRowsAddTo
<
DeviceContext
,
T
>
functor
;
int64_t
offset
=
0
;
int64_t
offset
=
0
;
for
(
int
i
=
0
;
i
<
N
;
i
++
)
{
for
(
int
i
=
0
;
i
<
N
;
i
++
)
{
PADDLE_ENFORCE_EQ
(
out
->
height
(),
auto
&
sel_row
=
get_selected_row
(
i
);
in_vars
[
i
]
->
Get
<
SelectedRows
>
().
height
());
functor
(
context
.
template
device_context
<
DeviceContext
>(),
PADDLE_ENFORCE_EQ
(
out
->
height
(),
sel_row
.
height
());
in_vars
[
i
]
->
Get
<
SelectedRows
>
(),
offset
,
out
);
functor
(
context
.
template
device_context
<
DeviceContext
>(),
sel_row
,
offset
+=
in_vars
[
i
]
->
Get
<
SelectedRows
>
().
value
().
numel
();
offset
,
out
);
offset
+=
sel_row
.
value
().
numel
();
}
}
}
else
if
(
out_var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
}
else
if
(
out_var
->
IsType
<
framework
::
LoDTensorArray
>
())
{
auto
&
out_array
=
*
out_var
->
GetMutable
<
framework
::
LoDTensorArray
>
();
auto
&
out_array
=
*
out_var
->
GetMutable
<
framework
::
LoDTensorArray
>
();
...
...
python/paddle/v2/fluid/tests/book/test_word2vec.py
浏览文件 @
865a714e
...
@@ -12,76 +12,145 @@
...
@@ -12,76 +12,145 @@
# See the License for the specific language governing permissions and
# See the License for the specific language governing permissions and
# limitations under the License.
# limitations under the License.
import
numpy
as
np
import
paddle.v2
as
paddle
import
paddle.v2
as
paddle
import
paddle.v2.fluid
as
fluid
import
paddle.v2.fluid
as
fluid
import
unittest
import
os
PASS_NUM
=
100
EMBED_SIZE
=
32
def
main
(
use_cuda
,
is_sparse
,
parallel
):
HIDDEN_SIZE
=
256
if
use_cuda
and
not
fluid
.
core
.
is_compiled_with_cuda
():
N
=
5
return
BATCH_SIZE
=
32
IS_SPARSE
=
True
PASS_NUM
=
100
EMBED_SIZE
=
32
word_dict
=
paddle
.
dataset
.
imikolov
.
build_dict
()
HIDDEN_SIZE
=
256
dict_size
=
len
(
word_dict
)
N
=
5
BATCH_SIZE
=
32
first_word
=
fluid
.
layers
.
data
(
name
=
'firstw'
,
shape
=
[
1
],
dtype
=
'int64'
)
IS_SPARSE
=
is_sparse
second_word
=
fluid
.
layers
.
data
(
name
=
'secondw'
,
shape
=
[
1
],
dtype
=
'int64'
)
third_word
=
fluid
.
layers
.
data
(
name
=
'thirdw'
,
shape
=
[
1
],
dtype
=
'int64'
)
def
__network__
(
words
):
forth_word
=
fluid
.
layers
.
data
(
name
=
'forthw'
,
shape
=
[
1
],
dtype
=
'int64'
)
embed_first
=
fluid
.
layers
.
embedding
(
next_word
=
fluid
.
layers
.
data
(
name
=
'nextw'
,
shape
=
[
1
],
dtype
=
'int64'
)
input
=
words
[
0
],
size
=
[
dict_size
,
EMBED_SIZE
],
embed_first
=
fluid
.
layers
.
embedding
(
dtype
=
'float32'
,
input
=
first_word
,
is_sparse
=
IS_SPARSE
,
size
=
[
dict_size
,
EMBED_SIZE
],
param_attr
=
'shared_w'
)
dtype
=
'float32'
,
embed_second
=
fluid
.
layers
.
embedding
(
is_sparse
=
IS_SPARSE
,
input
=
words
[
1
],
param_attr
=
'shared_w'
)
size
=
[
dict_size
,
EMBED_SIZE
],
embed_second
=
fluid
.
layers
.
embedding
(
dtype
=
'float32'
,
input
=
second_word
,
is_sparse
=
IS_SPARSE
,
size
=
[
dict_size
,
EMBED_SIZE
],
param_attr
=
'shared_w'
)
dtype
=
'float32'
,
embed_third
=
fluid
.
layers
.
embedding
(
is_sparse
=
IS_SPARSE
,
input
=
words
[
2
],
param_attr
=
'shared_w'
)
size
=
[
dict_size
,
EMBED_SIZE
],
embed_third
=
fluid
.
layers
.
embedding
(
dtype
=
'float32'
,
input
=
third_word
,
is_sparse
=
IS_SPARSE
,
size
=
[
dict_size
,
EMBED_SIZE
],
param_attr
=
'shared_w'
)
dtype
=
'float32'
,
embed_forth
=
fluid
.
layers
.
embedding
(
is_sparse
=
IS_SPARSE
,
input
=
words
[
3
],
param_attr
=
'shared_w'
)
size
=
[
dict_size
,
EMBED_SIZE
],
embed_forth
=
fluid
.
layers
.
embedding
(
dtype
=
'float32'
,
input
=
forth_word
,
is_sparse
=
IS_SPARSE
,
size
=
[
dict_size
,
EMBED_SIZE
],
param_attr
=
'shared_w'
)
dtype
=
'float32'
,
is_sparse
=
IS_SPARSE
,
concat_embed
=
fluid
.
layers
.
concat
(
param_attr
=
'shared_w'
)
input
=
[
embed_first
,
embed_second
,
embed_third
,
embed_forth
],
axis
=
1
)
hidden1
=
fluid
.
layers
.
fc
(
input
=
concat_embed
,
concat_embed
=
fluid
.
layers
.
concat
(
size
=
HIDDEN_SIZE
,
input
=
[
embed_first
,
embed_second
,
embed_third
,
embed_forth
],
axis
=
1
)
act
=
'sigmoid'
)
hidden1
=
fluid
.
layers
.
fc
(
input
=
concat_embed
,
size
=
HIDDEN_SIZE
,
act
=
'sigmoid'
)
predict_word
=
fluid
.
layers
.
fc
(
input
=
hidden1
,
predict_word
=
fluid
.
layers
.
fc
(
input
=
hidden1
,
size
=
dict_size
,
act
=
'softmax'
)
size
=
dict_size
,
cost
=
fluid
.
layers
.
cross_entropy
(
input
=
predict_word
,
label
=
next_word
)
act
=
'softmax'
)
avg_cost
=
fluid
.
layers
.
mean
(
x
=
cost
)
cost
=
fluid
.
layers
.
cross_entropy
(
input
=
predict_word
,
label
=
words
[
4
])
sgd_optimizer
=
fluid
.
optimizer
.
SGD
(
learning_rate
=
0.001
)
avg_cost
=
fluid
.
layers
.
mean
(
x
=
cost
)
sgd_optimizer
.
minimize
(
avg_cost
)
return
avg_cost
train_reader
=
paddle
.
batch
(
word_dict
=
paddle
.
dataset
.
imikolov
.
build_dict
()
paddle
.
dataset
.
imikolov
.
train
(
word_dict
,
N
),
BATCH_SIZE
)
dict_size
=
len
(
word_dict
)
place
=
fluid
.
CPUPlace
()
first_word
=
fluid
.
layers
.
data
(
name
=
'firstw'
,
shape
=
[
1
],
dtype
=
'int64'
)
exe
=
fluid
.
Executor
(
place
)
second_word
=
fluid
.
layers
.
data
(
name
=
'secondw'
,
shape
=
[
1
],
dtype
=
'int64'
)
feeder
=
fluid
.
DataFeeder
(
third_word
=
fluid
.
layers
.
data
(
name
=
'thirdw'
,
shape
=
[
1
],
dtype
=
'int64'
)
feed_list
=
[
first_word
,
second_word
,
third_word
,
forth_word
,
next_word
],
forth_word
=
fluid
.
layers
.
data
(
name
=
'forthw'
,
shape
=
[
1
],
dtype
=
'int64'
)
place
=
place
)
next_word
=
fluid
.
layers
.
data
(
name
=
'nextw'
,
shape
=
[
1
],
dtype
=
'int64'
)
exe
.
run
(
fluid
.
default_startup_program
())
if
not
parallel
:
avg_cost
=
__network__
(
for
pass_id
in
range
(
PASS_NUM
):
[
first_word
,
second_word
,
third_word
,
forth_word
,
next_word
])
for
data
in
train_reader
():
else
:
avg_cost_np
=
exe
.
run
(
fluid
.
default_main_program
(),
places
=
fluid
.
layers
.
get_places
()
feed
=
feeder
.
feed
(
data
),
pd
=
fluid
.
layers
.
ParallelDo
(
places
)
fetch_list
=
[
avg_cost
])
with
pd
.
do
():
if
avg_cost_np
[
0
]
<
5.0
:
avg_cost
=
__network__
(
exit
(
0
)
# if avg cost less than 10.0, we think our code is good.
map
(
pd
.
read_input
,
[
exit
(
1
)
first_word
,
second_word
,
third_word
,
forth_word
,
next_word
]))
pd
.
write_output
(
avg_cost
)
avg_cost
=
fluid
.
layers
.
mean
(
x
=
pd
())
sgd_optimizer
=
fluid
.
optimizer
.
SGD
(
learning_rate
=
0.001
)
sgd_optimizer
.
minimize
(
avg_cost
)
train_reader
=
paddle
.
batch
(
paddle
.
dataset
.
imikolov
.
train
(
word_dict
,
N
),
BATCH_SIZE
)
place
=
fluid
.
CUDAPlace
(
0
)
if
use_cuda
else
fluid
.
CPUPlace
()
exe
=
fluid
.
Executor
(
place
)
feeder
=
fluid
.
DataFeeder
(
feed_list
=
[
first_word
,
second_word
,
third_word
,
forth_word
,
next_word
],
place
=
place
)
exe
.
run
(
fluid
.
default_startup_program
())
for
pass_id
in
range
(
PASS_NUM
):
for
data
in
train_reader
():
avg_cost_np
=
exe
.
run
(
fluid
.
default_main_program
(),
feed
=
feeder
.
feed
(
data
),
fetch_list
=
[
avg_cost
])
if
avg_cost_np
[
0
]
<
5.0
:
return
raise
AssertionError
(
"Cost is too large {0:2.2}"
.
format
(
avg_cost_np
[
0
]))
# Run the full test matrix only when the FULL_TEST environment variable is
# set to a truthy value; otherwise CI executes just the minimum subset.
FULL_TEST = os.getenv('FULL_TEST', '0').lower() in [
    'true', '1', 't', 'y', 'yes', 'on'
]
SKIP_REASON = "Only run minimum number of tests in CI server, to make CI faster"
class W2VTest(unittest.TestCase):
    # Empty container class; concrete test methods are attached to it
    # dynamically via setattr in inject_test_method.
    pass
def inject_test_method(use_cuda, is_sparse, parallel):
    # Build one named test method for the given (device, sparsity, parallel)
    # combination and attach it to W2VTest.
    fn_name = "test_{0}_{1}_{2}".format("cuda" if use_cuda else "cpu",
                                        "sparse" if is_sparse else "dense",
                                        "parallel" if parallel else "normal")

    def __impl__(*args, **kwargs):
        # Run the training inside a fresh program pair and scope so test
        # cases cannot leak state into one another.
        prog = fluid.Program()
        startup_prog = fluid.Program()
        scope = fluid.core.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(prog, startup_prog):
                main(use_cuda=use_cuda, is_sparse=is_sparse, parallel=parallel)

    if use_cuda and is_sparse and parallel:
        fn = __impl__
    else:
        # skip the other test when on CI server
        fn = unittest.skipUnless(
            condition=FULL_TEST, reason=SKIP_REASON)(__impl__)

    setattr(W2VTest, fn_name, fn)
# Register all 2x2x2 combinations of (use_cuda, is_sparse, parallel) as
# individual test methods on W2VTest.
for use_cuda in (False, True):
    for is_sparse in (False, True):
        for parallel in (False, True):
            inject_test_method(use_cuda, is_sparse, parallel)
if __name__ == '__main__':
    # Discover and run the dynamically injected test methods.
    unittest.main()
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录