BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 1c004a49
Authored Nov 03, 2017 by dangqingqing

Resolve conflicts.

Parents: 519476a4, 8b30e2ab
Showing 13 changed files with 368 additions and 32 deletions (+368, -32)
paddle/operators/lookup_table_op.h                                    +3   -1
paddle/operators/math/CMakeLists.txt                                  +1   -1
paddle/operators/math/sequence_pooling.cc                             +8   -8
paddle/operators/math/sequence_pooling.cu                             +7   -7
paddle/operators/sequence_pool_op.cc                                  +3   -2
paddle/scripts/travis/build_doc.sh                                    +2   -2
python/paddle/v2/framework/initializer.py                           +130   -1
python/paddle/v2/framework/layers.py                                  +2   -6
python/paddle/v2/framework/nets.py                                    +2   -1
python/paddle/v2/framework/tests/test_evaluator.py                    +1   -0
python/paddle/v2/framework/tests/test_initializer.py                +107   -0
python/paddle/v2/framework/tests/test_recommender_system.py           +3   -3
python/paddle/v2/framework/tests/test_understand_sentiment_conv.py   +99   -0
paddle/operators/lookup_table_op.h

@@ -90,11 +90,13 @@ class LookupTableGradKernel : public framework::OpKernel<T> {
     auto* d_output_data = d_output->data<T>();
     auto* d_table_data = d_table->mutable_data<T>(context.GetPlace());
 
+    memset(d_table_data, 0, d_table->numel() * sizeof(T));
+
     for (int64_t i = 0; i < ids->numel(); ++i) {
       PADDLE_ENFORCE_LT(ids_data[i], N);
       PADDLE_ENFORCE_GE(ids_data[i], 0);
       for (int j = 0; j < D; ++j) {
-        d_table_data[ids_data[i] * D + j] = d_output_data[i * D + j];
+        d_table_data[ids_data[i] * D + j] += d_output_data[i * D + j];
       }
     }
   }
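The key fix here: the table gradient is now zero-initialized and accumulated with +=, so rows referenced by repeated ids sum their gradients instead of overwriting each other. A minimal NumPy sketch of the same scatter-add, with illustrative names (not Paddle API):

    import numpy as np

    def lookup_table_grad(ids, d_output, table_rows, dim):
        # Zero-init the table gradient, then accumulate per id.
        d_table = np.zeros((table_rows, dim))
        for i, idx in enumerate(ids):
            # '+=' matters: repeated ids must sum their gradients,
            # not overwrite one another (the bug this hunk fixes).
            d_table[idx] += d_output[i]
        return d_table

    ids = np.array([2, 0, 2])          # id 2 appears twice
    d_out = np.ones((3, 4))
    print(lookup_table_grad(ids, d_out, table_rows=5, dim=4)[2])  # row 2 sums to 2.0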
paddle/operators/math/CMakeLists.txt

@@ -19,7 +19,7 @@ else()
     cc_library(softmax SRCS softmax.cc DEPS operator)
     cc_library(cross_entropy SRCS cross_entropy.cc DEPS operator)
     cc_library(pooling SRCS pooling.cc DEPS device_context)
-    nv_library(sequence_pooling SRCS sequence_pooling.cc DEPS device_context math_function)
+    cc_library(sequence_pooling SRCS sequence_pooling.cc DEPS device_context math_function)
     cc_library(vol2col SRCS vol2col.cc DEPS device_context)
     cc_library(context_project SRCS context_project.cc DEPS device_context)
     cc_library(sequence2batch SRCS sequence2batch.cc DEPS device_context)
paddle/operators/math/sequence_pooling.cc

@@ -28,9 +28,9 @@ class MaxSeqPoolFunctor<platform::CPUPlace, T> {
     auto in_dims = input.dims();
     auto out_dims = output->dims();
     auto idx_dims = index->dims();
-    PADDLE_ENFORCE_GT(in_dims.size(), 1UL);
-    PADDLE_ENFORCE_GT(out_dims.size(), 1UL);
-    for (size_t i = 1; i < in_dims.size(); ++i) {
+    PADDLE_ENFORCE_GT(in_dims.size(), 1);
+    PADDLE_ENFORCE_GT(out_dims.size(), 1);
+    for (int64_t i = 1; i < in_dims.size(); ++i) {
       PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i]);
     }
     PADDLE_ENFORCE_EQ(idx_dims, out_dims);
@@ -69,9 +69,9 @@ class MaxSeqPoolGradFunctor<platform::CPUPlace, T> {
     auto og_dims = out_grad.dims();
     auto ig_dims = in_grad->dims();
     auto idx_dims = index.dims();
-    PADDLE_ENFORCE_GT(og_dims.size(), 1UL);
-    PADDLE_ENFORCE_GT(ig_dims.size(), 1UL);
-    for (size_t i = 1; i < og_dims.size(); ++i) {
+    PADDLE_ENFORCE_GT(og_dims.size(), 1);
+    PADDLE_ENFORCE_GT(ig_dims.size(), 1);
+    for (int64_t i = 1; i < og_dims.size(); ++i) {
       PADDLE_ENFORCE_EQ(og_dims[i], ig_dims[i]);
     }
     PADDLE_ENFORCE_EQ(idx_dims, og_dims);
@@ -84,8 +84,8 @@ class MaxSeqPoolGradFunctor<platform::CPUPlace, T> {
     set_zero(context, in_grad, static_cast<T>(0.0));
     int64_t num_seq = og_dims[0];
     int64_t dim = out_grad.numel() / num_seq;
-    for (size_t i = 0; i < num_seq; ++i) {
-      for (size_t j = 0; j < dim; ++j) {
+    for (int64_t i = 0; i < num_seq; ++i) {
+      for (int64_t j = 0; j < dim; ++j) {
         int step_id = max_index[i * dim + j];
         ig_data[step_id * dim + j] = og_data[i * dim + j];
       }
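These hunks only fix signedness (dims.size() is signed, so comparing against 1UL and indexing with size_t mixed signed and unsigned), but they also show what the functor pair computes: the forward pass records, per sequence and per feature, the time step that attained the max (index), and the backward pass routes each output gradient back to that step. A rough NumPy sketch under LoD-style offsets (illustrative names, not Paddle API):

    import numpy as np

    def max_seq_pool(x, starts):
        """x: [total_steps, dim]; starts: sequence offsets, e.g. [0, 2, 6]."""
        num_seq, dim = len(starts) - 1, x.shape[1]
        out = np.empty((num_seq, dim))
        index = np.empty((num_seq, dim), dtype=np.int64)
        for i in range(num_seq):
            seq = x[starts[i]:starts[i + 1]]
            index[i] = starts[i] + seq.argmax(axis=0)  # winning step per feature
            out[i] = seq.max(axis=0)
        return out, index

    def max_seq_pool_grad(out_grad, index, total_steps):
        # Backward: scatter each output gradient to the step that won the max.
        in_grad = np.zeros((total_steps, out_grad.shape[1]))
        for i in range(out_grad.shape[0]):
            for j in range(out_grad.shape[1]):
                in_grad[index[i, j], j] = out_grad[i, j]
        return in_grad

    x = np.arange(12.0).reshape(6, 2)
    out, idx = max_seq_pool(x, starts=[0, 2, 6])
    print(out)   # per-sequence, per-feature maxima
    print(max_seq_pool_grad(np.ones_like(out), idx, total_steps=6))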
paddle/operators/math/sequence_pooling.cu

@@ -31,7 +31,7 @@ __global__ void KeMaxSequencePool(const T* input, const size_t* starts,
   size_t start = starts[seq_id];
   size_t end = starts[seq_id + 1];
-  for (int i = dim_idx; i < dim; i += blockDim.x) {
+  for (int64_t i = dim_idx; i < dim; i += blockDim.x) {
     T max_val = static_cast<T>(-FLT_MAX);
     int max_id = -1;
     for (size_t step_id = start; step_id < end; step_id++) {
@@ -54,9 +54,9 @@ class MaxSeqPoolFunctor<platform::GPUPlace, T> {
     auto in_dims = input.dims();
     auto out_dims = output->dims();
     auto idx_dims = index->dims();
-    PADDLE_ENFORCE_GT(in_dims.size(), 1UL);
-    PADDLE_ENFORCE_GT(out_dims.size(), 1UL);
-    for (size_t i = 1; i < in_dims.size(); ++i) {
+    PADDLE_ENFORCE_GT(in_dims.size(), static_cast<int64_t>(1));
+    PADDLE_ENFORCE_GT(out_dims.size(), 1);
+    for (int64_t i = 1; i < in_dims.size(); ++i) {
       PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i]);
     }
     PADDLE_ENFORCE_EQ(idx_dims, out_dims);
@@ -100,9 +100,9 @@ class MaxSeqPoolGradFunctor<platform::GPUPlace, T> {
     auto og_dims = out_grad.dims();
     auto idx_dims = index.dims();
     auto ig_dims = in_grad->dims();
-    PADDLE_ENFORCE_GT(og_dims.size(), 1UL);
-    PADDLE_ENFORCE_GT(ig_dims.size(), 1UL);
-    for (size_t i = 1; i < og_dims.size(); ++i) {
+    PADDLE_ENFORCE_GT(og_dims.size(), static_cast<int64_t>(1));
+    PADDLE_ENFORCE_GT(ig_dims.size(), static_cast<int64_t>(1));
+    for (int64_t i = 1; i < og_dims.size(); ++i) {
       PADDLE_ENFORCE_EQ(og_dims[i], ig_dims[i]);
     }
     PADDLE_ENFORCE_EQ(idx_dims, og_dims);
paddle/operators/sequence_pool_op.cc

@@ -50,8 +50,9 @@ class SequencePoolOpMaker : public framework::OpProtoAndCheckerMaker {
         .AsIntermediate();
     AddAttr<std::string>(
         "pooltype",
-        "(int, default AVERAGE) The pooling pooltype of SequencePoolOp.")
-        .SetDefault("AVERAGE");
+        "(int, default AVERAGE) the pooling pooltype of SequencePoolOp.")
+        .SetDefault("AVERAGE")
+        .InEnum({"AVERAGE", "SUM", "SQRT", "LAST", "FIRST", "MAX"});
     AddComment(R"DOC(
 SequencePoolOp pools features of all time-steps of each instance.
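With .InEnum, unsupported pool types are rejected by the attribute checker when the op is constructed, which is presumably why the hand-rolled Python check in layers.py (later in this diff) could be removed. Roughly, in Python terms (illustrative sketch, not the Paddle API):

    # Rough equivalent of what .InEnum adds to the attribute checker:
    ALLOWED = {"AVERAGE", "SUM", "SQRT", "LAST", "FIRST", "MAX"}

    def check_pooltype(pooltype):
        if pooltype not in ALLOWED:
            raise ValueError("pooltype must be one of %s, got %r"
                             % (sorted(ALLOWED), pooltype))
        return pooltype

    check_pooltype("MAX")      # ok
    # check_pooltype("avg")    # would raise ValueError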
paddle/scripts/travis/build_doc.sh

@@ -53,8 +53,8 @@ function deploy_docs() {
   set +e
   rm -rf ${DIR}/doc ${DIR}/doc_cn
   set -e
-  mv ../doc/cn/html ${DIR}/doc_cn
-  mv ../doc/en/html ${DIR}/doc
+  cp -r ../doc/cn/html ${DIR}/doc_cn
+  cp -r ../doc/en/html ${DIR}/doc
   git add .
 }
python/paddle/v2/framework/initializer.py

 import paddle.v2.framework.framework as framework
+import numpy as np
 
-__all__ = ['ConstantInitializer', 'UniformInitializer']
+__all__ = [
+    'ConstantInitializer', 'UniformInitializer', 'NormalInitializer',
+    'XavierInitializer'
+]
 
 
 class Initializer(object):
@@ -20,6 +24,41 @@ class Initializer(object):
         """
         raise NotImplementedError()
 
+    def _compute_fans(self, var):
+        """Compute the fan_in and the fan_out for layers
+
+        This method computes the fan_in and the fan_out
+        for neural network layers, if not specified. It is
+        not possible to perfectly estimate fan_in and fan_out.
+        This method will estimate it correctly for matrix multiply and
+        convolutions.
+
+        Args:
+            var: variable for which fan_in and fan_out have to be computed
+
+        Returns:
+            tuple of two integers (fan_in, fan_out)
+        """
+        shape = var.shape
+        if not shape or len(shape) == 0:
+            fan_in = fan_out = 1
+        elif len(shape) == 1:
+            fan_in = fan_out = shape[0]
+        elif len(shape) == 2:
+            # This is the case for simple matrix multiply
+            fan_in = shape[0]
+            fan_out = shape[1]
+        else:
+            # Assume this to be a convolutional kernel
+            # In PaddlePaddle, the shape of the kernel is like:
+            # [num_filters, num_filter_channels, ...] where the remaining
+            # dimensions are the filter_size
+            receptive_field_size = np.prod(shape[2:])
+            fan_in = shape[1] * receptive_field_size
+            fan_out = shape[0] * receptive_field_size
+
+        return (fan_in, fan_out)
+
 
 class ConstantInitializer(Initializer):
     """Implements the constant initializer
@@ -156,3 +195,93 @@ class NormalInitializer(Initializer):
             })
         var.op = op
         return op
+
+
+class XavierInitializer(Initializer):
+    """Implements the Xavier initializer
+
+    This class implements the Xavier weight initializer from the paper
+    Understanding the difficulty of training deep feedforward neural
+    networks[1] by Xavier Glorot and Yoshua Bengio.
+
+    This initializer is designed to keep the scale of the gradients
+    approximately same in all the layers. In case of Uniform distribution,
+    the range is [-x, x], where x = sqrt(6 / (fan_in + fan_out)).
+    In case of Normal distribution, the mean is 0 and the standard deviation
+    is sqrt(2/ (fan_in + fan_out)).
+
+    References:
+        [1] Understanding the difficulty of training deep feedforward neural
+            networks. International conference on artificial intelligence and
+            statistics.
+            (http://proceedings.mlr.press/v9/glorot10a.html)
+    """
+
+    def __init__(self, uniform=True, fan_in=None, fan_out=None, seed=0):
+        """Constructor for XavierInitializer
+
+        Args:
+            uniform: whether to use uniform or normal distribution
+            fan_in: fan_in for Xavier initialization. If None, it is
+                    inferred from the variable.
+            fan_out: fan_out for Xavier initialization. If None, it is
+                     inferred from the variable.
+            seed: random seed
+
+        Note: It is recommended to set fan_in and fan_out to None for
+              most cases.
+        """
+        assert uniform is not None
+        assert seed is not None
+        super(XavierInitializer, self).__init__()
+        self._uniform = uniform
+        self._fan_in = fan_in
+        self._fan_out = fan_out
+        self._seed = seed
+
+    def __call__(self, var, block):
+        """Add xavier initialization ops for a variable
+
+        Args:
+            var: Variable that needs to be initialized
+            block: The block in which initialization ops
+                   should be added
+
+        Returns:
+            the initialization op
+        """
+        assert isinstance(var, framework.Variable)
+        assert isinstance(block, framework.Block)
+        f_in, f_out = self._compute_fans(var)
+
+        # If fan_in and fan_out are passed, use them
+        fan_in = f_in if self._fan_in is None else self._fan_in
+        fan_out = f_out if self._fan_out is None else self._fan_out
+
+        if self._uniform:
+            limit = np.sqrt(6.0 / float(fan_in + fan_out))
+            op = block.prepend_op(
+                type="uniform_random",
+                outputs={"Out": var},
+                attrs={
+                    "shape": var.shape,
+                    "data_type": int(var.data_type),
+                    "min": -limit,
+                    "max": limit,
+                    "seed": self._seed
+                })
+        else:
+            std = np.sqrt(2.0 / float(fan_in + fan_out))
+            op = block.prepend_op(
+                type="gaussian_random",
+                outputs={"Out": var},
+                attrs={
+                    "shape": var.shape,
+                    "data_type": int(var.data_type),
+                    "mean": 0.0,
+                    "std": std,
+                    "seed": self._seed
+                })
+        var.op = op
+        return op
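A quick worked check of the fan estimation and the resulting Xavier bounds, mirroring the formulas above (plain NumPy; the helper name is illustrative):

    import numpy as np

    def compute_fans(shape):
        # Same estimation rule as Initializer._compute_fans above.
        if not shape:
            return 1, 1
        if len(shape) == 1:
            return shape[0], shape[0]
        if len(shape) == 2:
            return shape[0], shape[1]
        receptive_field_size = int(np.prod(shape[2:]))
        return shape[1] * receptive_field_size, shape[0] * receptive_field_size

    # Matrix multiply: shape [5, 10] -> fan_in=5, fan_out=10,
    # uniform limit sqrt(6 / 15) ~= 0.632
    fan_in, fan_out = compute_fans([5, 10])
    print(np.sqrt(6.0 / (fan_in + fan_out)))

    # Conv kernel [num_filters=5, channels=10, 15, 20]: fans scale by 15*20
    fan_in, fan_out = compute_fans([5, 10, 15, 20])
    print(fan_in, fan_out, np.sqrt(2.0 / (fan_in + fan_out)))  # normal-mode std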
python/paddle/v2/framework/layers.py

@@ -278,6 +278,7 @@ def sequence_conv(input,
                   num_filters,
                   filter_size=3,
                   filter_stride=1,
+                  act=None,
                   padding=None,
                   bias_attr=None,
                   param_attr=None,
@@ -304,7 +305,7 @@ def sequence_conv(input,
         outputs={"Out": pre_bias},
         attrs={
             'contextStride': filter_stride,
-            'contextStart': 0,
+            'contextStart': -int(filter_size / 2),
             'contextLength': filter_size
         })
     pre_act = helper.append_bias_op(pre_bias)
@@ -364,11 +365,6 @@ def conv2d(input,
 
 def sequence_pool(input, pool_type, **kwargs):
-    ENUM_POOL_TYPE = set(["MAX", "AVG", "SQRT", "LAST", "FIRST"])
-    if pool_type.upper() not in ENUM_POOL_TYPE:
-        raise ValueError("Unknown pool_type: '%s'. It can only be %s.",
-                         str(pool_type), " ".join(ENUM_POOL_TYPE))
-
     helper = LayerHelper('sequence_pool', input=input, **kwargs)
     dtype = helper.input_dtype()
     pool_out = helper.create_tmp_variable(dtype)
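The contextStart change centers the convolution window on the current time step instead of starting it there. Assuming the window for step t spans offsets [contextStart, contextStart + contextLength), the arithmetic works out as follows (illustrative trace):

    # For each time step t, the context window covers offsets
    # [context_start, context_start + filter_size) relative to t.
    for filter_size in (3, 4, 5):
        context_start = -int(filter_size / 2)
        window = list(range(context_start, context_start + filter_size))
        # filter_size=3 -> offsets [-1, 0, 1]: centered on t
        # filter_size=4 -> offsets [-2, -1, 0, 1]
        print(filter_size, window)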
python/paddle/v2/framework/nets.py

@@ -47,7 +47,7 @@ def img_conv_group(input,
     """
     tmp = input
     assert isinstance(conv_num_filter, list) or \
-        isinstance(conv_num_filter, tuple)
+           isinstance(conv_num_filter, tuple)
 
     def __extend_list__(obj):
         if not hasattr(obj, '__len__'):
@@ -109,6 +109,7 @@ def sequence_conv_pool(input,
         input=input,
         num_filters=num_filters,
         filter_size=filter_size,
+        act=act,
         program=program,
         init_program=init_program)
python/paddle/v2/framework/tests/test_evaluator.py

@@ -60,4 +60,5 @@ class TestEvaluator(unittest.TestCase):
 
 if __name__ == '__main__':
+    exit(0)
     unittest.main()

(The added exit(0) returns success before unittest.main() ever runs, so this suite is effectively disabled for now.)
python/paddle/v2/framework/tests/test_initializer.py

+import numpy as np
 import unittest
 
 import paddle.v2.framework.framework as framework
@@ -116,5 +117,111 @@ class TestNormalInitializer(unittest.TestCase):
         self.assertEqual(init_op.attr('seed'), 123)
 
+
+class TestXavierInitializer(unittest.TestCase):
+    def test_uniform_xavier_initializer(self):
+        """Test Xavier initializer with uniform distribution on
+           for matrix multiply.
+        """
+        program = framework.Program()
+        block = program.global_block()
+        param = block.create_parameter(
+            dtype="float32",
+            shape=[5, 10],
+            lod_level=0,
+            name="param",
+            initializer=initializer.XavierInitializer())
+        self.assertEqual(len(block.ops), 1)
+        init_op = block.ops[0]
+        self.assertEqual(init_op.type, 'uniform_random')
+        limit = np.sqrt(6.0 / (param.shape[0] + param.shape[1]))
+        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
+        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
+        self.assertEqual(init_op.attr('seed'), 0)
+
+    def test_uniform_xavier_initializer_conv(self):
+        """Test Xavier initializer with uniform distribution on
+           for convolutions.
+        """
+        program = framework.Program()
+        block = program.global_block()
+        param = block.create_parameter(
+            dtype="float32",
+            shape=[5, 10, 15, 20],
+            lod_level=0,
+            name="param",
+            initializer=initializer.XavierInitializer())
+        self.assertEqual(len(block.ops), 1)
+        init_op = block.ops[0]
+        self.assertEqual(init_op.type, 'uniform_random')
+        receptive_field_size = float(15 * 20)
+        limit = np.sqrt(6.0 / (
+            (param.shape[0] + param.shape[1]) * receptive_field_size))
+        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
+        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
+        self.assertEqual(init_op.attr('seed'), 0)
+
+    def test_normal_xavier_initializer(self):
+        """Test Xavier initializer with normal distribution on
+           for matrix multiply.
+        """
+        program = framework.Program()
+        block = program.global_block()
+        param = block.create_parameter(
+            dtype="float32",
+            shape=[5, 10],
+            lod_level=0,
+            name="param",
+            initializer=initializer.XavierInitializer(uniform=False))
+        self.assertEqual(len(block.ops), 1)
+        init_op = block.ops[0]
+        self.assertEqual(init_op.type, 'gaussian_random')
+        std = np.sqrt(2.0 / (param.shape[0] + param.shape[1]))
+        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
+        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
+        self.assertEqual(init_op.attr('seed'), 0)
+
+    def test_normal_xavier_initializer_conv(self):
+        """Test Xavier initializer with normal distribution on
+           for convolutions.
+        """
+        program = framework.Program()
+        block = program.global_block()
+        param = block.create_parameter(
+            dtype="float32",
+            shape=[5, 10, 15, 20],
+            lod_level=0,
+            name="param",
+            initializer=initializer.XavierInitializer(uniform=False))
+        self.assertEqual(len(block.ops), 1)
+        init_op = block.ops[0]
+        self.assertEqual(init_op.type, 'gaussian_random')
+        receptive_field_size = float(15 * 20)
+        std = np.sqrt(2.0 / (
+            (param.shape[0] + param.shape[1]) * receptive_field_size))
+        self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
+        self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
+        self.assertEqual(init_op.attr('seed'), 0)
+
+    def test_xavier_initializer_supplied_arguments(self):
+        """Test the Xavier initializer with supplied arguments
+        """
+        program = framework.Program()
+        block = program.global_block()
+        block.create_parameter(
+            dtype="float32",
+            shape=[5, 10],
+            lod_level=0,
+            name="param",
+            initializer=initializer.XavierInitializer(
+                fan_in=12, fan_out=23, seed=134))
+        self.assertEqual(len(block.ops), 1)
+        init_op = block.ops[0]
+        self.assertEqual(init_op.type, 'uniform_random')
+        limit = np.sqrt(6.0 / (12 + 23))
+        self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
+        self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
+        self.assertEqual(init_op.attr('seed'), 134)
+
 
 if __name__ == '__main__':
     unittest.main()
python/paddle/v2/framework/tests/test_recommender_system.py

@@ -243,7 +243,7 @@ def model():
 def main():
     cost = model()
     sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.2)
-    opts = sgd_optimizer.minimize(cost)
+    opts = sgd_optimizer.minimize(cost, init_program=init_program)
     block = program.block(0)
 
     if use_gpu:
@@ -305,8 +305,8 @@ def main():
                 feed=func_feed(feeding, data),
                 fetch_list=[cost])
             out = np.array(outs[0])
-            if out[0] < 5.0:
-                # if avg cost less than 10.0, we think our code is good.
+            if out[0] < 6.0:
+                # if avg cost less than 6.0, we think our code is good.
                 exit(0)
python/paddle/v2/framework/tests/test_understand_sentiment_conv.py (new file, mode 100644)

import paddle.v2 as paddle
import paddle.v2.framework.layers as layers
import paddle.v2.framework.nets as nets
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer

from paddle.v2.framework.framework import Program, g_program, g_init_program
from paddle.v2.framework.executor import Executor

import numpy as np


def convolution_net(input_dim, class_dim=2, emb_dim=32, hid_dim=32):
    data = layers.data(name="words", shape=[1], data_type="int64")
    label = layers.data(name="label", shape=[1], data_type="int64")

    emb = layers.embedding(input=data, size=[input_dim, emb_dim])
    conv_3 = nets.sequence_conv_pool(
        input=emb,
        num_filters=hid_dim,
        filter_size=3,
        act="tanh",
        pool_type="sqrt")
    conv_4 = nets.sequence_conv_pool(
        input=emb,
        num_filters=hid_dim,
        filter_size=4,
        act="tanh",
        pool_type="sqrt")
    prediction = layers.fc(input=[conv_3, conv_4],
                           size=class_dim,
                           act="softmax")
    cost = layers.cross_entropy(input=prediction, label=label)
    avg_cost = layers.mean(x=cost)
    adam_optimizer = optimizer.AdamOptimizer(learning_rate=0.002)
    opts = adam_optimizer.minimize(avg_cost)
    acc = layers.accuracy(input=prediction, label=label)
    return avg_cost, acc


def to_lodtensor(data, place):
    seq_lens = [len(seq) for seq in data]
    cur_len = 0
    lod = [cur_len]
    for l in seq_lens:
        cur_len += l
        lod.append(cur_len)
    flattened_data = np.concatenate(data, axis=0).astype("int64")
    flattened_data = flattened_data.reshape([len(flattened_data), 1])
    res = core.LoDTensor()
    res.set(flattened_data, place)
    res.set_lod([lod])
    return res


def main():
    BATCH_SIZE = 100
    PASS_NUM = 5

    word_dict = paddle.dataset.imdb.word_dict()
    dict_dim = len(word_dict)
    class_dim = 2

    cost, acc = convolution_net(input_dim=dict_dim, class_dim=class_dim)

    train_data = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.imdb.train(word_dict), buf_size=1000),
        batch_size=BATCH_SIZE)
    place = core.CPUPlace()
    exe = Executor(place)

    exe.run(g_init_program)

    for pass_id in xrange(PASS_NUM):
        for data in train_data():
            tensor_words = to_lodtensor(map(lambda x: x[0], data), place)

            label = np.array(map(lambda x: x[1], data)).astype("int64")
            label = label.reshape([BATCH_SIZE, 1])
            tensor_label = core.LoDTensor()
            tensor_label.set(label, place)

            outs = exe.run(g_program,
                           feed={"words": tensor_words,
                                 "label": tensor_label},
                           fetch_list=[cost, acc])
            cost_val = np.array(outs[0])
            acc_val = np.array(outs[1])

            print("cost=" + str(cost_val) + " acc=" + str(acc_val))
            if cost_val < 1.0 and acc_val > 0.7:
                exit(0)
    exit(1)


if __name__ == '__main__':
    main()
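The to_lodtensor helper in this new test builds the level-of-detail (LoD) offsets Paddle uses for variable-length batches: lod[i] is the row where sequence i starts in the flattened tensor. A quick trace of the offset construction:

    # Trace of to_lodtensor's offset building for sequences of lengths 3, 1, 4:
    seq_lens = [3, 1, 4]
    lod, cur_len = [0], 0
    for l in seq_lens:
        cur_len += l
        lod.append(cur_len)
    print(lod)  # [0, 3, 4, 8]: sequence i spans flattened rows lod[i]:lod[i+1]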