机器未来 / Paddle (forked from PaddlePaddle / Paddle)

Commit 61444d90
Merge remote-tracking branch 'baidu/develop' into feature/sppnet

Author: qijun
Date: Nov 10, 2016
Parents: f173341f, 8d4c453b

Showing 6 changed files with 51 additions and 27 deletions (+51, -27)
Changed files:

  paddle/math/Matrix.cpp                                               +10   -0
  paddle/math/Matrix.h                                                  +6   -0
  paddle/math/tests/test_matrixCompare.cpp                              +8   -5
  python/paddle/trainer_config_helpers/layers.py                       +23  -20
  python/paddle/trainer_config_helpers/tests/configs/projections.py     +1   -1
  python/paddle/trainer_config_helpers/tests/layers_test_config.py      +3   -1
paddle/math/Matrix.cpp

@@ -187,6 +187,15 @@ MatrixPtr Matrix::subMatrix(size_t startRow, size_t endRow, size_t startCol,
                             trans_, useGpu_);
 }
 
+void Matrix::setDiag(real value) {
+  CHECK(data_ != NULL);
+  CHECK_EQ(height_, width_);
+
+  zeroMem();
+  BaseMatrix diag(height_, 1, stride_ + 1, data_, false, useGpu_);
+  diag.assign(value);
+}
+
 GpuMatrix::GpuMatrix(size_t height, size_t width, bool trans)
     : Matrix(std::make_shared<GpuMemoryHandle>(height * width * sizeof(real)),
              height, width, trans, true) {}
@@ -202,6 +211,7 @@ void GpuMatrix::resetOne() {
   CHECK(data_ != NULL);
   one();
 }
 
 void GpuMatrix::resize(size_t newHeight, size_t newWidth) {
   size_t newSize = newHeight * newWidth;
   if (NULL == memoryHandle_.get() ||
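The new Matrix::setDiag zeroes the buffer and then builds a height_ x 1 BaseMatrix view over the same data pointer with stride stride_ + 1, so element i of that view is exactly the (i, i) entry of the original matrix; assigning the view therefore writes only the diagonal. Below is a minimal NumPy analogue of that strided-view trick (illustration only, not Paddle code; it assumes a contiguous row-major buffer, i.e. stride_ == width_):

import numpy as np

def set_diag(mat: np.ndarray, value: float) -> None:
    """NumPy sketch of Matrix::setDiag for a contiguous row-major square matrix."""
    height, width = mat.shape
    assert height == width            # mirrors CHECK_EQ(height_, width_)
    mat[:] = 0.0                      # mirrors zeroMem()
    flat = mat.reshape(-1)            # view of the underlying buffer
    flat[::width + 1] = value         # a step of stride + 1 walks the main diagonal

m = np.random.rand(4, 4)
set_diag(m, 1.0)
assert np.allclose(m, np.eye(4))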
paddle/math/Matrix.h

@@ -195,6 +195,8 @@ public:
   virtual void resetOne() { LOG(FATAL) << "Not implemented"; }
 
+  void setDiag(real value);
+
   virtual void copyFrom(const Matrix& src) { LOG(FATAL) << "Not implemented"; }
 
   virtual void trimFrom(const CpuSparseMatrix& src) {
@@ -330,6 +332,7 @@ public:
   virtual MatrixPtr getInverse() {
     LOG(FATAL) << "Not implemented";
+    return nullptr;
   }
 
   /**
@@ -1016,6 +1019,7 @@ public:
   void zeroMem();
   void resetOne();
+  void setDiag(real value);
   void resize(size_t newHeight, size_t newWidth);
   void resize(size_t newHeight, size_t newWidth,
@@ -1280,6 +1284,8 @@ public:
   void zeroMem();
   void resetOne();
+  void setDiag(real value);
   void resize(size_t newHeight, size_t newWidth);
   void resize(size_t newHeight, size_t newWidth,
               size_t newNnz, /* used to allocate space */
paddle/math/tests/test_matrixCompare.cpp

@@ -647,20 +647,23 @@ void testMatrixInverse(int height) {
   MatrixPtr cpuI = std::make_shared<CpuMatrix>(height, height);
   MatrixPtr gpuI = std::make_shared<GpuMatrix>(height, height);
 
+  /* Make matrix well conditioned: cpu * cpuT + Identity */
   cpu->randomizeUniform();
+  MatrixPtr cpuT = cpu->getTranspose();
+  MatrixPtr outputCheck = std::make_shared<CpuMatrix>(height, height);
+  outputCheck->mul(cpu, cpuT);
+  cpu->setDiag(1.0);
+  cpu->add(*outputCheck);
+
   gpu->copyFrom(*cpu);
   cpu->inverse(cpuI, false);
   gpu->inverse(gpuI, false);
 
-  MatrixPtr outputCheck = std::make_shared<CpuMatrix>(height, height);
   outputCheck->copyFrom(*gpuI);
   MatrixCheckErr(*cpuI, *outputCheck);
 
   outputCheck->mul(cpu, cpuI);
-  cpu->zeroMem();
-  for (int i = 0; i < height; i++) {
-    cpu->getRowBuf(i)[i] = 1.0;
-  }
+  cpu->setDiag(1.0);
   MatrixCheckErr(*cpu, *outputCheck);
 }
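The rewritten test no longer inverts a raw uniform random matrix, which can be arbitrarily close to singular; it inverts cpu * cpuT + I instead. That matrix is symmetric positive definite with every eigenvalue at least 1, so the inverse always exists and the CPU/GPU comparison stays numerically stable, and the expected identity at the end is produced with setDiag(1.0) rather than the old element-wise loop. A small NumPy sketch of the same conditioning argument (illustration only, not the Paddle test; the size 32 is arbitrary):

import numpy as np

height = 32
R = np.random.uniform(size=(height, height))   # analogue of cpu->randomizeUniform()
A = R @ R.T + np.eye(height)                   # cpu * cpuT + Identity

# Symmetric positive definite: all eigenvalues are >= 1, so inv(A) is well behaved.
assert np.linalg.eigvalsh(A).min() >= 1.0 - 1e-6
A_inv = np.linalg.inv(A)
assert np.allclose(A @ A_inv, np.eye(height), atol=1e-8)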
python/paddle/trainer_config_helpers/layers.py

@@ -592,7 +592,7 @@ class MixedLayerType(LayerOutput):
     def __exit__(self, *args, **kwargs):
         del args, kwargs  # unused parameter to suppress warning
         assert len(self.inputs) != 0
-        MixedLayer(
+        ml = MixedLayer(
             name=self.name,
             size=self.size,
             active_type=self.activation.name,
@@ -600,6 +600,9 @@ class MixedLayerType(LayerOutput):
             inputs=self.inputs,
             **ExtraLayerAttribute.to_kwargs(self.layer_attr)
         )
+        # update the size which might be computed inside MixedLayer
+        # according to the operator's output size
+        self.size = ml.config.size
 
 
 @wrap_name_default("mixed")
@@ -2682,7 +2685,7 @@ def out_prod_layer(input1, input2, name=None, layer_attr=None):
     assert isinstance(input1, LayerOutput)
     assert isinstance(input2, LayerOutput)
     Layer(name=name,
-          type="out_prod",
+          type=LayerType.OUT_PROD_LAYER,
           inputs=[input1.name, input2.name],
           **ExtraLayerAttribute.to_kwargs(layer_attr))
     return LayerOutput(name=name,
@@ -2943,7 +2946,7 @@ def classification_cost(input, label, weight=None, name=None,
 def conv_operator(img, filter, filter_size, num_filters,
-                  num_channel=None, stride=1, padding=0,
+                  num_channels=None, stride=1, padding=0,
                   filter_size_y=None, stride_y=None, padding_y=None):
     """
     Different from img_conv_layer, conv_op is an Operator, which can be used
@@ -2973,8 +2976,8 @@ def conv_operator(img, filter, filter_size, num_filters,
     :type filter_size_y: int
     :param num_filters: channel of output data.
     :type num_filters: int
-    :param num_channel: channel of input data.
-    :type num_channel: int
+    :param num_channels: channel of input data.
+    :type num_channels: int
     :param stride: The x dimension of the stride.
     :type stride: int
     :param stride_y: The y dimension of the stride.
@@ -2993,19 +2996,19 @@ def conv_operator(img, filter, filter_size, num_filters,
     if padding_y is None:
         padding_y = padding
-    if num_channel is None:
-        num_channel = img.num_filters
+    if num_channels is None:
+        num_channels = img.num_filters
     assert isinstance(filter, LayerOutput)
     if filter.size is not None:
-        filter.size = filter_size * filter_size_y * num_filters * num_channel
+        filter.size = filter_size * filter_size_y * num_filters * num_channels
 
     op = ConvOperator(input_layer_names=[img.name, filter.name],
                       num_filters=num_filters,
                       conv_conf=Conv(filter_size=filter_size,
                                      padding=padding,
                                      stride=stride,
-                                     channels=num_channel,
+                                     channels=num_channels,
                                      filter_size_y=filter_size_y,
                                      padding_y=padding_y,
                                      stride_y=stride_y,
@@ -3045,8 +3048,8 @@ def conv_projection(input, filter_size, num_filters,
     :type filter_size_y: int
     :param num_filters: channel of output data.
     :type num_filters: int
-    :param num_channel: channel of input data.
-    :type num_channel: int
+    :param num_channels: channel of input data.
+    :type num_channels: int
     :param stride: The x dimension of the stride.
     :type stride: int
     :param stride_y: The y dimension of the stride.
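Taken together, the layers.py changes rename conv_operator's num_channel keyword to num_channels and make MixedLayerType.__exit__ copy the size computed inside the generated MixedLayer back onto the Python-side object, so a config can read the layer's size right after a with mixed_layer() block. A hypothetical config fragment in the style of the test configs below (not part of this commit; the layer names and shapes are invented for illustration):

from paddle.trainer_config_helpers import *

img = data_layer(name='image', size=3 * 32 * 32)        # assumed 3-channel 32x32 input
flt = data_layer(name='filter', size=3 * 3 * 3 * 64)    # 64 filters of shape 3x3x3

with mixed_layer() as m:
    # 'num_channel=...' would now raise a TypeError; the keyword is 'num_channels'.
    m += conv_operator(img=img, filter=flt, num_filters=64,
                       num_channels=3, filter_size=3)

# __exit__ now propagates the operator's computed output size to the Python
# object, so the size is available right after the block.
assert m.size > 0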
python/paddle/trainer_config_helpers/tests/configs/projections.py

@@ -35,7 +35,7 @@ flt = data_layer(name='filter', size=3*3*1*64)
 with mixed_layer() as m7:
     m7 += conv_operator(img=img, filter=flt, num_filters=64,
-                        num_channel=1, filter_size=3)
+                        num_channels=1, filter_size=3)
 
 end = mixed_layer(input=[full_matrix_projection(input=m5),
                          trans_full_matrix_projection(input=m6),
python/paddle/trainer_config_helpers/tests/layers_test_config.py

@@ -29,9 +29,11 @@ z1 = mixed_layer(act=LinearActivation(),
                  filter=y1,
                  filter_size=1,
                  num_filters=5,
-                 num_channel=5,
+                 num_channels=5,
                  stride=1)])
 
+assert z1.size > 0
+
 y2 = fc_layer(input=y, size=15)
 cos1 = cos_sim(a=x1, b=y1)