s920243400 / PaddleDetection — forked from PaddlePaddle / PaddleDetection (in sync with the fork source)
Commit 1e828dc1
Author: caoying03
Authored: Aug 14, 2017
Parents: 05e8a26b, 8747d60d

    Merge branch 'develop' into cross_entropy_over_beam
Showing 12 changed files with 317 additions and 378 deletions (+317 -378):
doc/getstarted/build_and_install/docker_install_cn.rst   +2   -2
paddle/framework/CMakeLists.txt                          +1   -1
paddle/framework/details/lod_tensor.cc                   +0   -62
paddle/framework/details/lod_tensor.h                    +0   -46
paddle/framework/lod_tensor.cc                           +51  -24
paddle/framework/lod_tensor.h                            +61  -57
paddle/framework/lod_tensor_impl.h                       +0   -60
paddle/framework/lod_tensor_test.cc                      +33  -82
paddle/gserver/tests/LayerGradUtil.h                     +3   -3
paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp   +69  -20
python/paddle/v2/framework/tests/CMakeLists.txt          +1   -0
python/paddle/v2/framework/tests/test_recurrent_op.py    +96  -21
doc/getstarted/build_and_install/docker_install_cn.rst

@@ -74,13 +74,13 @@ Whenever PaddlePaddle releases a new version, the corresponding production image and

 .. code-block:: bash

-    docker run -it --rm paddlepaddle/paddle:0.10.0-dev /bin/bash
+    docker run -it --rm -v $(pwd):/paddle paddlepaddle/paddle:0.10.0-dev /bin/bash

 Alternatively, the container can be run as a background process:

 .. code-block:: bash

-    docker run -d -p 2202:22 -p 8888:8888 paddledev/paddle:0.10.0-dev
+    docker run -d -p 2202:22 -p 8888:8888 -v $(pwd):/paddle paddlepaddle/paddle:0.10.0-dev /usr/sbin/sshd -D

 Then SSH into the container with the password :code:`root`:
paddle/framework/CMakeLists.txt

@@ -7,7 +7,7 @@ cc_library(tensor SRCS tensor.cc DEPS ddim place paddle_memory device_context)
 cc_test(tensor_test SRCS tensor_test.cc DEPS tensor)
 cc_test(eigen_test SRCS eigen_test.cc DEPS tensor)
-cc_library(lod_tensor SRCS lod_tensor.cc details/lod_tensor.cc DEPS ddim place tensor)
+cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor)
 cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor)
 cc_test(variable_test SRCS variable_test.cc)
paddle/framework/details/lod_tensor.cc (deleted, 100644 → 0)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
       http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#include "paddle/framework/lod_tensor.h"

#include <memory>

namespace paddle {
namespace framework {
namespace details {

using LOD = LODTensor::LOD;

std::shared_ptr<LOD> SliceLOD(const LOD& lod, size_t level_begin,
                              size_t level_end) {
  auto new_lod = std::make_shared<LOD>();
  new_lod->reserve(level_end - level_begin);
  for (size_t i = level_begin; i < level_end; i++) {
    new_lod->emplace_back(lod[i]);
  }
  return new_lod;
}

std::shared_ptr<LOD> SliceLOD(const LOD& lod, size_t level, size_t elem_begin,
                              size_t elem_end, bool tensor_shared) {
  // slice the lod.
  auto new_lod = std::make_shared<LOD>();
  new_lod->reserve(lod.size() - level);
  auto start = lod.at(level)[elem_begin];
  auto end = lod.at(level)[elem_end];

  for (auto it = lod.begin() + level; it != lod.end(); it++) {
    auto it_begin = std::find(it->begin(), it->end(), start);
    auto it_end = std::find(it_begin, it->end(), end);
    PADDLE_ENFORCE(it_begin != it->end(), "error in parsing lod info");
    PADDLE_ENFORCE(it_end != it->end(), "error in parsing lod info");
    new_lod->emplace_back(it_begin, it_end + 1);
    if (!tensor_shared) {
      // reset offset if tensor is copied and sliced.
      std::transform(new_lod->back().begin(), new_lod->back().end(),
                     new_lod->back().begin(),
                     [start](int v) { return v - start; });
      PADDLE_ENFORCE(new_lod->back().front() == 0, "error in slice LOD");
    }
  }
  return new_lod;
}

}  // namespace details
}  // namespace framework
}  // namespace paddle
paddle/framework/details/lod_tensor.h (deleted, 100644 → 0)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
       http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#pragma once

#include <memory>

namespace paddle {
namespace framework {
namespace details {

/*
 * Slice levels from LOD.
 *
 * @lod: LOD to slice.
 * @level_begin: level to begin slice.
 * @level_end: level to end slice.
 */
std::shared_ptr<LODTensor::LOD> SliceLOD(const LODTensor::LOD& lod,
                                         size_t level_begin, size_t level_end);

/*
 * Slice elements from a level of LOD.
 *
 * @lod: LOD to slice.
 * @level: which level to slice.
 * @elem_begin: element's index to begin slice.
 * @elem_end: element's index to end slice.
 */
std::shared_ptr<LODTensor::LOD> SliceLOD(const LODTensor::LOD& lod,
                                         size_t level, size_t elem_begin,
                                         size_t elem_end, bool tensor_shared);

}  // namespace details
}  // namespace framework
}  // namespace paddle
paddle/framework/lod_tensor.cc

@@ -19,32 +19,59 @@
 namespace paddle {
 namespace framework {

-LODTensor LODTensor::SliceShared(size_t level_begin, size_t level_end) const {
-  PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced.");
-  auto new_lod = details::SliceLOD(*lod_start_pos_, level_begin, level_end);
-  // slice levels just need to update LOD info, each level will contain the
-  // whole tensor_, so no need to modify tensor_.
-  return LODTensor(tensor_, new_lod);
+LODTensor::LOD LODTensor::LOD::SliceLevels(size_t level_begin,
+                                           size_t level_end) const {
+  LOD new_lod;
+  new_lod.reserve(level_end - level_begin);
+  for (size_t i = level_begin; i < level_end; i++) {
+    new_lod.emplace_back(at(i));
+  }
+  return new_lod;
 }

-LODTensor LODTensor::SliceShared(size_t level, size_t elem_begin,
-                                 size_t elem_end) const {
-  PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced.");
-  PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
-                 NumLevels());
-  PADDLE_ENFORCE(elem_begin < NumElements(level),
-                 "element begin [%d] out of range [%d]", elem_begin,
-                 NumElements(level));
-  PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
-                 "element end [%d] out of range [%d]", elem_end,
-                 NumElements(level));
-
-  auto new_lod = details::SliceLOD(*lod_start_pos_, level, elem_begin,
-                                   elem_end, true /*tensor_shared*/);
-
-  // slice elements just need to update LOD info, because offsets are not
-  // changed, so the original tensor_ can be reused.
-  return LODTensor(tensor_, new_lod);
+LODTensor::LOD LODTensor::LOD::SliceInLevel(size_t level, size_t elem_begin,
+                                            size_t elem_end) const {
+  // slice the lod.
+  LOD new_lod;
+  new_lod.reserve(size() - level);
+  auto start = this->at(level)[elem_begin];
+  auto end = this->at(level)[elem_end];
+
+  for (auto it = this->begin() + level; it != this->end(); it++) {
+    auto it_begin = std::find(it->begin(), it->end(), start);
+    auto it_end = std::find(it_begin, it->end(), end);
+    PADDLE_ENFORCE(it_begin != it->end(), "error in parsing lod info");
+    PADDLE_ENFORCE(it_end != it->end(), "error in parsing lod info");
+    new_lod.emplace_back(it_begin, it_end + 1);
+    // reset offset if tensor is copied and sliced.
+    std::transform(new_lod.back().begin(), new_lod.back().end(),
+                   new_lod.back().begin(),
+                   [start](int v) { return v - start; });
+    PADDLE_ENFORCE_EQ(new_lod.back().front(), 0, "error in slice LOD");
+  }
+  PADDLE_ENFORCE_LE(new_lod.size(), this->size());
+  return new_lod;
+}
+
+bool operator==(const LODTensor::LOD& a, const LODTensor::LOD& b) {
+  if (a.size() != b.size()) {
+    return false;
+  }
+  for (size_t i = 0; i < a.size(); i++) {
+    const auto& a_level = a[i];
+    const auto& b_level = b[i];
+    if (a_level.size() != b_level.size()) {
+      return false;
+    }
+    for (size_t j = 0; j < a_level.size(); j++) {
+      if (a_level[j] != b_level[j]) {
+        return false;
+      }
+    }
+  }
+  return true;
 }

 }  // namespace framework
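To make the new slicing semantics concrete, here is a minimal standalone sketch of what SliceLevels and SliceInLevel compute over the nested offset vectors. It is an illustration only, not the Paddle API: the Lod alias and the two free functions are hypothetical stand-ins for LODTensor::LOD and its methods, and assert() stands in for PADDLE_ENFORCE.

// slice_sketch.cc -- standalone illustration of the new LOD slicing rules.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

using Lod = std::vector<std::vector<std::size_t>>;

// SliceLevels: keep levels [level_begin, level_end); offsets are unchanged
// because every kept level still describes the whole underlying tensor.
Lod SliceLevels(const Lod& lod, std::size_t level_begin,
                std::size_t level_end) {
  return Lod(lod.begin() + level_begin, lod.begin() + level_end);
}

// SliceInLevel: keep elements [elem_begin, elem_end) of `level` and the
// finer levels under them, rebasing offsets so the slice starts at 0.
Lod SliceInLevel(const Lod& lod, std::size_t level, std::size_t elem_begin,
                 std::size_t elem_end) {
  const std::size_t start = lod[level][elem_begin];
  const std::size_t end = lod[level][elem_end];
  Lod out;
  for (std::size_t l = level; l < lod.size(); ++l) {
    auto b = std::find(lod[l].begin(), lod[l].end(), start);
    auto e = std::find(b, lod[l].end(), end);
    assert(b != lod[l].end() && e != lod[l].end());
    std::vector<std::size_t> kept(b, e + 1);  // keep the closing offset too
    for (auto& v : kept) v -= start;          // rebase to the slice
    out.push_back(kept);
  }
  return out;
}

int main() {
  // The same 3-level LOD used by lod_tensor_test.cc: 2 / 4 / 8 elements.
  Lod lod = {{0, 10, 20},
             {0, 5, 10, 15, 20},
             {0, 2, 5, 7, 10, 12, 15, 17, 20}};

  Lod levels = SliceLevels(lod, 1, 2);
  assert(levels.size() == 1 && levels[0].size() == 5);

  Lod first_seq = SliceInLevel(lod, 0, 0, 1);  // first top-level sequence
  assert(first_seq.size() == 3);
  assert(first_seq[0] == (std::vector<std::size_t>{0, 10}));
  assert(first_seq[2] == (std::vector<std::size_t>{0, 2, 5, 7, 10}));
  return 0;
}

Note how this matches the comments in the diff: slicing only rewrites the offset table, so the underlying tensor storage can be shared unchanged.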
paddle/framework/lod_tensor.h

@@ -15,7 +15,7 @@
 #pragma once
 #include <memory>
-#if (!PADDLE_ONLY_CPU)
+#if !defined(PADDLE_ONLY_CPU)
 #include <thrust/device_vector.h>
 #include <thrust/host_vector.h>
 #endif

@@ -31,30 +31,29 @@ namespace framework {
  * LODTensor (Level of details Tensor)
  * see https://en.wikipedia.org/wiki/Level_of_details for reference.
  */
-class LODTensor {
+class LODTensor : public Tensor {
  public:
   // Level saves offsets of each unit.
 #ifdef PADDLE_ONLY_CPU
-  using Level = std::vector<size_t>;
+  template <typename T>
+  using Vector = std::vector<T>;
 #else
-  using Level = thrust::device_vector<size_t>;
+  template <typename T>
+  using Vector = thrust::host_vector<T>;
 #endif
-  // LOD stores offsets of each level of units, the largest units level first,
+  // LoD stores offsets of each level of units, the largest units level first,
   // then the smaller units level. Each Level stores the offsets of units in
   // Tensor.
-  typedef std::vector<Level> LOD;
+  class LOD : public std::vector<Vector<size_t>> {
+   public:
+    LOD SliceLevels(size_t level_begin, size_t level_end) const;
+    LOD SliceInLevel(size_t level, size_t elem_begin, size_t elem_end) const;
+  };

   LODTensor() {}
-  LODTensor(const std::shared_ptr<Tensor>& tensor,
-            const std::shared_ptr<LOD>& lod) {
-    Reset(tensor, lod);
-  }
-
-  void Reset(const std::shared_ptr<Tensor>& tensor,
-             const std::shared_ptr<LOD>& lod) {
-    tensor_ = tensor;
-    lod_start_pos_ = lod;
-  }
+  explicit LODTensor(const LOD& lod) : lod_(lod) {}
+
+  virtual Tensor* Clone() const { return new LODTensor(lod_); }

   /*
    * Get a element from LOD.

@@ -65,16 +64,14 @@ class LODTensor {
     PADDLE_ENFORCE(elem < NumElements(level),
                    "element begin [%d] out of range [%d]", elem,
                    NumElements(level));
-    return (*lod_start_pos_)[level][elem];
+    return (lod_)[level][elem];
   }

   /*
    * Number of LODTensor's levels, each level has units of data, for example,
    * in the sentence's view, article, paragraph, sentence are 3 levels.
    */
-  size_t NumLevels() const {
-    return lod_start_pos_ ? lod_start_pos_->size() : 0UL;
-  }
+  size_t NumLevels() const { return lod_.size(); }
   /*
    * Number of elements in a level.
    */

@@ -82,64 +79,71 @@ class LODTensor {
     PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
                    NumLevels());
     // the last offset is the end of last element
-    return lod_start_pos_->at(level).size() - 1;
+    return lod_[level].size() - 1;
   }

-  /*
-   * Slice of levels[level_begin:level_end], with tensor copied.
-   */
-  template <typename T>
-  LODTensor SliceCopied(size_t level_begin, size_t level_end,
-                        const platform::Place& dst_place) const;
-
   /*
    * Slice of levels[level_begin:level_end], with tensor shared.
    */
-  LODTensor SliceShared(size_t level_begin, size_t level_end) const;
-
-  /*
-   * Slice of elements of a level, [elem_begin: elem_end], with tensor copied.
-   * @note: low performance in slice lod_start_pos_.
-   */
-  template <typename T>
-  LODTensor SliceCopied(size_t level, size_t elem_begin, size_t elem_end,
-                        const platform::Place& dst_place) const;
+  template <typename T>
+  LODTensor SliceLevels(size_t level_begin, size_t level_end) const;

   /*
    * Slice of elements of a level, [elem_begin: elem_end], with tensor shared.
-   * @note: low performance in slice lod_start_pos_.
-   */
-  LODTensor SliceShared(size_t level, size_t elem_begin, size_t elem_end) const;
-
-  /*
-   * Copy other's lod_start_pos_, to share LOD info.
-   * @note: the LOD info should not be changed.
+   * @note: low performance in slice lod_.
    */
-  void ShareLOD(const LODTensor& other) {
-    lod_start_pos_ = other.lod_start_pos_;
-  }
+  template <typename T>
+  LODTensor SliceInLevel(size_t level, size_t elem_begin,
+                         size_t elem_end) const;

   /*
-   * Copy other's lod_start_pos_'s content, free to mutate.
+   * Copy other's lod_'s content, free to mutate.
    */
-  void CopyLOD(const LODTensor& other) {
-    lod_start_pos_ = std::make_shared<LOD>(*other.lod_start_pos_);
-  }
+  void CopyLOD(const LODTensor& other) { lod_ = other.lod_; }
   /*
    * Determine whether LODTensor has a valid LOD info.
    */
-  bool HasLOD() const { return bool(lod_start_pos_); }
-  LOD* lod() const { return lod_start_pos_.get(); }
+  const LOD& lod() const { return lod_; }
+  LOD* mutable_lod() { return &lod_; }

-  std::shared_ptr<Tensor>& tensor() { return tensor_; }
-  Tensor* raw_tensor() { return tensor_.get(); }
+  virtual ~LODTensor() {}

  private:
-  std::shared_ptr<LOD> lod_start_pos_;
-  std::shared_ptr<Tensor> tensor_;
+  LOD lod_;
 };
+
+bool operator==(const LODTensor::LOD& a, const LODTensor::LOD& b);
+
+template <typename T>
+LODTensor LODTensor::SliceLevels(size_t level_begin, size_t level_end) const {
+  auto new_lod = lod_.SliceLevels(level_begin, level_end);
+  // slice levels just need to update LOD info, each level will contain the
+  // whole tensor_, so no need to modify tensor_.
+  LODTensor new_tensor(new_lod);
+  new_tensor.ShareDataWith<T>(*this);
+  return new_tensor;
+}
+
+template <typename T>
+LODTensor LODTensor::SliceInLevel(size_t level, size_t elem_begin,
+                                  size_t elem_end) const {
+  PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
+                 NumLevels());
+  PADDLE_ENFORCE(elem_begin < NumElements(level),
+                 "element begin [%d] out of range [%d]", elem_begin,
+                 NumElements(level));
+  PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
+                 "element end [%d] out of range [%d]", elem_end,
+                 NumElements(level));
+  auto new_lod = lod_.SliceInLevel(level, elem_begin, elem_end);
+  // slice elements just need to update LOD info, because offsets are not
+  // changed, so the original tensor_ can be reused.
+  LODTensor new_tensor(new_lod);
+  new_tensor.ShareDataWith<T>(*this);
+  return new_tensor;
+}

 }  // namespace framework
 }  // namespace paddle
-
-#include "paddle/framework/lod_tensor_impl.h"
paddle/framework/lod_tensor_impl.h (deleted, 100644 → 0)

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
       http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#pragma once

#include "paddle/framework/details/lod_tensor.h"

namespace paddle {
namespace framework {

template <typename T>
LODTensor LODTensor::SliceCopied(size_t level_begin, size_t level_end,
                                 const platform::Place& dst_place) const {
  PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced.");
  auto new_lod = details::SliceLOD(*lod_start_pos_, level_begin, level_end);
  auto new_tensor = std::make_shared<Tensor>();
  new_tensor->CopyFrom<T>(*tensor_, dst_place);
  return LODTensor(new_tensor, new_lod);
}

template <typename T>
LODTensor LODTensor::SliceCopied(size_t level, size_t elem_begin,
                                 size_t elem_end,
                                 const platform::Place& dst_place) const {
  PADDLE_ENFORCE(HasLOD(), "has no LOD info, can't be sliced.");
  PADDLE_ENFORCE(level < NumLevels(), "level [%d] out of range [%d]", level,
                 NumLevels());
  PADDLE_ENFORCE(elem_begin < NumElements(level),
                 "element begin [%d] out of range [%d]", elem_begin,
                 NumElements(level));
  PADDLE_ENFORCE(elem_end < NumElements(level) + 1,
                 "element end [%d] out of range [%d]", elem_end,
                 NumElements(level));

  auto new_lod = details::SliceLOD(*lod_start_pos_, level, elem_begin,
                                   elem_end, false /*tensor_shared*/);

  auto start_idx = new_lod->front().front();
  auto end_idx = new_lod->front().back() - 1 /*the next element's start*/;
  auto sliced_tensor = tensor_->Slice<T>(start_idx, end_idx);
  auto new_tensor = std::make_shared<Tensor>();
  new_tensor->CopyFrom<T>(sliced_tensor, dst_place);

  return LODTensor(new_tensor, new_lod);
}

}  // namespace framework
}  // namespace paddle
paddle/framework/lod_tensor_test.cc

@@ -15,6 +15,7 @@
 #include <glog/logging.h>
 #include <gtest/gtest.h>
+#include <algorithm>
 #include <memory>

 namespace paddle {

@@ -29,22 +30,28 @@ class LODTensorTester : public ::testing::Test {
     // 0 10 20
     // 0 5 10 15 20
     // 0 2 5 7 10 12 15 20
-    auto lod = std::make_shared<LODTensor::LOD>();
-    lod->push_back(std::vector<size_t>{0, 10, 20});
-    lod->push_back(std::vector<size_t>{0, 5, 10, 15, 20});
-    lod->push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20});
+    LODTensor::LOD lod;
+    lod.push_back(std::vector<size_t>{0, 10, 20});
+    lod.push_back(std::vector<size_t>{0, 5, 10, 15, 20});
+    lod.push_back(std::vector<size_t>{0, 2, 5, 7, 10, 12, 15, 17, 20});

-    auto tensor = std::make_shared<Tensor>();
-    tensor->Resize({20 /*batch size*/, 128 /*dim*/});
+    ASSERT_EQ(lod.size(), 3UL);
+
+    tensor.Resize({20 /*batch size*/, 128 /*dim*/});
     // malloc memory
-    tensor->mutable_data<float>(place);
+    tensor.mutable_data<float>(place);

-    lod_tensor->Reset(tensor, lod);
+    lod_tensor.reset(new LODTensor(lod));
+    lod_tensor->Resize({20 /*batch size*/, 128 /*dim*/});
+
+    lod_tensor->ShareDataWith<float>(tensor);
+    // lod_tensor->ShareDataWith<Tensor>(tensor);
   }

  protected:
   std::unique_ptr<LODTensor> lod_tensor;
   platform::CPUPlace place;
+  Tensor tensor;
 };

 TEST_F(LODTensorTester, NumLevels) { ASSERT_EQ(lod_tensor->NumLevels(), 3UL); }

@@ -55,110 +62,54 @@ TEST_F(LODTensorTester, NumElements) {
   ASSERT_EQ(lod_tensor->NumElements(2), 8UL);
 }

-TEST_F(LODTensorTester, SliceShared_Level) {
-  // slice 1 level
-  for (size_t level = 0; level < 3UL; ++level) {
-    auto new_lod_tensor = lod_tensor->SliceShared(level, level + 1);
-    ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
-    ASSERT_EQ(new_lod_tensor.NumElements(0UL), lod_tensor->NumElements(level));
-    ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor());
-  }
-  // slice 2 level
-  for (size_t level = 0; level < 2UL; ++level) {
-    auto new_lod_tensor = lod_tensor->SliceShared(level, level + 2);
-    ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
-    ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor->NumElements(level));
-    ASSERT_EQ(new_lod_tensor.NumElements(1),
-              lod_tensor->NumElements(level + 1));
-    ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor());
-  }
-}
-
-TEST_F(LODTensorTester, SliceCopied_Level) {
+TEST_F(LODTensorTester, SliceLevels) {
   // slice 1 level
   for (size_t level = 0; level < 3UL; ++level) {
-    auto new_lod_tensor =
-        lod_tensor->SliceCopied<float>(level, level + 1, place);
+    auto new_lod_tensor = lod_tensor->SliceLevels<float>(level, level + 1);
     ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
     ASSERT_EQ(new_lod_tensor.NumElements(0UL), lod_tensor->NumElements(level));
-    // ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor());
-    // TODO(superjom) add tensor comparison here.
+    // ASSERT_EQ(new_lod_tensor, *lod_tensor);
   }
   // slice 2 level
   for (size_t level = 0; level < 2UL; ++level) {
-    auto new_lod_tensor =
-        lod_tensor->SliceCopied<float>(level, level + 2, place);
+    auto new_lod_tensor = lod_tensor->SliceLevels<float>(level, level + 2);
     ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
     ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor->NumElements(level));
     ASSERT_EQ(new_lod_tensor.NumElements(1),
               lod_tensor->NumElements(level + 1));
-    // ASSERT_EQ(new_lod_tensor.tensor(), lod_tensor->tensor());
-    // TODO(superjom) add tensor comparison here.
+    ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());
   }
 }

-TEST_F(LODTensorTester, SliceShared_Element) {
-  size_t level = 0;
-  auto new_lod_tensor = lod_tensor->SliceShared(level, 0, 2);
-  ASSERT_EQ(new_lod_tensor.NumLevels(), 3UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(2), 8UL);
-  ASSERT_EQ(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor());
-
-  level = 1;
-  new_lod_tensor = lod_tensor->SliceShared(level, 0, 2);
-  ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
-  ASSERT_EQ(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor());
-}
-
-TEST_F(LODTensorTester, SliceCopied_Element) {
+TEST_F(LODTensorTester, SliceInLevel) {
   size_t level = 0;
-  auto new_lod_tensor = lod_tensor->SliceCopied<float>(level, 0, 2, place);
-  ASSERT_EQ(new_lod_tensor.NumLevels(), 3UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(2), 8UL);
-  ASSERT_NE(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor());
+  auto new_lod_tensor = lod_tensor->SliceInLevel<float>(level, 0, 2);
+  EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL);
+  EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL);
+  EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL);
+  EXPECT_EQ(new_lod_tensor.NumElements(2), 8UL);
+  ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());

   level = 1;
-  new_lod_tensor = lod_tensor->SliceCopied<float>(level, 0, 2, place);
+  new_lod_tensor = lod_tensor->SliceInLevel<float>(level, 0, 2);
   ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
   ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
   ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
-  ASSERT_NE(new_lod_tensor.raw_tensor(), lod_tensor->raw_tensor());
-
-  level = 1;
-  // LOD is
-  // 0 5 10
-  // 0 2 5 7 10
-  new_lod_tensor = lod_tensor->SliceCopied<float>(level, 1, 3, place);
-  ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
-  ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
-  ASSERT_EQ(new_lod_tensor.lod_element(0, 0), 0UL);
-  ASSERT_EQ(new_lod_tensor.lod_element(0, 1), 5UL);
-  ASSERT_EQ(new_lod_tensor.lod_element(1, 0), 0UL);
-  ASSERT_EQ(new_lod_tensor.lod_element(1, 1), 2UL);
-  ASSERT_EQ(new_lod_tensor.lod_element(1, 2), 5UL);
-  ASSERT_EQ(new_lod_tensor.lod_element(1, 3), 7UL);
-  // TODO(superjom) compare the content of these tensors
+  ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor->data<float>());
 }

 TEST_F(LODTensorTester, ShareLOD) {
   LODTensor new_lod_tensor;
-  new_lod_tensor.ShareLOD(*lod_tensor);
+  new_lod_tensor.CopyLOD(*lod_tensor);
   ASSERT_EQ(new_lod_tensor.lod(), lod_tensor->lod());
 }

 TEST_F(LODTensorTester, CopyLOD) {
   LODTensor new_lod_tensor;
   new_lod_tensor.CopyLOD(*lod_tensor);
-  ASSERT_NE(new_lod_tensor.lod(), lod_tensor->lod());
+  bool equals = std::equal(lod_tensor->lod().begin(), lod_tensor->lod().end(),
+                           new_lod_tensor.lod().begin());
+  ASSERT_TRUE(equals);
 }

 }  // namespace framework
paddle/gserver/tests/LayerGradUtil.h

@@ -98,9 +98,9 @@ struct InputDef {
   InputDef(InputType type,
            string nameIn,
-           std::vector<int> ids,
-           std::vector<int> selfDefinedSeqStartPos = {},
-           std::vector<int> selfDefinedSubSeqStartPos = {})
+           const std::vector<int>& ids,
+           const std::vector<int>& selfDefinedSeqStartPos = {},
+           const std::vector<int>& selfDefinedSubSeqStartPos = {})
       : labelSeqStartPositions(selfDefinedSeqStartPos),
         labelSubSeqStartPositions(selfDefinedSubSeqStartPos),
         ids(ids) {
paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp

@@ -30,53 +30,102 @@ DECLARE_bool(thread_local_rand_use_global_seed);
 struct SingleBeamExpansion {
   vector<int> seqStartPos;
   vector<int> subSeqStartPos;
   vector<real> candidateScores;
   // TODO(caoying): store this into Argument.ids
   vector<real> selectedIndices;
   vector<int> groundTruth;
+  vector<int> labelSeqStartPos;
 };

-void genRandomBeamExpansion(size_t expansionCount,
-                            vector<SingleBeamExpansion>& beamExpansions) {
+void genCandidateScores(bool hasSubSeq,
+                        vector<real>& scores,
+                        vector<int>& seqStartPos,
+                        vector<int>& subSeqStartPos) {}
+
+void genSelectedIndicesAndGroundtruth(size_t beamSize,
+                                      vector<int>& seqStartPos,
+                                      vector<real>& selectedIndices) {}
+
+SingleBeamExpansion genOneBeam(size_t beamSize, bool hasSubSeq) {
+  SingleBeamExpansion beam;
+  genCandidateScores(
+      hasSubSeq, beam.candidateScores, beam.seqStartPos, beam.subSeqStartPos);
+  genSelectedIndicesAndGroundtruth(
+      beamSize,
+      hasSubSeq ? beam.subSeqStartPos : beam.seqStartPos,
+      beam.selectedIndices);
+  return beam;
+}
+
+void genRandomBeamExpansion(size_t expansionCount,
+                            size_t beamSize,
+                            vector<SingleBeamExpansion>& beamExpansions) {
   beamExpansions.clear();
+  for (size_t i = 0; i < expansionCount; ++i) {
+    beamExpansions.emplace_back(genOneBeam(beamSize, i));
+  }
 }

-void testCrossEntropyOverBeam() {
+void testCrossEntropyOverBeam(bool useGpu) {
+  TestConfig config;
+  config.layerConfig.set_type("cross_entropy_over_beam");
+
   const size_t expansionCount = 3;
+  const size_t beamSize = 3;
   vector<SingleBeamExpansion> beams;
-  genRandomBeamExpansion(expansionCount, beams);
+  genRandomBeamExpansion(expansionCount, beamSize, beams);

+  size_t seqNum = 0;
   for (size_t i = 0; i < beams.size(); ++i) {
     const SingleBeamExpansion& beam = beams[i];
     // create scores for all the candidates
     MatrixPtr candidateScorePtr =
         Matrix::create(beam.candidateScores.size(), 1, false, false);
-    candidateScorePtr->copyFrom(candidateScores.data(), candidateScores.size());
+    candidateScorePtr->copyFrom(beam.candidateScores.data(),
+                                beam.candidateScores.size());

     ostringstream paramName;
     paramName << "candidate_scores_" << i;

-    beam.subSeqStartPos.size()
-        ? config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA,
-                                      ostr.str(),
-                                      candidateScorePtr,
-                                      beam.seqStartPos,
-                                      beam.subSeqStartPos})
-        : config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA,
-                                      ostr.str(),
-                                      candidateScorePtr,
-                                      beam.seqStartPos});
+    if (beam.subSeqStartPos.size()) {
+      seqNum = beam.subSeqStartPos.size() - 1;
+      config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA,
+                                  paramName.str(),
+                                  candidateScorePtr,
+                                  beam.seqStartPos,
+                                  beam.subSeqStartPos});
+    } else {
+      seqNum = beam.seqStartPos.size() - 1;
+      config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA,
+                                  paramName.str(),
+                                  candidateScorePtr,
+                                  beam.seqStartPos});
+    }
+    config.layerConfig.add_inputs();
+
+    // create indices for the selected candidates
+    MatrixPtr selectedCandidates =
+        Matrix::create(seqNum, beamSize, false, false);
+    selectedCandidates->copyFrom(beam.selectedIndices.data(),
+                                 beam.selectedIndices.size());
+    paramName.clear();
+    paramName << "selected_candidates_" << i;
+    config.inputDefs.push_back(
+        {INPUT_SELF_DEFINE_DATA, paramName.str(), selectedCandidates});
+    config.layerConfig.add_inputs();
+
+    // create the ground truth
+    paramName.clear();
+    paramName << "label_" << i;
+    config.inputDefs.push_back({INPUT_SELF_DEFINE_DATA,
+                                paramName.str(),
+                                beam.groundTruth,
+                                beam.labelSeqStartPos});
   }

-  TestConfig config;
-  config.layerConfig.set_type("cross_entropy_over_beam");
-
-  // testLayerGrad(
-  //     config, "cross_entropy_over_beam", seqNum, false, useGpu, false);
+  testLayerGrad(
+      config, "cross_entropy_over_beam", seqNum, false, useGpu, false);
 }

 TEST(Layer, CrossEntropyOverBeam) {
python/paddle/v2/framework/tests/CMakeLists.txt

@@ -24,3 +24,4 @@ py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py)
 py_test(test_operator SRCS test_operator.py)
 # py_test(test_gaussian_random_op SRCS test_gaussian_random_op.py)
 py_test(test_uniform_random_op SRCS test_uniform_random_op.py)
+py_test(test_recurrent_op SRCS test_recurrent_op.py)
python/paddle/v2/framework/tests/test_recurrent_op.py

@@ -2,19 +2,74 @@ import logging
 import paddle.v2.framework.core as core
 import unittest
 import numpy as np
-import paddle.v2.framework.create_op_creation_methods as creation
+from paddle.v2.framework.op import Operator

-ops = creation.op_creations

-def create_tensor(scope, name, shape):
+def py_sigmoid(x):
+    return 1. / (1. + np.exp(-x))
+
+
+class PySimpleRNN(object):
+    '''
+    A simple implementation of RNN based on numpy, to further test
+    RecurrentOp's algorithm
+    '''
+
+    def __init__(self, input_dim=30, batch_size=50, weight_dim=15,
+                 sent_len=11):
+        self.x = np.random.normal(size=(sent_len, batch_size, input_dim))
+        self.W = np.random.normal(size=(input_dim, input_dim))
+        self.U = np.random.normal(size=(input_dim, input_dim))
+        self.h_boot = np.random.normal(size=(batch_size, input_dim))
+        # memories
+        self.mems = [
+            np.zeros(shape=(batch_size, input_dim)) for i in range(sent_len)
+        ]
+
+    def forward(self):
+        xs = self.segment_inputs()
+        for step_id in range(self.x.shape[0]):
+            self.step(step_id, xs[step_id])
+        return self.concat_outputs()
+
+    def segment_inputs(self):
+        return [self.x[i] for i in range(self.x.shape[0])]
+
+    def concat_outputs(self):
+        return np.array(self.mems)
+
+    def step(self, step_id, x):
+        '''
+        run a step
+        '''
+        if step_id > 0:
+            pre_mem = self.mems[step_id - 1]
+        else:
+            pre_mem = self.h_boot
+        xW = np.matmul(x, self.W)
+        hU = np.matmul(pre_mem, self.U)
+        sum = xW + hU
+        self.mems[step_id] = py_sigmoid(sum)
+
+
+class PySimpleRNNTest(unittest.TestCase):
+    def setUp(self):
+        self.rnn = PySimpleRNN()
+
+    def test_forward(self):
+        output = self.rnn.forward()
+        print 'output', output
+
+
+def create_tensor(scope, name, shape, np_data):
     tensor = scope.new_var(name).get_tensor()
     tensor.set_dims(shape)
-    tensor.set(np.random.random(shape), core.CPUPlace())
+    tensor.set(np_data, core.CPUPlace())
     return tensor


-class TestRNN(unittest.TestCase):
+class TestRecurrentOp(unittest.TestCase):
     '''
     Test RNNOp
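For reference, the step recurrence PySimpleRNN implements can be written in LaTeX as

$$h_t = \sigma\!\left(x_t W + h_{t-1} U\right), \qquad h_{-1} = h_{\text{boot}}, \qquad \sigma(z) = \frac{1}{1 + e^{-z}},$$

where $x_t$ is the $t$-th slice of the (sent_len, batch_size, input_dim) input and $h_t$ is the $t$-th memory. Note that test_forward below only checks that RecurrentOp's output has the same shape as this numpy reference; the values themselves are not yet compared.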
@@ -28,7 +83,7 @@ class TestRNN(unittest.TestCase):
     memories:
         - h
     outputs:
        - h
     '''

     input_dim = 30

@@ -36,33 +91,45 @@ class TestRNN(unittest.TestCase):
     weight_dim = 15
     sent_len = 11

-    def init(self):
-
+    def setUp(self):
+        self.py_rnn = PySimpleRNN(self.input_dim, self.batch_size,
+                                  self.weight_dim, self.sent_len)
+
+    def forward(self):
         self.scope = core.Scope()
-
         self.create_global_variables()
         self.create_step_net()
         rnn_op = self.create_rnn_op()
         ctx = core.DeviceContext.create(core.CPUPlace())
-        print 'infer_shape'
         rnn_op.infer_shape(self.scope)
-
         rnn_op.run(self.scope, ctx)
+        return np.array(self.scope.find_var("h").get_tensor())

     def create_global_variables(self):
         # create inlink
+        x_np_data = self.py_rnn.x
         create_tensor(self.scope, "x",
-                      [self.sent_len, self.batch_size, self.input_dim])
-        create_tensor(self.scope, "W", [self.input_dim, self.input_dim])
-        create_tensor(self.scope, "U", [self.input_dim, self.input_dim])
-        create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim])
+                      [self.sent_len, self.batch_size, self.input_dim],
+                      x_np_data)
+        W_np_data = self.py_rnn.W
+        create_tensor(self.scope, "W", [self.input_dim, self.input_dim],
+                      W_np_data)
+
+        U_np_data = self.py_rnn.U
+        create_tensor(self.scope, "U", [self.input_dim, self.input_dim],
+                      U_np_data)
+
+        h_boot_np_data = self.py_rnn.h_boot
+        create_tensor(self.scope, "h_boot", [self.batch_size, self.input_dim],
+                      h_boot_np_data)
         self.scope.new_var("step_scopes")
         self.scope.new_var("h@alias")
         self.scope.new_var("h")

     def create_rnn_op(self):
         # create RNNOp
-        rnnop = ops.recurrent_op(
+        rnnop = Operator(
+            "recurrent_op",
             # inputs
             inlinks=["x"],
             boot_memories=["h_boot"],

@@ -81,17 +148,25 @@ class TestRNN(unittest.TestCase):
         var = self.scope.new_var("stepnet")
         stepnet = var.get_net()

-        x_fc_op = ops.fc(X="x@alias", W="W", Y="Wx")
-        h_fc_op = ops.fc(X="h@pre", W="U", Y="Uh")
-        sum_op = ops.add_two(X="Wx", Y="Uh", Out="sum")
-        sig_op = ops.sigmoid(X="sum", Y="h@alias")
+        # x_fc_op = Operator("fc", X="x@alias", W="W", Y="Wx")
+        # h_fc_op = Operator("fc", X="h@pre", W="U", Y="Uh")
+        x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx")
+        h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh")
+        sum_op = Operator("add_two", X="Wx", Y="Uh", Out="sum")
+        sig_op = Operator("sigmoid", X="sum", Y="h@alias")

         for op in [x_fc_op, h_fc_op, sum_op, sig_op]:
             stepnet.add_op(op)
         stepnet.complete_add_op(True)

-    def test_recurrent(self):
-        self.init()
+    def test_forward(self):
+        print 'test recurrent op forward'
+        pd_output = self.forward()
+        py_output = self.py_rnn.forward()
+        print 'pd_output', pd_output
+        print
+        print 'py_output', py_output
+        self.assertEqual(pd_output.shape, py_output.shape)


 if __name__ == '__main__':