PaddlePaddle / PaddleDetection

Commit 48b080db
Authored on Oct 05, 2017 by qijun

ensure global BuddyAllocator is initialized before global Scope

Parent: 45c4dcaa
Showing 5 changed files with 62 additions and 53 deletions (+62 -53)
paddle/framework/executor_test.cc   +51 -43
paddle/operators/feed_op.cc          +2  -2
paddle/operators/feed_op.h           +1  -1
paddle/operators/fetch_op.cc         +4  -3
paddle/operators/fetch_op.h          +4  -4
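Background for the fix: C++ destroys objects with static storage duration in the reverse order of their construction, so constructing the allocator singleton before the global Scope guarantees that the Scope, whose destructor releases memory obtained from the allocator, is torn down first; otherwise the "pointer being freed was not allocated" crash mentioned in the test comments appears. Below is a minimal, self-contained sketch of that idea using stand-in singletons; it is not Paddle's BuddyAllocator or Scope implementation, and UseAllocator/GetScope here are illustrative names only.

#include <cstdio>

// Stand-in for the memory allocator singleton.
struct Allocator {
  ~Allocator() { std::puts("Allocator destroyed last"); }
};

// Stand-in for the global Scope; its destructor still needs the allocator
// to be alive, because it releases memory obtained from it.
struct Scope {
  ~Scope() { std::puts("Scope destroyed first"); }
};

// Function-local statics are constructed on first use and destroyed in
// reverse order of construction.
Allocator& UseAllocator() { static Allocator a; return a; }  // plays the role of paddle::memory::Used()
Scope& GetScope() { static Scope s; return s; }              // plays the role of framework::GetScope()

int main() {
  UseAllocator();  // construct the allocator first ...
  GetScope();      // ... then the Scope, so the Scope is destroyed first
  return 0;
}

In the test changes below, the added paddle::memory::Used(cpu_place) and paddle::memory::Used(gpu_place) calls play exactly this touch-the-allocator-first role before the first use of GetScope().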
paddle/framework/executor_test.cc

@@ -13,8 +13,6 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/framework/executor.h"
-#include <memory>  // for unique_ptr
-#include <mutex>   // for call_once
 #include <vector>
 #include "gtest/gtest.h"
 #include "paddle/framework/attribute.h"
@@ -34,9 +32,8 @@ using namespace paddle::framework;
 typedef paddle::framework::BlockDesc proto_block;
 typedef paddle::framework::OpDesc proto_op;
 
-void add_gaussian_random_op(string var_name, proto_block* block) {
-  std::vector<int> dim{2, 3};
-
+void add_gaussian_random_op(string var_name, std::vector<int>& dim,
+                            proto_block* block) {
   // insert variable
   auto a = block->add_vars();
   a->set_name(var_name);
@@ -60,9 +57,8 @@ void add_gaussian_random_op(string var_name, proto_block* block) {
   Out->add_arguments(var_name);
 }
 
-void add_feed_op(string var_name, int index, proto_block* block) {
-  std::vector<int> dim{3};
-
+void add_feed_op(string var_name, std::vector<int>& dim, int index,
+                 proto_block* block) {
   // insert variable
   auto a = block->add_vars();
   a->set_name(var_name);
@@ -95,9 +91,8 @@ void add_feed_op(string var_name, int index, proto_block* block) {
   Out->add_arguments(var_name);
 }
 
-void add_fetch_op(string var_name, int index, proto_block* block) {
-  std::vector<int> dim{3};
-
+void add_fetch_op(string var_name, std::vector<int>& dim, int index,
+                  proto_block* block) {
   // insert variable
   auto a = block->add_vars();
   a->set_name(var_name);
@@ -138,20 +133,11 @@ void set_feed_variable(const std::vector<std::vector<T>>& inputs) {
   Variable* g_feed_value = GetScope()->FindVar("feed_value");
   FeedInputs& feed_inputs = *(g_feed_value->GetMutable<FeedInputs>());
   auto size = inputs.size();
-
-  std::call_once(set_variable_flag, [&]() {
-    feed_inputs.reserve(size);
-    for (size_t i = 0; i < size; i++) {
-      paddle::framework::Tensor tmp;
-      tmp.mutable_data<T>(make_ddim({static_cast<int64_t>(inputs[i].size())}),
-                          CPUPlace());
-      feed_inputs.push_back(tmp);
-    }
-  });
-
+  feed_inputs.resize(size);
   for (size_t i = 0; i < size; i++) {
-    memcpy(feed_inputs[i].data<T>(), inputs[i].data(),
-           inputs[i].size() * sizeof(T));
+    T* dst = feed_inputs[i].mutable_data<T>(
+        make_ddim({static_cast<int64_t>(inputs[i].size())}), CPUPlace());
+    memcpy(dst, inputs[i].data(), inputs[i].size() * sizeof(T));
   }
 }
@@ -160,19 +146,17 @@ std::vector<std::vector<T>> get_fetch_variable() {
   typedef std::vector<paddle::framework::Tensor> FetchOutputs;
   Variable* g_fetch_value = GetScope()->FindVar("fetch_value");
   FetchOutputs& fetch_outputs = *(g_fetch_value->GetMutable<FetchOutputs>());
-
-  auto size = fetch_outputs.size();
+  auto size = fetch_outputs.size();
 
   std::vector<std::vector<T>> result;
   result.reserve(size);
   for (size_t i = 0; i < size; i++) {
     std::vector<T> tmp;
-    tmp.reserve(fetch_outputs[i].numel());
+    tmp.resize(fetch_outputs[i].numel());
     memcpy(tmp.data(), fetch_outputs[i].data<T>(),
            fetch_outputs[i].numel() * sizeof(T));
     result.push_back(tmp);
   }
   return result;
 }
@@ -183,8 +167,9 @@ class ExecutorTesterRandom : public ::testing::Test {
     root_block->set_idx(0);
     root_block->set_parent_idx(-1);
 
-    add_gaussian_random_op("a", root_block);
-    add_gaussian_random_op("b", root_block);
+    std::vector<int> dim{2, 3};
+    add_gaussian_random_op("a", dim, root_block);
+    add_gaussian_random_op("b", dim, root_block);
 
     auto c = root_block->add_vars();
     c->set_name("c");
@@ -203,12 +188,11 @@ class ExecutorTesterRandom : public ::testing::Test {
     Out->set_parameter("Out");
     Out->add_arguments("c");
 
-    scope_ = GetScope();
+    add_fetch_op("c", dim, 0, root_block);
   }
 
  protected:
   ProgramDesc pdesc_;
-  Scope* scope_;
 };
 
 class ExecutorTesterFeed : public ::testing::Test {
@@ -218,8 +202,10 @@ class ExecutorTesterFeed : public ::testing::Test {
     root_block->set_idx(0);
     root_block->set_parent_idx(-1);
 
-    add_feed_op("a", 0, root_block);
-    add_feed_op("b", 1, root_block);
+    std::vector<int> dim{6};
+    add_feed_op("a", dim, 0, root_block);
+    add_feed_op("b", dim, 1, root_block);
 
     auto c = root_block->add_vars();
     c->set_name("c");
@@ -238,10 +224,10 @@ class ExecutorTesterFeed : public ::testing::Test {
     Out->set_parameter("Out");
     Out->add_arguments("c");
 
-    add_fetch_op("c", 0, root_block);
+    add_fetch_op("c", dim, 0, root_block);
 
-    std::vector<float> vec1 = {1.0, 2.0, 3.0};
-    std::vector<float> vec2 = {4.0, 5.0, 6.0};
+    std::vector<float> vec1 = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
+    std::vector<float> vec2 = {4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
     inputs_.push_back(vec1);
     inputs_.push_back(vec2);
   }
@@ -253,12 +239,24 @@ class ExecutorTesterFeed : public ::testing::Test {
 TEST_F(ExecutorTesterRandom, CPU) {
   std::vector<Place> places;
-  CPUPlace cpu_place1, cpu_place2;
-  places.push_back(cpu_place1);
-  places.push_back(cpu_place2);
+  CPUPlace cpu_place;
+  places.push_back(cpu_place);
+
+  // We have a global Scope and BuddyAllocator, and we must ensure
+  // global BuddyAllocator is initialized before global Scope. Thus,
+  // global Scope will deconstruct before BuddyAllocator. Otherwise,
+  // "pointer being freed was not allocated" error will appear.
+  paddle::memory::Used(cpu_place);
 
   Executor* executor = new Executor(places);
-  executor->Run(pdesc_, scope_);
+  executor->Run(pdesc_, GetScope());
+
+  std::vector<std::vector<float>> result = get_fetch_variable<float>();
+  for (auto& vec : result) {
+    for (auto& num : vec) {
+      std::cout << num << " ";
+    }
+    std::cout << std::endl;
+  }
   delete executor;
 }
@@ -267,6 +265,12 @@ TEST_F(ExecutorTesterFeed, CPU) {
   CPUPlace cpu_place;
   places.push_back(cpu_place);
 
+  // We have a global Scope and BuddyAllocator, and we must ensure
+  // global BuddyAllocator is initialized before global Scope. Thus,
+  // global Scope will deconstruct before BuddyAllocator. Otherwise,
+  // "pointer being freed was not allocated" error will appear.
+  paddle::memory::Used(cpu_place);
+
   Executor* executor = new Executor(places);
 
   // 3 mini-batch
@@ -293,8 +297,10 @@ TEST_F(ExecutorTesterRandom, GPU) {
   GPUPlace gpu_place(0);
   places.push_back(gpu_place);
 
+  paddle::memory::Used(gpu_place);
+
   Executor* executor = new Executor(places);
-  executor->Run(pdesc_, scope_);
+  executor->Run(pdesc_, GetScope());
   delete executor;
 }
@@ -303,11 +309,13 @@ TEST_F(ExecutorTesterFeed, GPU) {
   GPUPlace gpu_place(0);
   places.push_back(gpu_place);
 
+  paddle::memory::Used(gpu_place);
+
   Executor* executor = new Executor(places);
 
   // need to set feed variable before Executor::Run
   set_feed_variable<float>(inputs_);
-  executor->Run(pdesc_, scope_);
+  executor->Run(pdesc_, GetScope());
   delete executor;
 }
paddle/operators/feed_op.cc

@@ -29,11 +29,11 @@ class FeedOp : public framework::OperatorWithKernel {
     framework::Variable* g_feed_variable =
         framework::GetScope()->FindVar("feed_value");
 
-    FeedInputs tensors = g_feed_variable->Get<FeedInputs>();
+    const FeedInputs& tensors = g_feed_variable->Get<FeedInputs>();
 
     auto in_dim = tensors[col].dims();
     ctx->SetOutputDim("Out", in_dim);
-    // need to handle LodTensor later
+    // TODO(qijun) need to handle LodTensor later
   }
 
   framework::DataType IndicateDataType(
paddle/operators/feed_op.h

@@ -31,7 +31,7 @@ class FeedKernel : public framework::OpKernel<T> {
     framework::Variable* g_feed_variable =
         framework::GetScope()->FindVar("feed_value");
     int col = ctx.template Attr<int>("col");
-    FeedInputs tensors = g_feed_variable->Get<FeedInputs>();
+    const FeedInputs& tensors = g_feed_variable->Get<FeedInputs>();
     out->CopyFrom<T>(tensors[col], ctx.GetPlace());
   }
 };
paddle/operators/fetch_op.cc

@@ -30,15 +30,16 @@ class FetchOp : public framework::OperatorWithKernel {
         framework::GetScope()->FindVar("fetch_value");
 
     FetchOutputs* tensors = g_fetch_variable->GetMutable<FetchOutputs>();
-    if (tensors->size() < col) {
-      tensors->resize(col);
+    if (tensors->size() < static_cast<size_t>(col + 1)) {
+      tensors->resize(col + 1);
     }
 
     auto input_dim = ctx->GetInputDim("Input");
     framework::Tensor tmp;
     tmp.Resize(input_dim);
+    (*tensors)[col].Resize(input_dim);
 
-    // need to handle LodTensor later
+    // TODO(qijun) need to handle LodTensor later
   }
 
   framework::DataType IndicateDataType(
paddle/operators/fetch_op.h

@@ -26,13 +26,13 @@ class FetchKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     typedef std::vector<framework::Tensor> FetchOutputs;
-    Tensor* input = ctx.Output<Tensor>("Input");
+    const Tensor* input = ctx.Input<Tensor>("Input");
     int col = ctx.template Attr<int>("col");
     framework::Variable* g_fetch_variable =
        framework::GetScope()->FindVar("fetch_value");
-    FetchOutputs tensors = g_fetch_variable->Get<FetchOutputs>();
-    tensors[col].mutable_data<T>(platform::CPUPlace());
-    tensors[col].CopyFrom<T>(*input, platform::CPUPlace());
+    FetchOutputs* tensors = g_fetch_variable->GetMutable<FetchOutputs>();
+    (*tensors)[col].mutable_data<T>(platform::CPUPlace());
+    (*tensors)[col].CopyFrom<T>(*input, platform::CPUPlace());
   }
 };
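A note on the feed_op/fetch_op changes above: assigning the result of Get<FeedInputs>() to a by-value local copies the whole vector of feed tensors, so FeedOp and FeedKernel now bind a const reference instead, and FetchKernel, which previously wrote into such a copy and therefore never updated the globally shared fetch_value, now writes through GetMutable<FetchOutputs>(). The toy example below illustrates the write-side pitfall; it is not Paddle's Variable API, ToyVariable and its Get/GetMutable methods are invented for the illustration, and Get() returns by value here to stand in for the copy made when Get's result is assigned to a non-reference local.

#include <cassert>
#include <vector>

// Toy stand-in for a variable holding the shared fetch outputs.
struct ToyVariable {
  std::vector<float> value;                            // the shared storage
  std::vector<float> Get() const { return value; }     // hands back a copy
  std::vector<float>* GetMutable() { return &value; }  // hands back the real storage
};

int main() {
  ToyVariable fetch_value;
  fetch_value.value.assign(1, 0.0f);

  // Old pattern: mutate a copy; the shared value is never updated.
  std::vector<float> copy = fetch_value.Get();
  copy[0] = 42.0f;
  assert(fetch_value.value[0] == 0.0f);

  // New pattern: mutate through GetMutable(); the shared value is updated.
  (*fetch_value.GetMutable())[0] = 42.0f;
  assert(fetch_value.value[0] == 42.0f);
  return 0;
}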