机器未来 / Paddle (fork of PaddlePaddle / Paddle)

Commit 12858baa
Authored Nov 14, 2017 by Dong Zhihong

    "relauch ci"

Parent: fc117ecf
Showing 2 changed files with 28 additions and 9 deletions (+28, -9):

  paddle/operators/accuracy_op.cu          +24  -5
  python/paddle/v2/framework/evaluator.py   +4  -4
paddle/operators/accuracy_op.cu @ 12858baa
@@ -24,7 +24,8 @@ using platform::PADDLE_CUDA_NUM_THREADS;
 template <int BlockSize>
 __global__ void AccuracyCudaKernel(const int N, const int D,
                                    const int64_t* Xdata,
-                                   const int64_t* labeldata, float* accuracy) {
+                                   const int64_t* labeldata, int* correct_data,
+                                   float* accuracy) {
   int count = 0;
   __shared__ int total[BlockSize];
@@ -43,6 +44,7 @@ __global__ void AccuracyCudaKernel(const int N, const int D,
   // reduce the count with init value 0, and output accuracy.
   int result = thrust::reduce(thrust::device, total, total + BlockSize, 0);
   if (threadIdx.x == 0) {
+    *correct_data = result;
     *accuracy = static_cast<float>(result) / static_cast<float>(N);
   }
 }
@@ -56,31 +58,48 @@ class AccuracyOpCUDAKernel : public framework::OpKernel<T> {
     auto* inference = ctx.Input<Tensor>("Out");
     auto* indices = ctx.Input<Tensor>("Indices");
     auto* label = ctx.Input<Tensor>("Label");
     auto* accuracy = ctx.Output<Tensor>("Accuracy");
+    auto* correct = ctx.Output<Tensor>("Correct");
+    auto* total = ctx.Output<Tensor>("Total");
     // FIXME(typhoonzero): only support indices currently
     // if add support for output values, how to detect the data type?
     const int64_t* indices_data = indices->data<int64_t>();
     const int64_t* label_data = label->data<int64_t>();
+    int* correct_data = correct->mutable_data<int>(ctx.GetPlace());
+    int* total_data = total->mutable_data<int>(ctx.GetPlace());
     float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace());
-    size_t num_samples = inference->dims()[0];
+    int num_samples = static_cast<int>(inference->dims()[0]);
     size_t infer_width = inference->dims()[1];
     PADDLE_ENFORCE(cudaMemset(accuracy_data, 0, sizeof(float)));
+    // cudaMemset((void**)&correct_data, 0, sizeof(float));
     if (num_samples == 0) {
       return;
     }
+    cudaMemcpy(total_data, &num_samples, sizeof(int), cudaMemcpyHostToDevice);
     AccuracyCudaKernel<PADDLE_CUDA_NUM_THREADS><<<
         1, PADDLE_CUDA_NUM_THREADS, 0, ctx.cuda_device_context().stream()>>>(
-        num_samples, infer_width, indices_data, label_data, accuracy_data);
+        num_samples, infer_width, indices_data, label_data, correct_data,
+        accuracy_data);
+    int d_num_samples, d_num_correct;
+    float d_accuracy;
+    cudaMemcpy(&d_num_correct, correct_data, sizeof(int),
+               cudaMemcpyDeviceToHost);
+    cudaMemcpy(&d_num_samples, total_data, sizeof(int),
+               cudaMemcpyDeviceToHost);
+    cudaMemcpy(&d_accuracy, accuracy_data, sizeof(float),
+               cudaMemcpyDeviceToHost);
   }
 };
 
 }  // namespace operators
 }  // namespace paddle
 
-// FIXME(typhoonzero): types of T is for infernece data.
-// label data is always int
+// FIXME(typhoonzero): types of T is for inference data.
+// label data is always int64
 REGISTER_OP_GPU_KERNEL(accuracy,
                        paddle::operators::AccuracyOpCUDAKernel<float>,
                        paddle::operators::AccuracyOpCUDAKernel<double>);
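The substance of the change to accuracy_op.cu is that the kernel now writes the raw number of correct predictions through the new correct_data output, and the host side copies Correct, Total, and Accuracy back with cudaMemcpy after the launch. Below is a minimal standalone sketch of that output pattern. It is not Paddle code: the kernel name, the sample data, and the use of a shared-memory counter with atomicAdd (instead of Paddle's per-thread total[] buffer reduced with thrust::reduce) are illustrative assumptions only.

// Standalone sketch (illustrative, not Paddle code): a single-block kernel
// that, like the patched AccuracyCudaKernel, reports both the raw number of
// correct predictions and the accuracy, so the host can read them back.
#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void AccuracySketchKernel(const int N, const int64_t* pred,
                                     const int64_t* label, int* correct,
                                     float* accuracy) {
  __shared__ int count;  // block-wide match counter
  if (threadIdx.x == 0) count = 0;
  __syncthreads();
  for (int i = threadIdx.x; i < N; i += blockDim.x) {
    if (pred[i] == label[i]) atomicAdd(&count, 1);
  }
  __syncthreads();
  if (threadIdx.x == 0) {  // thread 0 publishes both results
    *correct = count;
    *accuracy = static_cast<float>(count) / static_cast<float>(N);
  }
}

int main() {
  const int N = 8;
  int64_t h_pred[N] = {1, 2, 3, 4, 5, 6, 7, 8};
  int64_t h_label[N] = {1, 2, 0, 4, 5, 0, 7, 8};  // 6 of 8 match

  int64_t *d_pred, *d_label;
  int* d_correct;
  float* d_accuracy;
  cudaMalloc((void**)&d_pred, N * sizeof(int64_t));
  cudaMalloc((void**)&d_label, N * sizeof(int64_t));
  cudaMalloc((void**)&d_correct, sizeof(int));
  cudaMalloc((void**)&d_accuracy, sizeof(float));
  cudaMemcpy(d_pred, h_pred, N * sizeof(int64_t), cudaMemcpyHostToDevice);
  cudaMemcpy(d_label, h_label, N * sizeof(int64_t), cudaMemcpyHostToDevice);
  cudaMemset(d_accuracy, 0, sizeof(float));  // mirrors the cudaMemset in the op

  AccuracySketchKernel<<<1, 128>>>(N, d_pred, d_label, d_correct, d_accuracy);

  // Copy the scalar outputs back, as the patched op now does for
  // Correct and Accuracy.
  int h_correct;
  float h_accuracy;
  cudaMemcpy(&h_correct, d_correct, sizeof(int), cudaMemcpyDeviceToHost);
  cudaMemcpy(&h_accuracy, d_accuracy, sizeof(float), cudaMemcpyDeviceToHost);
  printf("correct = %d, accuracy = %f\n", h_correct, h_accuracy);

  cudaFree(d_pred);
  cudaFree(d_label);
  cudaFree(d_correct);
  cudaFree(d_accuracy);
  return 0;
}

Built with nvcc, this sketch prints "correct = 6, accuracy = 0.750000" for the sample arrays. The real operator additionally stores the batch size in the Total device tensor, presumably so the Python-side evaluator can accumulate correct/total counts across mini-batches rather than averaging per-batch accuracies.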
python/paddle/v2/framework/evaluator.py @ 12858baa
@@ -43,7 +43,7 @@ class Evaluator(object):
         """
         Clear metric states at the begin of each pass/user specified batch
         """
-        if program == None:
+        if reset_program == None:
             reset_program = Program()
         else:
             reset_program = program
@@ -147,9 +147,9 @@ class Accuracy(Evaluator):
         return acc_out
 
-    def eval(self, executor, program=None):
-        if program != None:
-            eval_program = program
+    def eval(self, executor, eval_program=None):
+        if eval_program != None:
+            eval_program = eval_program
         else:
             eval_program = Program()
         block = eval_program.global_block()