Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
as350144
Mace
提交
efbb9eb7
Mace
项目概览
as350144
/
Mace
与 Fork 源项目一致
Fork自
Xiaomi / Mace
通知
2
Star
1
Fork
1
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
Mace
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
efbb9eb7
编写于
11月 07, 2017
作者:
L
Liangliang He
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Add OpenCL ResizeBilinear empty kernel
上级
b73355bd
变更
5
隐藏空白更改
内联
并排
Showing
5 changed files
with
125 additions
and
29 deletions
+125
-29
mace/kernels/opencl/resize_bilinear_opencl.cc
mace/kernels/opencl/resize_bilinear_opencl.cc
+16
-0
mace/kernels/resize_bilinear.h
mace/kernels/resize_bilinear.h
+34
-13
mace/ops/resize_bilinear.cc
mace/ops/resize_bilinear.cc
+3
-0
mace/ops/resize_bilinear.h
mace/ops/resize_bilinear.h
+3
-16
mace/ops/resize_bilinear_benchmark.cc
mace/ops/resize_bilinear_benchmark.cc
+69
-0
未找到文件。
mace/kernels/opencl/resize_bilinear_opencl.cc
0 → 100644
浏览文件 @
efbb9eb7
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#include "mace/kernels/resize_bilinear.h"
#include "mace/core/tensor.h"
namespace
mace
{
namespace
kernels
{
template
<
>
void
ResizeBilinearFunctor
<
DeviceType
::
OPENCL
,
float
>::
operator
()(
const
Tensor
*
input
,
const
Tensor
*
resize_dims
,
Tensor
*
output
)
{}
}
// namespace kernels
}
// namespace mace
mace/kernels/resize_bilinear.h
浏览文件 @
efbb9eb7
...
...
@@ -106,16 +106,33 @@ struct ResizeBilinearFunctor {
ResizeBilinearFunctor
(
bool
align_corners
)
:
align_corners_
(
align_corners
)
{}
void
operator
()(
const
T
*
input
,
T
*
output
,
index_t
n
,
index_t
channels
,
index_t
in_height
,
index_t
in_width
,
index_t
out_height
,
index_t
out_width
)
{
void
operator
()(
const
Tensor
*
input
,
const
Tensor
*
resize_dims
,
Tensor
*
output
)
{
index_t
n
=
input
->
dim
(
0
);
index_t
channels
=
input
->
dim
(
1
);
index_t
in_height
=
input
->
dim
(
2
);
index_t
in_width
=
input
->
dim
(
3
);
index_t
out_height
;
index_t
out_width
;
{
MACE_CHECK
(
resize_dims
->
dim_size
()
==
1
);
Tensor
::
MappingGuard
resize_dims_mapper
(
resize_dims
);
auto
dims_data
=
resize_dims
->
data
<
index_t
>
();
out_height
=
dims_data
[
0
];
out_width
=
dims_data
[
1
];
}
vector
<
index_t
>
out_shape
{
n
,
channels
,
out_height
,
out_width
};
output
->
Resize
(
out_shape
);
const
T
*
input_data
=
input
->
data
<
T
>
();
T
*
output_data
=
output
->
mutable_data
<
T
>
();
if
(
out_height
==
in_height
&&
out_width
==
in_width
)
{
std
::
copy
(
input
,
input
+
channels
*
in_height
*
in_width
,
output
);
std
::
copy
(
input_data
,
input_data
+
channels
*
in_height
*
in_width
,
output_data
);
return
;
}
...
...
@@ -131,12 +148,16 @@ struct ResizeBilinearFunctor {
ComputeInterpolationWeights
(
out_height
,
in_height
,
height_scale
,
ys
.
data
());
ComputeInterpolationWeights
(
out_width
,
in_width
,
width_scale
,
xs
.
data
());
ResizeImage
(
input
,
n
,
in_height
,
in_width
,
out_height
,
out_width
,
channels
,
xs
,
ys
,
output
);
ResizeImage
(
input
_data
,
n
,
in_height
,
in_width
,
out_height
,
out_width
,
channels
,
xs
,
ys
,
output_data
);
}
};
}
// namespace kernels
}
// namespace mace
template
<
>
void
ResizeBilinearFunctor
<
DeviceType
::
OPENCL
,
float
>::
operator
()(
const
Tensor
*
input
,
const
Tensor
*
resize_dims
,
Tensor
*
output
);
}
// namespace kernels
}
// namespace mace
#endif // MACE_KERNELS_RESIZE_BILINEAR_H_
mace/ops/resize_bilinear.cc
浏览文件 @
efbb9eb7
...
...
@@ -13,4 +13,7 @@ REGISTER_NEON_OPERATOR(ResizeBilinear,
ResizeBilinearOp
<
DeviceType
::
NEON
,
float
>
);
#endif // __ARM_NEON
REGISTER_OPENCL_OPERATOR
(
ResizeBilinear
,
ResizeBilinearOp
<
DeviceType
::
OPENCL
,
float
>
);
}
// namespace mace
mace/ops/resize_bilinear.h
浏览文件 @
efbb9eb7
...
...
@@ -21,28 +21,15 @@ class ResizeBilinearOp : public Operator<D, T> {
bool
Run
()
override
{
const
Tensor
*
input
=
this
->
Input
(
0
);
const
Tensor
*
resize_dims
=
this
->
Input
(
1
);
Tensor
*
output
=
this
->
Output
(
0
);
MACE_CHECK
(
input
->
dim_size
()
==
4
,
"input must be 4-dimensional."
,
input
->
dim_size
());
MACE_CHECK
(
resize_dims
->
dim_size
()
==
1
,
"resize dim must be 2-dimensional."
,
resize_dims
->
dim_size
());
Tensor
*
output
=
this
->
Output
(
0
);
index_t
n
=
input
->
dim
(
0
);
index_t
channels
=
input
->
dim
(
1
);
index_t
in_height
=
input
->
dim
(
2
);
index_t
in_width
=
input
->
dim
(
3
);
index_t
out_height
=
resize_dims
->
data
<
index_t
>
()[
0
];
index_t
out_width
=
resize_dims
->
data
<
index_t
>
()[
1
];
vector
<
index_t
>
out_shape
{
n
,
channels
,
out_height
,
out_width
};
output
->
Resize
(
out_shape
);
const
T
*
input_ptr
=
input
->
data
<
T
>
();
T
*
output_ptr
=
output
->
mutable_data
<
T
>
();
functor_
(
input_ptr
,
output_ptr
,
n
,
channels
,
in_height
,
in_width
,
out_height
,
out_width
);
functor_
(
input
,
resize_dims
,
output
);
return
true
;
}
...
...
@@ -50,6 +37,6 @@ class ResizeBilinearOp : public Operator<D, T> {
kernels
::
ResizeBilinearFunctor
<
D
,
T
>
functor_
;
};
}
//
namespace mace
}
// namespace mace
#endif // MACE_RESIZE_BILINEAR_H
mace/ops/resize_bilinear_benchmark.cc
0 → 100644
浏览文件 @
efbb9eb7
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#include <string>
#include "mace/core/operator.h"
#include "mace/core/testing/test_benchmark.h"
#include "mace/ops/ops_test_util.h"
namespace
mace
{
template
<
DeviceType
D
,
typename
T
>
static
void
ResizeBilinearBenchmark
(
int
iters
,
int
batch
,
int
channels
,
int
input_height
,
int
input_width
,
int
output_height
,
int
output_width
)
{
mace
::
testing
::
StopTiming
();
OpsTestNet
net
;
OpDefBuilder
(
"ResizeBilinear"
,
"ResizeBilinearBenchmark"
)
.
Input
(
"Input"
)
.
Input
(
"OutSize"
)
.
Output
(
"Output"
)
.
Finalize
(
net
.
NewOperatorDef
());
// Add input data
net
.
AddRandomInput
<
DeviceType
::
CPU
,
float
>
(
"Input"
,
{
batch
,
channels
,
input_height
,
input_width
});
net
.
AddInputFromArray
<
DeviceType
::
CPU
,
index_t
>
(
"OutSize"
,
{
2
},
{
output_height
,
output_width
});
// Warm-up
for
(
int
i
=
0
;
i
<
5
;
++
i
)
{
net
.
RunOp
(
D
);
}
mace
::
testing
::
StartTiming
();
while
(
iters
--
)
{
net
.
RunOp
(
D
);
}
}
#define BM_RESIZE_BILINEAR_MACRO(N, C, H0, W0, H1, W1, TYPE, DEVICE) \
static void \
BM_RESIZE_BILINEAR_##N##_##C##_##H0##_##W0##_##H1##_##W1##_##TYPE##_##DEVICE( \
int iters) { \
const int64_t tot = static_cast<int64_t>(iters) * N * C * H1 * W1; \
mace::testing::ItemsProcessed(tot); \
mace::testing::BytesProcessed(tot *(sizeof(TYPE))); \
ResizeBilinearBenchmark<DEVICE, TYPE>(iters, N, C, H0, W0, H1, W1); \
} \
BENCHMARK( \
BM_RESIZE_BILINEAR_##N##_##C##_##H0##_##W0##_##H1##_##W1##_##TYPE##_##DEVICE)
#define BM_RESIZE_BILINEAR(N, C, H0, W0, H1, W1, TYPE) \
BM_RESIZE_BILINEAR_MACRO(N, C, H0, W0, H1, W1, TYPE, CPU); \
BM_RESIZE_BILINEAR_MACRO(N, C, H0, W0, H1, W1, TYPE, NEON); \
BM_RESIZE_BILINEAR_MACRO(N, C, H0, W0, H1, W1, TYPE, OPENCL);
BM_RESIZE_BILINEAR
(
1
,
256
,
7
,
7
,
15
,
15
,
float
);
BM_RESIZE_BILINEAR
(
1
,
256
,
15
,
15
,
30
,
30
,
float
);
BM_RESIZE_BILINEAR
(
1
,
128
,
30
,
30
,
60
,
60
,
float
);
BM_RESIZE_BILINEAR
(
1
,
128
,
240
,
240
,
480
,
480
,
float
);
BM_RESIZE_BILINEAR
(
1
,
3
,
4032
,
3016
,
480
,
480
,
float
);
BM_RESIZE_BILINEAR
(
1
,
3
,
480
,
480
,
4032
,
3016
,
float
);
}
// namespace mace
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录