Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Honmaple
Mace
提交
d9406dd3
Mace
项目概览
Honmaple
/
Mace
与 Fork 源项目一致
Fork自
Xiaomi / Mace
通知
1
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
Mace
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
前往新版Gitcode,体验更适合开发者的 AI 搜索 >>
提交
d9406dd3
编写于
5月 30, 2019
作者:
卢
卢旭辉
浏览文件
操作
浏览文件
下载
差异文件
Merge branch 'bug-in-opencl-runtime' into 'm0.11'
Fix some bugs. See merge request !1127
上级
61de4dab
9f448aa0
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
65 additions
and
85 deletions
+65
-85
mace/core/net_def_adapter.cc
mace/core/net_def_adapter.cc
+64
-76
mace/ops/matmul.cc
mace/ops/matmul.cc
+0
-8
mace/tools/git/gen_version_source.sh
mace/tools/git/gen_version_source.sh
+1
-1
未找到文件。
mace/core/net_def_adapter.cc
浏览文件 @
d9406dd3
...
...
@@ -50,15 +50,6 @@ std::string TransformedName(const std::string &input_name,
return
ss
.
str
();
}
#ifdef MACE_ENABLE_OPENCL
// Tells the adapter whether a memory-type transform operation has to be
// inserted in front of an op of the given type. A small set of ops works
// purely on tensor metadata ("Shape", "InferConv2dShape") and therefore
// accepts inputs of any memory type without a preceding transform.
//
// Returns true when a transform is required, false for the exempt ops.
bool TransformRequiredOp(const std::string &op_type) {
  static const std::unordered_set<std::string> kMemoryNeutralOps = {
      "Shape",
      "InferConv2dShape",
  };
  return kMemoryNeutralOps.find(op_type) == kMemoryNeutralOps.end();
}
#endif // MACE_ENABLE_OPENCL
void
BuildTransposeOpDef
(
const
std
::
string
&
input_name
,
const
std
::
string
&
output_name
,
...
...
@@ -514,76 +505,73 @@ MaceStatus NetDefAdapter::AdaptMemoryType(
// (only support one kind of memory type for multiple outputs)
op_registry_
->
GetInOutMemoryTypes
(
op_def
->
type
(),
context
);
#ifdef MACE_ENABLE_OPENCL
// if op is memory-unused op, no transformation
if
(
TransformRequiredOp
(
op_def
->
type
()))
{
int
input_size
=
op_def
->
input_size
();
for
(
int
i
=
0
;
i
<
input_size
;
++
i
)
{
if
(
output_map
->
count
(
op_def
->
input
(
i
))
==
0
)
{
MACE_CHECK
(
ws_
->
GetTensor
(
op_def
->
input
(
i
))
!=
nullptr
&&
ws_
->
GetTensor
(
op_def
->
input
(
i
))
->
is_weight
(),
"Tensor "
,
op_def
->
input
(
i
),
" of "
,
op_def
->
name
(),
" not allocated"
);
continue
;
}
auto
&
input_info
=
output_map
->
at
(
op_def
->
input
(
i
));
// check whether to do transform
MemoryType
src_mem_type
=
input_info
.
mem_type
;
MemoryType
dst_mem_type
=
context
->
GetInputMemType
(
i
);
auto
wanted_input_dtype
=
context
->
GetInputDataType
(
i
);
if
(
src_mem_type
!=
dst_mem_type
||
(
input_info
.
dtype
!=
wanted_input_dtype
&&
(
src_mem_type
!=
MemoryType
::
CPU_BUFFER
||
dst_mem_type
!=
MemoryType
::
CPU_BUFFER
)))
{
auto
transformed_name
=
TransformedName
(
op_def
->
input
(
i
),
"mem_type"
,
dst_mem_type
);
// check whether the tensor has been transformed
if
(
transformed_set
->
count
(
transformed_name
)
==
0
)
{
VLOG
(
1
)
<<
"Add Transform operation "
<<
op_def
->
name
()
<<
" to transform tensor "
<<
op_def
->
input
(
i
)
<<
"', from memory type "
<<
input_info
.
mem_type
<<
" to "
<<
dst_mem_type
;
OperatorDef
*
transformed_op_def
=
target_net_def
->
add_op
();
OpenCLUtil
::
BuildTransformOpDef
(
op_def
->
input
(
i
),
input_info
.
shape
,
transformed_name
,
wanted_input_dtype
,
context
->
GetInputOpenCLBufferType
(
i
),
dst_mem_type
,
input_info
.
data_format
,
transformed_op_def
);
// set data format arg
SetProtoArg
<
int
>
(
transformed_op_def
,
"data_format"
,
static_cast
<
int
>
(
input_info
.
data_format
));
// set output memory type argument
SetProtoArg
<
int
>
(
transformed_op_def
,
OutputMemoryTypeTagName
(),
dst_mem_type
);
int
input_size
=
op_def
->
input_size
();
for
(
int
i
=
0
;
i
<
input_size
;
++
i
)
{
if
(
output_map
->
count
(
op_def
->
input
(
i
))
==
0
)
{
MACE_CHECK
(
ws_
->
GetTensor
(
op_def
->
input
(
i
))
!=
nullptr
&&
ws_
->
GetTensor
(
op_def
->
input
(
i
))
->
is_weight
(),
"Tensor "
,
op_def
->
input
(
i
),
" of "
,
op_def
->
name
(),
" not allocated"
);
continue
;
}
auto
&
input_info
=
output_map
->
at
(
op_def
->
input
(
i
));
// check whether to do transform
MemoryType
src_mem_type
=
input_info
.
mem_type
;
MemoryType
dst_mem_type
=
context
->
GetInputMemType
(
i
);
auto
wanted_input_dtype
=
context
->
GetInputDataType
(
i
);
if
(
src_mem_type
!=
dst_mem_type
||
(
input_info
.
dtype
!=
wanted_input_dtype
&&
(
src_mem_type
!=
MemoryType
::
CPU_BUFFER
||
dst_mem_type
!=
MemoryType
::
CPU_BUFFER
)))
{
auto
transformed_name
=
TransformedName
(
op_def
->
input
(
i
),
"mem_type"
,
dst_mem_type
);
// check whether the tensor has been transformed
if
(
transformed_set
->
count
(
transformed_name
)
==
0
)
{
VLOG
(
1
)
<<
"Add Transform operation "
<<
op_def
->
name
()
<<
" to transform tensor "
<<
op_def
->
input
(
i
)
<<
"', from memory type "
<<
input_info
.
mem_type
<<
" to "
<<
dst_mem_type
;
OperatorDef
*
transformed_op_def
=
target_net_def
->
add_op
();
OpenCLUtil
::
BuildTransformOpDef
(
op_def
->
input
(
i
),
input_info
.
shape
,
transformed_name
,
wanted_input_dtype
,
context
->
GetInputOpenCLBufferType
(
i
),
dst_mem_type
,
input_info
.
data_format
,
transformed_op_def
);
// set data format arg
SetProtoArg
<
int
>
(
transformed_op_def
,
"data_format"
,
static_cast
<
int
>
(
input_info
.
data_format
));
// set output memory type argument
SetProtoArg
<
int
>
(
transformed_op_def
,
OutputMemoryTypeTagName
(),
dst_mem_type
);
// update tensor consumer information
output_map
->
at
(
op_def
->
input
(
i
)).
consumer_op_indices
.
push_back
(
target_net_def
->
op_size
()
-
1
);
// update tensor consumer information
output_map
->
at
(
op_def
->
input
(
i
)).
consumer_op_indices
.
push_back
(
target_net_def
->
op_size
()
-
1
);
// update output information map
output_map
->
emplace
(
transformed_name
,
InternalOutputInfo
(
dst_mem_type
,
context
->
GetInputDataType
(
i
),
input_info
.
data_format
,
input_info
.
shape
,
target_net_def
->
op_size
()
-
1
));
// update tensor shape map
tensor_shape_map
->
emplace
(
transformed_name
,
input_info
.
shape
);
// record transformed tensors
transformed_set
->
insert
(
transformed_name
);
}
// update original op_def's input
op_def
->
set_input
(
i
,
transformed_name
);
// update output information map
output_map
->
emplace
(
transformed_name
,
InternalOutputInfo
(
dst_mem_type
,
context
->
GetInputDataType
(
i
),
input_info
.
data_format
,
input_info
.
shape
,
target_net_def
->
op_size
()
-
1
));
// update tensor shape map
tensor_shape_map
->
emplace
(
transformed_name
,
input_info
.
shape
);
// record transformed tensors
transformed_set
->
insert
(
transformed_name
);
}
// update original op_def's input
op_def
->
set_input
(
i
,
transformed_name
);
}
}
#else
...
...
mace/ops/matmul.cc
浏览文件 @
d9406dd3
...
...
@@ -602,14 +602,6 @@ void RegisterMatMul(OpRegistryBase *op_registry) {
DeviceType
::
CPU
,
uint8_t
);
#endif // MACE_ENABLE_QUANTIZE
#ifdef MACE_ENABLE_OPENCL
MACE_REGISTER_OP
(
op_registry
,
"MatMul"
,
MatMulOp
,
DeviceType
::
GPU
,
float
);
MACE_REGISTER_OP
(
op_registry
,
"MatMul"
,
MatMulOp
,
DeviceType
::
GPU
,
half
);
#endif // MACE_ENABLE_OPENCL
#if defined(MACE_ENABLE_NEON) && defined(__ANDROID__)
MACE_REGISTER_OP
(
op_registry
,
"MatMul"
,
MatMulOp
,
DeviceType
::
CPU
,
float16_t
);
...
...
mace/tools/git/gen_version_source.sh
浏览文件 @
d9406dd3
...
...
@@ -28,7 +28,7 @@ fi
mkdir
-p
$OUTPUT_DIR
GIT_VERSION
=
$(
git
--git-dir
=
${
MACE_SOURCE_DIR
}
/.git
--work-tree
=
${
MACE_SOURCE_DIR
}
describe
--long
--tags
)
GIT_VERSION
=
$(
git
--git-dir
=
${
MACE_SOURCE_DIR
}
/.git
--work-tree
=
${
MACE_SOURCE_DIR
}
describe
--long
--tags
--abbrev
=
7
)
if
[[
$?
!=
0
]]
;
then
GIT_VERSION
=
unknown
else
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录