Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Xiaomi
Mace
提交
f7bb2064
Mace
项目概览
Xiaomi
/
Mace
通知
106
Star
40
Fork
27
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
Mace
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
前往新版Gitcode,体验更适合开发者的 AI 搜索 >>
提交
f7bb2064
编写于
8月 30, 2017
作者:
李
李寅
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Add deserializer to parse tensor from proto
上级
16086da9
变更
11
隐藏空白更改
内联
并排
Showing
11 changed files
with
234 additions
and
9 deletions
+234
-9
mace/core/allocator.cc
mace/core/allocator.cc
+9
-0
mace/core/allocator.h
mace/core/allocator.h
+7
-1
mace/core/integral_types.h
mace/core/integral_types.h
+2
-2
mace/core/operator.h
mace/core/operator.h
+1
-1
mace/core/serializer.cc
mace/core/serializer.cc
+78
-0
mace/core/serializer.h
mace/core/serializer.h
+28
-0
mace/core/tensor.h
mace/core/tensor.h
+36
-0
mace/core/workspace.cc
mace/core/workspace.cc
+7
-1
mace/core/workspace.h
mace/core/workspace.h
+3
-1
mace/examples/BUILD
mace/examples/BUILD
+1
-1
mace/examples/helloworld.cc
mace/examples/helloworld.cc
+62
-2
未找到文件。
mace/core/allocator.cc
浏览文件 @
f7bb2064
...
...
@@ -15,4 +15,13 @@ void SetCPUAllocator(CPUAllocator* alloc) {
g_cpu_allocator
.
reset
(
alloc
);
}
// Returns the process-wide allocator for the given device type.
// Only DeviceType::CPU is supported at the moment; any other type trips
// REQUIRE(false, ...) and, should that ever return, yields nullptr.
Allocator* GetDeviceAllocator(DeviceType type) {
  if (type == DeviceType::CPU) {
    return cpu_allocator();
  }
  REQUIRE(false, "device type ", type, " is not supported.");
  return nullptr;
}
}
// namespace mace
mace/core/allocator.h
浏览文件 @
f7bb2064
...
...
@@ -21,6 +21,7 @@ class Allocator {
virtual
~
Allocator
()
noexcept
{}
virtual
void
*
New
(
size_t
nbytes
)
=
0
;
virtual
void
Delete
(
void
*
data
)
=
0
;
virtual
void
CopyBytes
(
void
*
dst
,
const
void
*
src
,
size_t
size
)
=
0
;
template
<
typename
T
>
T
*
New
(
size_t
num_elements
)
{
...
...
@@ -59,6 +60,10 @@ class CPUAllocator: public Allocator {
free
(
data
);
}
#endif
// Implements Allocator::CopyBytes for host memory: a plain memcpy of
// `size` bytes from `src` to `dst`. Ranges must not overlap (memcpy
// contract).
void CopyBytes(void* dst, const void* src, size_t size) {
  memcpy(dst, src, size);
}
};
// Get the CPU Alloctor.
...
...
@@ -72,9 +77,10 @@ struct DeviceContext {};
template
<
>
struct
DeviceContext
<
DeviceType
::
CPU
>
{
static
Allocator
*
alloctor
()
{
return
cpu_allocator
();
}
static
Allocator
*
alloc
a
tor
()
{
return
cpu_allocator
();
}
};
Allocator
*
GetDeviceAllocator
(
DeviceType
type
);
}
// namespace mace
...
...
mace/core/integral_types.h
浏览文件 @
f7bb2064
...
...
@@ -9,11 +9,11 @@
typedef
signed
char
int8
;
typedef
short
int16
;
typedef
int
int32
;
typedef
long
long
int64
;
typedef
int64_t
int64
;
typedef
unsigned
char
uint8
;
typedef
unsigned
short
uint16
;
typedef
unsigned
int
uint32
;
typedef
u
nsigned
long
long
uint64
;
typedef
u
int64_t
uint64
;
#endif // MACE_CORE_INTEGRAL_TYPES_H_
mace/core/operator.h
浏览文件 @
f7bb2064
...
...
@@ -101,7 +101,7 @@ class Operator : public OperatorBase {
for
(
const
string
&
output_str
:
operator_def
.
output
())
{
outputs_
.
push_back
(
CHECK_NOTNULL
(
ws
->
CreateTensor
(
output_str
,
DeviceContext
<
D
>::
alloctor
(),
DeviceContext
<
D
>::
alloc
a
tor
(),
DataTypeToEnum
<
T
>::
v
())));
}
}
...
...
mace/core/serializer.cc
0 → 100644
浏览文件 @
f7bb2064
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#include "mace/core/serializer.h"
namespace
mace
{
// Serializes `tensor` into a new TensorProto stored under `name`.
// Not implemented yet: hits MACE_NOT_IMPLEMENTED and returns nullptr.
unique_ptr<TensorProto> Serializer::Serialize(const Tensor& tensor, const string& name) {
  MACE_NOT_IMPLEMENTED;
  return nullptr;
}
// Builds a Tensor from `proto`, allocated via the allocator registered for
// device `type`. The tensor is resized to proto.dims() and the payload is
// copied out of the proto's typed repeated field for proto.data_type().
// Narrow integer and bool payloads are transported in int32_data and
// narrowed element-wise through CopyWithCast; unhandled dtypes hit
// MACE_NOT_IMPLEMENTED.
// NOTE(review): assumes the repeated-field size equals the element count
// implied by proto.dims() — Copy/CopyWithCast REQUIRE equal sizes; confirm
// this is guaranteed by the model converter.
unique_ptr<Tensor> Serializer::Deserialize(const TensorProto& proto, DeviceType type) {
  unique_ptr<Tensor> tensor(new Tensor(GetDeviceAllocator(type), proto.data_type()));
  // Shape first: Resize allocates the destination buffer the copies fill.
  vector<TIndex> dims;
  for (const TIndex d : proto.dims()) {
    dims.push_back(d);
  }
  tensor->Resize(dims);
  switch (proto.data_type()) {
    case DT_FLOAT:
      tensor->Copy<float>(proto.float_data().data(), proto.float_data().size());
      break;
    case DT_DOUBLE:
      tensor->Copy<double>(proto.double_data().data(), proto.double_data().size());
      break;
    case DT_INT32:
      // NOTE(review): the `template` disambiguator is legal but unnecessary
      // here and inconsistent with the sibling cases.
      tensor->template Copy<int32>(proto.int32_data().data(), proto.int32_data().size());
      break;
    case DT_UINT8:
      // uint8 payload travels widened in int32_data; narrow on copy.
      tensor->CopyWithCast<int32, uint8>(proto.int32_data().data(), proto.int32_data().size());
      break;
    case DT_INT16:
      tensor->CopyWithCast<int32, int16>(proto.int32_data().data(), proto.int32_data().size());
      break;
    case DT_INT8:
      tensor->CopyWithCast<int32, int8>(proto.int32_data().data(), proto.int32_data().size());
      break;
    case DT_INT64:
      tensor->Copy<int64>(proto.int64_data().data(), proto.int64_data().size());
      break;
    case DT_UINT16:
      tensor->CopyWithCast<int32, uint16>(proto.int32_data().data(), proto.int32_data().size());
      break;
    case DT_BOOL:
      tensor->CopyWithCast<int32, bool>(proto.int32_data().data(), proto.int32_data().size());
      break;
    case DT_STRING: {
      // Strings are assigned element-by-element into the tensor's string
      // storage rather than bulk-copied.
      string* content = tensor->mutable_data<string>();
      for (int i = 0; i < proto.string_data().size(); ++i) {
        content[i] = proto.string_data(i);
      }
    } break;
    default:
      MACE_NOT_IMPLEMENTED;
      break;
  }
  return tensor;
}
}
// namespace mace
\ No newline at end of file
mace/core/serializer.h
0 → 100644
浏览文件 @
f7bb2064
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#ifndef MACE_CORE_SERIALIZER_H_
#define MACE_CORE_SERIALIZER_H_
#include "mace/proto/mace.pb.h"
#include "mace/core/common.h"
#include "mace/core/tensor.h"
namespace
mace
{
// Converts tensors to/from their TensorProto wire representation.
// Stateless and non-copyable; create one per deserialization pass.
class Serializer {
 public:
  Serializer() {}
  ~Serializer() {}

  // Serializes `tensor` under `name`. Currently unimplemented
  // (MACE_NOT_IMPLEMENTED; returns nullptr).
  unique_ptr<TensorProto> Serialize(const Tensor& tensor, const string& name);

  // Builds a new Tensor on device `type` from `proto` (shape + payload).
  unique_ptr<Tensor> Deserialize(const TensorProto& proto, DeviceType type);

  DISABLE_COPY_AND_ASSIGN(Serializer);
};
}
// namespace mace
#endif // MACE_CORE_SERIALIZER_H_
mace/core/tensor.h
浏览文件 @
f7bb2064
...
...
@@ -9,6 +9,7 @@
#include "mace/proto/mace.pb.h"
#include "mace/core/allocator.h"
#include "mace/core/types.h"
#include "mace/core/logging.h"
namespace
mace
{
...
...
@@ -118,6 +119,41 @@ class Tensor {
Resize
(
other
->
shape
());
}
// Copies `size` elements of type T from `src` into this tensor's buffer.
// `size` is an ELEMENT count, not bytes; it must equal the tensor's current
// element count size_ (enforced by REQUIRE), i.e. Resize must have been
// called first.
template <typename T>
inline void Copy(const T* src, size_t size) {
  REQUIRE(size == size_, "copy src and dst with different size.");
  CopyBytes(static_cast<const void*>(src), sizeof(T) * size);
}
// Copies `size` elements from `src`, converting each from SrcType to
// DstType via static_cast through a temporary buffer, then bulk-copies the
// converted bytes into this tensor's storage.
// `size` is an ELEMENT count and must equal the tensor's current element
// count size_ (enforced by REQUIRE).
template <typename SrcType, typename DstType>
inline void CopyWithCast(const SrcType* src, size_t size) {
  REQUIRE(size == size_, "copy src and dst with different size.");
  unique_ptr<DstType[]> buffer(new DstType[size]);
  // size_t index: the original `int i` compared signed against the
  // unsigned size_t bound (and would overflow for huge tensors).
  for (size_t i = 0; i < size; ++i) {
    buffer[i] = static_cast<DstType>(src[i]);
  }
  CopyBytes(static_cast<const void*>(buffer.get()), sizeof(DstType) * size);
}
// Copies `size` raw bytes from `src` into this tensor's backing store.
// Delegates to the tensor's allocator (alloc_), so the copy is performed by
// whatever mechanism the owning device requires; raw_mutable_data() is the
// destination and may allocate lazily.
inline void CopyBytes(const void* src, size_t size) {
  alloc_->CopyBytes(raw_mutable_data(), src, size);
}
// Logs the tensor's shape and dtype name, then every element value.
// Debug aid only: O(size_) formatting work per call.
inline void DebugPrint() {
  std::stringstream os;
  for (int i : shape_) {
    os << i << ", ";
  }
  LOG(INFO) << "Tensor shape: " << os.str() << " type: " << DataType_Name(dtype_);
  // Reset and reuse the stream for the element dump.
  os.str("");
  os.clear();
  for (int i = 0; i < size_; ++i) {
    // CASES presumably dispatches on dtype_ with T bound to the concrete
    // element type — TODO confirm against the macro definition.
    CASES(dtype_, (os << this->data<T>()[i]) << ", ");
  }
  LOG(INFO) << os.str();
}
private:
inline
int64
NumElements
()
const
{
return
std
::
accumulate
(
shape_
.
begin
(),
shape_
.
end
(),
1
,
std
::
multiplies
<
int64
>
());
...
...
mace/core/workspace.cc
浏览文件 @
f7bb2064
...
...
@@ -4,6 +4,7 @@
#include "mace/core/common.h"
#include "mace/core/workspace.h"
#include "mace/core/serializer.h"
namespace
mace
{
...
...
@@ -48,6 +49,11 @@ Tensor* Workspace::GetTensor(const string& name) {
return
const_cast
<
Tensor
*>
(
static_cast
<
const
Workspace
*>
(
this
)
->
GetTensor
(
name
));
}
bool
RunNet
();
// Deserializes every TensorProto embedded in `net_def` onto device `type`
// and stores the resulting tensors in tensor_map_, keyed by tensor name.
// An existing entry with the same name is replaced.
void Workspace::LoadModelTensor(const NetDef& net_def, DeviceType type) {
  Serializer serializer;
  for (auto& tensor_proto : net_def.tensors()) {
    tensor_map_[tensor_proto.name()] = serializer.Deserialize(tensor_proto, type);
  }
}
}
// namespace mace
\ No newline at end of file
mace/core/workspace.h
浏览文件 @
f7bb2064
...
...
@@ -32,10 +32,12 @@ class Workspace {
Tensor
*
GetTensor
(
const
string
&
name
);
void
LoadModelTensor
(
const
NetDef
&
net_def
,
DeviceType
type
);
private:
TensorMap
tensor_map_
;
DISABLE_COPY_AND_ASSIGN
(
Workspace
);
DISABLE_COPY_AND_ASSIGN
(
Workspace
);
};
}
// namespace mace
...
...
mace/examples/BUILD
浏览文件 @
f7bb2064
...
...
@@ -7,7 +7,7 @@ cc_binary(
"helloworld.cc"
,
],
deps
=
[
"//mace/
core:core
"
,
"//mace/
ops:ops
"
,
],
copts
=
[
'-std=c++11'
],
linkopts
=
if_android
([
"-pie"
,
"-llog"
]),
...
...
mace/examples/helloworld.cc
浏览文件 @
f7bb2064
#include "mace/core/logging.h"
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#include "mace/core/net.h"
using
namespace
mace
;
int
main
()
{
LOG
(
INFO
)
<<
"Hello World"
;
// Construct graph
OperatorDef
op_def_0
;
op_def_0
.
add_input
(
"Input"
);
op_def_0
.
add_output
(
"Output0"
);
op_def_0
.
set_name
(
"ReluTest0"
);
op_def_0
.
set_type
(
"Relu"
);
auto
arg_0
=
op_def_0
.
add_arg
();
arg_0
->
set_name
(
"arg0"
);
arg_0
->
set_f
(
0.5
);
OperatorDef
op_def_1
;
op_def_1
.
add_input
(
"Input"
);
op_def_1
.
add_output
(
"Output1"
);
op_def_1
.
set_name
(
"ReluTest1"
);
op_def_1
.
set_type
(
"Relu"
);
auto
arg_1
=
op_def_1
.
add_arg
();
arg_1
->
set_name
(
"arg0"
);
arg_1
->
set_f
(
1.5
);
OperatorDef
op_def_2
;
op_def_2
.
add_input
(
"Output1"
);
op_def_2
.
add_output
(
"Output2"
);
op_def_2
.
set_name
(
"ReluTest2"
);
op_def_2
.
set_type
(
"Relu"
);
auto
arg_2
=
op_def_2
.
add_arg
();
arg_2
->
set_name
(
"arg0"
);
arg_2
->
set_f
(
2.5
);
NetDef
net_def
;
net_def
.
set_name
(
"NetTest"
);
net_def
.
add_op
()
->
CopyFrom
(
op_def_0
);
net_def
.
add_op
()
->
CopyFrom
(
op_def_1
);
net_def
.
add_op
()
->
CopyFrom
(
op_def_2
);
auto
input
=
net_def
.
add_tensors
();
input
->
set_name
(
"Input"
);
input
->
set_data_type
(
DataType
::
DT_FLOAT
);
input
->
add_dims
(
2
);
input
->
add_dims
(
3
);
for
(
int
i
=
0
;
i
<
6
;
++
i
)
{
input
->
add_float_data
(
i
-
3
);
}
VLOG
(
0
)
<<
net_def
.
DebugString
();
// Create workspace and input tensor
Workspace
ws
;
ws
.
LoadModelTensor
(
net_def
,
DeviceType
::
CPU
);
// Create Net & run
auto
net
=
CreateNet
(
net_def
,
&
ws
,
DeviceType
::
CPU
);
net
->
Run
();
auto
out_tensor
=
ws
.
GetTensor
(
"Output2"
);
out_tensor
->
DebugPrint
();
return
0
;
}
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录