机器未来 / Paddle · forked from PaddlePaddle / Paddle
Commit 1efa91dd
Authored April 18, 2019 by superjomn

add hash helper function

Parent: cd7018bf
Showing 5 changed files with 115 additions and 25 deletions (+115 −25)
paddle/fluid/lite/core/target_wrapper.cc  +31 −0
paddle/fluid/lite/core/target_wrapper.h   +54 −25
paddle/fluid/lite/utils/all.h             +1 −0
paddle/fluid/lite/utils/hash.h            +28 −0
paddle/fluid/lite/x86/target_wrapper.cc   +1 −0
paddle/fluid/lite/core/target_wrapper.cc
new file mode 100644
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/lite/core/target_wrapper.h"
#include "paddle/fluid/lite/utils/all.h"
namespace paddle {
namespace lite {

size_t Place::hash() const {
  std::hash<int> h;
  size_t hash = h(static_cast<int>(target));
  hash = hash_combine(hash, static_cast<int>(precision));
  hash = hash_combine(hash, static_cast<int>(layout));
  hash = hash_combine(hash, static_cast<int>(device));
  return hash;
}

}  // namespace lite
}  // namespace paddle
\ No newline at end of file
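
A minimal sketch (illustration only, not part of this commit) of how the new Place::hash() might be consumed, for example to key an std::unordered_map; the PlaceHash functor and KernelIndex alias below are hypothetical names:

// Illustration only; PlaceHash is a hypothetical helper, not part of this commit.
#include <string>
#include <unordered_map>
#include "paddle/fluid/lite/core/target_wrapper.h"

namespace paddle {
namespace lite {

// Adapter so Place can key standard unordered containers,
// delegating to the Place::hash() defined above.
struct PlaceHash {
  size_t operator()(const Place& p) const { return p.hash(); }
};

}  // namespace lite
}  // namespace paddle

// Example: index kernel names by their execution place
// (Place::operator== provides the required equality).
using KernelIndex =
    std::unordered_map<paddle::lite::Place, std::string, paddle::lite::PlaceHash>;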
paddle/fluid/lite/core/target_wrapper.h
@@ -14,13 +14,20 @@
 #pragma once
 #include <iostream>
+#include <sstream>

 namespace paddle {
 namespace lite {

-enum class TargetType : int { kHost = 0, kX86, kCUDA, kLastAsPlaceHolder };
-enum class PrecisionType : int { kFloat = 0, kInt8, kLastAsPlaceHolder };
-enum class DataLayoutType : int { kNCHW = 0, kLastAsPlaceHolder };
+enum class TargetType : int {
+  kUnk = 0,
+  kHost,
+  kX86,
+  kCUDA,
+  kLastAsPlaceHolder
+};
+enum class PrecisionType : int { kUnk = 0, kFloat, kInt8, kLastAsPlaceHolder };
+enum class DataLayoutType : int { kUnk = 0, kNCHW, kLastAsPlaceHolder };

 // Some helper macro to get a specific TargetType.
 #define TARGET(item__) paddle::lite::TargetType::item__
@@ -30,14 +37,34 @@ enum class DataLayoutType : int { kNCHW = 0, kLastAsPlaceHolder };
 #define PRECISION_VAL(item__) static_cast<int>(PRECISION(item__))
 #define DATALAYOUT(item__) paddle::lite::DataLayoutType::item__

+constexpr const int kNumPrecisions =
+    PRECISION_VAL(kLastAsPlaceHolder) - PRECISION_VAL(kFloat);
+constexpr const int kNumTargets =
+    TARGET_VAL(kLastAsPlaceHolder) - TARGET_VAL(kHost);
+
+static const std::string target2string[] = {"unk", "host", "x86", "cuda"};
+static const std::string& TargetToStr(TargetType target) {
+  return target2string[static_cast<int>(target)];
+}
+
+static const std::string precision2string[] = {"unk", "float", "int8"};
+static const std::string& PrecisionToStr(PrecisionType precision) {
+  return precision2string[static_cast<int>(precision)];
+}
+
+static const std::string datalayout2string[] = {"unk", "NCHW"};
+static const std::string& DataLayoutToStr(DataLayoutType x) {
+  return datalayout2string[static_cast<int>(x)];
+}
+
 /*
  * Place specifies the execution context of a Kernel or input/output for a
  * kernel. It is used to make the analysis of the MIR more clear and accurate.
  */
 struct Place {
-  TargetType target{TARGET(kHost)};
-  PrecisionType precision{PRECISION(kFloat)};
-  DataLayoutType layout{DATALAYOUT(kNCHW)};
+  TargetType target{TARGET(kUnk)};
+  PrecisionType precision{PRECISION(kUnk)};
+  DataLayoutType layout{DATALAYOUT(kUnk)};
   short device{0};  // device ID

   Place() = default;
@@ -45,31 +72,33 @@ struct Place {
         DataLayoutType layout = DATALAYOUT(kNCHW), short device = 0)
       : target(target), precision(precision), layout(layout), device(device) {}

+  bool is_valid() const {
+    return target != TARGET(kUnk) && precision != PRECISION(kUnk) &&
+           layout != DATALAYOUT(kUnk);
+  }
+
+  size_t hash() const;
+
   bool operator==(const Place& other) const {
     return target == other.target && precision == other.precision &&
            layout == other.layout && device == other.device;
   }
-};

-constexpr const int kNumPrecisions =
-    PRECISION_VAL(kLastAsPlaceHolder) - PRECISION_VAL(kFloat);
-constexpr const int kNumTargets =
-    TARGET_VAL(kLastAsPlaceHolder) - TARGET_VAL(kHost);
-
-static const std::string target2string[] = {"host", "x86", "cuda"};
-static const std::string& TargetToStr(TargetType target) {
-  return target2string[static_cast<int>(target)];
-}
-
-static const std::string precision2string[] = {"float", "int8"};
-static const std::string& PrecisionToStr(PrecisionType precision) {
-  return precision2string[static_cast<int>(precision)];
-}
-
-static const std::string datalayout2string[] = {"NCHW"};
-static const std::string& DataLayoutToStr(DataLayoutType x) {
-  return datalayout2string[static_cast<int>(x)];
-}
+  friend bool operator<(const Place& a, const Place& b) {
+    if (a.target != b.target) return a.target < b.target;
+    if (a.precision != b.precision) return a.precision < b.precision;
+    if (a.layout != b.layout) return a.layout < b.layout;
+    if (a.device != b.device) return a.device < b.device;
+    return true;
+  }
+
+  std::string DebugString() const {
+    std::stringstream os;
+    os << TargetToStr(target) << "/" << PrecisionToStr(precision) << "/"
+       << DataLayoutToStr(layout);
+    return os.str();
+  }
+};

 // Event sync for multi-stream devices like CUDA and OpenCL.
 // For the devices without support of stream, leave it empty.
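
For orientation, a minimal usage sketch (not part of the diff) of the reworked Place: with the new kUnk defaults a default-constructed Place reports is_valid() == false until its fields are set, and DebugString() renders it as target/precision/layout. It assumes the constructor's two leading parameters are target and precision, as implied by the initializer list shown above:

// Illustration only, compiled against the header above.
#include <iostream>
#include "paddle/fluid/lite/core/target_wrapper.h"

int main() {
  paddle::lite::Place p;                 // target/precision/layout default to kUnk
  std::cout << p.is_valid() << "\n";     // prints 0
  p = paddle::lite::Place(TARGET(kX86), PRECISION(kFloat));  // layout defaults to kNCHW
  std::cout << p.is_valid() << "\n";     // prints 1
  std::cout << p.DebugString() << "\n";  // prints "x86/float/NCHW"
  return 0;
}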
paddle/fluid/lite/utils/all.h
@@ -16,5 +16,6 @@
 #include "paddle/fluid/lite/utils/check.h"
 #include "paddle/fluid/lite/utils/factory.h"
+#include "paddle/fluid/lite/utils/hash.h"
 #include "paddle/fluid/lite/utils/macros.h"
 #include "paddle/fluid/lite/utils/varient.h"
paddle/fluid/lite/utils/hash.h
new file mode 100644
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <functional>
namespace paddle {
namespace lite {

template <typename T>
inline size_t hash_combine(size_t s, const T& v) {
  std::hash<T> h;
  return (s ^ h(v)) + 0x9e3779b9 + (s << 6) + (s >> 2);
}

}  // namespace lite
}  // namespace paddle
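
hash_combine follows the familiar Boost-style mixing recipe (the 0x9e3779b9 golden-ratio constant plus shifts), so a composite key can be hashed by folding in one field at a time, exactly as Place::hash() does. A small sketch under that reading; the Pair2D type and HashPair2D helper are made up for illustration:

// Illustration only; Pair2D and HashPair2D are hypothetical, not part of this commit.
#include <cstddef>
#include <functional>
#include <string>
#include "paddle/fluid/lite/utils/hash.h"

struct Pair2D {
  std::string name;
  int index{0};
};

// Fold each field into a single seed with the new helper.
inline size_t HashPair2D(const Pair2D& p) {
  size_t seed = std::hash<std::string>()(p.name);
  seed = paddle::lite::hash_combine(seed, p.index);
  return seed;
}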
paddle/fluid/lite/x86/target_wrapper.cc
@@ -14,6 +14,7 @@
 #include "paddle/fluid/lite/core/target_wrapper.h"
 #include <algorithm>
+#include "paddle/fluid/lite/utils/all.h"

 namespace paddle {
 namespace lite {