Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle; in sync with the upstream project)

Commit 957258d9 (unverified)
Authored on Jul 04, 2022 by Ruibiao Chen; committed via GitHub on Jul 04, 2022

Remove boost::optional and boost::none (#44029)
Parent commit: f1e61f04

Showing 6 changed files with 51 additions and 55 deletions (+51 -55)
Changed files:

paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc  (+0 -1)
paddle/fluid/operators/sequence_ops/sequence_concat_op.h   (+0 -1)
paddle/fluid/platform/mkldnn_reuse.h                       (+0 -1)
paddle/fluid/pybind/reader_py.cc                           (+1 -1)
paddle/infrt/host_context/mlir_to_runtime_translate.cc     (+49 -50)
paddle/infrt/host_context/mlir_to_runtime_translate.h      (+1 -1)
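All six files follow the same mechanical migration: the "boost/optional.hpp" include is dropped, boost::optional<T> return types become paddle::optional<T>, and boost::none becomes paddle::none. The snippet below is a minimal, self-contained sketch of that before/after pattern, not Paddle code: ParseInt32 is a made-up function and std::optional stands in for the optional type so the example compiles without Boost or Paddle headers.

// Hypothetical illustration of the migration pattern in this commit.
// std::optional is only a stand-in so the sketch builds on its own.
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

// Before: boost::optional<int32_t> ParseInt32(const std::string& s);
// After:  paddle::optional<int32_t> ParseInt32(const std::string& s);
std::optional<int32_t> ParseInt32(const std::string& s) {
  try {
    return static_cast<int32_t>(std::stol(s));  // value present
  } catch (const std::exception&) {
    return std::nullopt;  // the slot that boost::none / paddle::none fills
  }
}

int main() {
  if (auto v = ParseInt32("42")) std::cout << *v << "\n";      // prints 42
  if (!ParseInt32("not-a-number")) std::cout << "no value\n";  // prints no value
  return 0;
}

The diff only touches spelled-out types, the none returns, and the includes, which suggests paddle::optional keeps a boost-compatible interface (implicit construction from a value, a none sentinel, operator bool and dereference) so that call sites need no other change.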
paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc

@@ -12,7 +12,6 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "boost/optional.hpp"
 #include "paddle/fluid/framework/data_layout_transform.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/memory/malloc.h"
paddle/fluid/operators/sequence_ops/sequence_concat_op.h

@@ -17,7 +17,6 @@
 #include <utility>
 #include <vector>
-#include "boost/optional.hpp"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/concat_and_split.h"
paddle/fluid/platform/mkldnn_reuse.h

@@ -20,7 +20,6 @@ limitations under the License. */
 #include <utility>
 #include <vector>
-#include "boost/optional.hpp"
 #include "paddle/fluid/framework/data_layout_transform.h"
 #include "paddle/fluid/framework/operator.h"
 #include "paddle/fluid/operators/pool_op.h"
paddle/fluid/pybind/reader_py.cc

@@ -22,7 +22,7 @@
 #include <vector>
 #include "Python.h"
-#include "boost/optional.hpp"
 #include "gflags/gflags.h"
 #include "paddle/fluid/framework/reader.h"
 #include "paddle/fluid/imperative/layer.h"
paddle/infrt/host_context/mlir_to_runtime_translate.cc

@@ -31,7 +31,6 @@
 #include <utility>
 #include <vector>
-#include "boost/optional.hpp"
 #include "paddle/infrt/common/string.h"
 #include "paddle/infrt/dialect/dense_tensor.h"
 #include "paddle/infrt/dialect/mlir_loader.h"

@@ -124,111 +123,111 @@ bool MlirToRuntimeTranslator::EmitConstantOp(mlir::Operation* op) {
 }

 template <>
-boost::optional<int32_t> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<int32_t> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::IntegerAttr>()) return boost::none;
+  if (!attr.isa<mlir::IntegerAttr>()) return paddle::none;
   if (attr.isa<mlir::IntegerAttr>()) {
     auto val = attr.cast<mlir::IntegerAttr>();
     if (val.getType().isInteger(32)) {
       return val.getValue().getSExtValue();
     }
   }
-  return boost::none;
+  return paddle::none;
 }

 template <>
-boost::optional<int64_t> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<int64_t> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::IntegerAttr>()) return boost::none;
+  if (!attr.isa<mlir::IntegerAttr>()) return paddle::none;
   if (attr.isa<mlir::IntegerAttr>()) {
     auto val = attr.cast<mlir::IntegerAttr>();
     if (val.getType().isInteger(64)) {
       return val.getValue().getSExtValue();
     }
   }
-  return boost::none;
+  return paddle::none;
 }

 // TODO(Superjomn) Make double and float parsing share some thing.
 template <>
-boost::optional<float> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<float> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::FloatAttr>()) return boost::none;
+  if (!attr.isa<mlir::FloatAttr>()) return paddle::none;
   if (attr.isa<mlir::FloatAttr>()) {
     auto val = attr.cast<mlir::FloatAttr>();
     if (val.getType().isF32()) return val.getValueAsDouble();
   }
-  return boost::none;
+  return paddle::none;
 }

 template <>
-boost::optional<bool> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<bool> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::BoolAttr>()) return boost::none;
+  if (!attr.isa<mlir::BoolAttr>()) return paddle::none;
   if (attr.isa<mlir::BoolAttr>()) {
     auto val = attr.cast<mlir::BoolAttr>();
     return val.getValue();
   }
-  return boost::none;
+  return paddle::none;
 }

 template <>
-boost::optional<double> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<double> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::FloatAttr>()) return boost::none;
+  if (!attr.isa<mlir::FloatAttr>()) return paddle::none;
   if (attr.isa<mlir::FloatAttr>()) {
     auto val = attr.cast<mlir::FloatAttr>();
     if (val.getType().isF64()) return val.getValueAsDouble();
   }
-  return boost::none;
+  return paddle::none;
 }

 template <>
-boost::optional<::infrt::TargetType> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<::infrt::TargetType> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<::infrt::TargetAttr>()) return boost::none;
+  if (!attr.isa<::infrt::TargetAttr>()) return paddle::none;
   if (attr.isa<::infrt::TargetAttr>()) {
     return attr.cast<::infrt::TargetAttr>().getTarget();
   }
-  return boost::none;
+  return paddle::none;
 }

 template <>
-boost::optional<::infrt::LayoutType> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<::infrt::LayoutType> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<::infrt::LayoutAttr>()) return boost::none;
+  if (!attr.isa<::infrt::LayoutAttr>()) return paddle::none;
   if (attr.isa<::infrt::LayoutAttr>()) {
     return attr.cast<::infrt::LayoutAttr>().getLayout();
   }
-  return boost::none;
+  return paddle::none;
 }

 template <>
-boost::optional<::infrt::PrecisionType> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<::infrt::PrecisionType> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<::infrt::PrecisionAttr>()) return boost::none;
+  if (!attr.isa<::infrt::PrecisionAttr>()) return paddle::none;
   if (attr.isa<::infrt::PrecisionAttr>()) {
     return attr.cast<::infrt::PrecisionAttr>().getPrecision();
   }
-  return boost::none;
+  return paddle::none;
 }

 template <>
-boost::optional<std::string> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<std::string> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::StringAttr>()) return boost::none;
+  if (!attr.isa<mlir::StringAttr>()) return paddle::none;
   return attr.cast<mlir::StringAttr>().getValue().str();
 }

 #define PROCESS_ARRAY_INT(type__, bits__) \
   template <> \
-  boost::optional<std::vector<type__>> MlirToRuntimeTranslator::EmitAttribute( \
-      const mlir::Attribute& attr) { \
-    if (!attr.isa<mlir::ArrayAttr>()) return boost::none; \
+  paddle::optional<std::vector<type__>> \
+  MlirToRuntimeTranslator::EmitAttribute(const mlir::Attribute& attr) { \
+    if (!attr.isa<mlir::ArrayAttr>()) return paddle::none; \
     auto array = attr.cast<mlir::ArrayAttr>(); \
     CHECK(!array.empty()); \
     \
     if (!array[0].getType().isInteger(bits__)) { \
-      return boost::none; \
+      return paddle::none; \
     } \
     \
     std::vector<type__> res; \

@@ -244,13 +243,13 @@ PROCESS_ARRAY_INT(int32_t, 32);
 PROCESS_ARRAY_INT(int64_t, 64);

 template <>
-boost::optional<std::vector<float>> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<std::vector<float>> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::ArrayAttr>()) return boost::none;
+  if (!attr.isa<mlir::ArrayAttr>()) return paddle::none;
   auto array = attr.cast<mlir::ArrayAttr>();
   CHECK(!array.empty());

-  if (!array[0].getType().isF32()) return boost::none;
+  if (!array[0].getType().isF32()) return paddle::none;

   std::vector<float> res;
   for (auto& v : array) {

@@ -260,13 +259,13 @@ boost::optional<std::vector<float>> MlirToRuntimeTranslator::EmitAttribute(
 }

 template <>
-boost::optional<std::vector<double>> MlirToRuntimeTranslator::EmitAttribute(
+paddle::optional<std::vector<double>> MlirToRuntimeTranslator::EmitAttribute(
     const mlir::Attribute& attr) {
-  if (!attr.isa<mlir::ArrayAttr>()) return boost::none;
+  if (!attr.isa<mlir::ArrayAttr>()) return paddle::none;
   auto array = attr.cast<mlir::ArrayAttr>();
   CHECK(!array.empty());

-  if (!array[0].getType().isF64()) return boost::none;
+  if (!array[0].getType().isF64()) return paddle::none;

   std::vector<double> res;
   for (auto& v : array) {
paddle/infrt/host_context/mlir_to_runtime_translate.h

@@ -75,7 +75,7 @@ class MlirToRuntimeTranslator {
   bool EmitCallOp(mlir::Operation* op, function_defs_t* function_table);

   template <typename T>
-  boost::optional<T> EmitAttribute(const mlir::Attribute& attr);
+  paddle::optional<T> EmitAttribute(const mlir::Attribute& attr);

   Value* GetOpResult(mlir::Operation* op);
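The header change above is the whole interface shift: EmitAttribute<T> now advertises paddle::optional<T> instead of boost::optional<T>, keeping the same contract of an engaged optional on a type match and none otherwise. The sketch below models that contract with standard-library stand-ins only (std::optional for paddle::optional, a std::variant for mlir::Attribute, and a free EmitAttribute function instead of the member template); it is an illustrative assumption, not the real Paddle or MLIR API.

// Self-contained model of the EmitAttribute<T> contract: engaged optional on a
// type match, disengaged ("none") on a mismatch. All names here are stand-ins.
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <variant>
#include <vector>

// Stand-in for mlir::Attribute: a closed set of attribute payloads.
using Attribute = std::variant<int32_t, double, std::string, std::vector<float>>;

// Stand-in for paddle::optional<T> MlirToRuntimeTranslator::EmitAttribute(...).
template <typename T>
std::optional<T> EmitAttribute(const Attribute& attr) {
  if (const T* v = std::get_if<T>(&attr)) return *v;  // attribute holds a T
  return std::nullopt;                                // otherwise: "none"
}

int main() {
  Attribute a = std::string("relu");
  if (auto s = EmitAttribute<std::string>(a)) std::cout << "string: " << *s << "\n";
  if (!EmitAttribute<int32_t>(a)) std::cout << "not an int32 attribute\n";

  Attribute b = std::vector<float>{1.0f, 2.0f};
  if (auto vec = EmitAttribute<std::vector<float>>(b))
    std::cout << "array of " << vec->size() << " floats\n";
  return 0;
}

The usage in main mirrors how a translator probes an attribute for several candidate types until one specialization returns an engaged value.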