PaddlePaddle / Paddle

Commit ed0990e7 (unverified)
Authored on Feb 07, 2022 by Yan Chunwei; committed via GitHub on Feb 07, 2022

INFRT/Refine TensorMap (2nd PR) (#39262)

Parent: 0c43ce22

Showing 17 changed files with 319 additions and 13 deletions (+319 -13)
paddle/infrt/dialect/basic_kernels.td                 +3   -3
paddle/infrt/dialect/dense_tensor.td                  +45  -5
paddle/infrt/host_context/kernel_frame.h              +8   -0
paddle/infrt/host_context/kernel_utils.h              +1   -1
paddle/infrt/host_context/value.h                     +16  -0
paddle/infrt/host_context/value_test.cc               +10  -0
paddle/infrt/kernel/tensor_kernels.cc                 +63  -4
paddle/infrt/tests/.gitignore                         +7   -0
paddle/infrt/tests/CMakeLists.txt                     +2   -0
paddle/infrt/tests/dialect/tensor/.gitignore          +5   -0
paddle/infrt/tests/dialect/tensor/dense_tensor.mlir   +24  -0
paddle/infrt/tests/dialect/tensor/naive_kernels.mlir  +35  -0
paddle/infrt/tests/dialect/tensor/tensor_map.mlir.in  +16  -0
paddle/infrt/tests/dialect/tensor/tensor_shape.mlir   +8   -0
paddle/infrt/tests/dialect/tensor/tensor_type.mlir    +10  -0
paddle/scripts/infrt_build.sh                         +10  -0
tools/infrt/fake_models/multi_fc.py                   +56  -0
paddle/infrt/dialect/basic_kernels.td

@@ -106,10 +106,10 @@ class PrintOp<string suffix, Type type> : INFRT_Op<"print." # suffix> {
   let verifier = ?;
 }
 
-def PrintI32Op : PrintOp<"i32", I32>;
-def PrintI64Op : PrintOp<"i64", I64>;
+def PrintI32Op : PrintOp<"i32", I32>;
+def PrintI64Op : PrintOp<"i64", I64>;
 def PrintF32Op : PrintOp<"f32", F32>;
-def PrintF64Op : PrintOp<"f64", F64>;
+def PrintF64Op : PrintOp<"f64", F64>;
 
 def GetStringOp : INFRT_Op<"get_string"> {
   let summary = "infrt.get_string";
paddle/infrt/dialect/dense_tensor.td

@@ -112,23 +112,35 @@ def LoadParamsOp : DT_Op<"load_params", [NoSideEffect]> {
   let verifier = ?;
 }
 
-def GetParamOp : DT_Op<"get_param", [NoSideEffect]> {
-  let summary = "dt.get_param operation";
+def TensorMapGetTensorOp : DT_Op<"tensor_map_get_tensor", [NoSideEffect]> {
+  let summary = "dt.tensor_map_get_tensor operation";
 
   let description = [{
-    An operation that can get a tensor from TensorMap.
+    An operation that can get a tensor from a TensorMap.
   }];
 
   // input path of model params.
   let arguments = (ins
       TensorMapType:$map,
-      StrAttr:$name
+      StringType:$name
   );
   let results = (outs TensorType:$output);
-  let assemblyFormat = "`(` $map `,` $name `)` attr-dict `->` type($output)";
+  let assemblyFormat = "`(` operands `)` attr-dict `->` type($output)";
   let verifier = ?;
 }
 
+def TensorMapGetSizeOp : DT_Op<"tensor_map_get_size", [NoSideEffect]> {
+  let summary = "ddt.tensor_map_get_size operation";
+  let description = [{
+    An operation that get the size of a TensorMap.
+  }];
+  let arguments = (ins TensorMapType:$map);
+  let results = (outs I32:$size);
+  let assemblyFormat = "`(` $map `)` attr-dict `->` type($size)";
+}
+
 def GetTensorShapeOp : DT_Op<"get_tensor_shape", [NoSideEffect]> {
   let summary = "dt.get_tensor_shape operation";

@@ -141,10 +153,38 @@ def GetTensorShapeOp : DT_Op<"get_tensor_shape", [NoSideEffect]> {
   let assemblyFormat = "$input attr-dict `:` type($input) `->` type($output)";
 }
 
+class NaiveElementwiseAddOp<string dtype> :
+    DT_Op<"naive_elementwise_add." # dtype, [NoSideEffect]> {
+  let summary = "dt.naive_elementwise_add operation";
+
+  let description = [{
+    Naive elementwise_add operation.
+    Just for testing.
+  }];
+
+  let arguments = (ins TensorType:$a, TensorType:$b);
+  let results = (outs TensorType:$output);
+  let assemblyFormat = "`(` $a `,` $b `)` attr-dict `:` `(` type($a) `,` type($b) `)` `->` type($output)";
+}
+
+class NaiveMatmulOp<string dtype> :
+    DT_Op<"naive_matmul." # dtype, [NoSideEffect]> {
+  let summary = "dt.naive_matmul operation";
+
+  let description = [{
+    Naive matmul operation.
+    Just for testing.
+  }];
+
+  let arguments = (ins TensorType:$x, TensorType:$w);
+  let results = (outs TensorType:$output);
+  let assemblyFormat = "`(` $x `,` $w `)` attr-dict `:` `(` type($x) `,` type($w) `)` `->` type($output)";
+}
+
 foreach dtype = ["ui8", "ui16", "ui32", "ui64", "i32", "f32", "f64", "i64"] in {
   def DT_CreateUninitTensorOp_#dtype : CreateUninitTensorOp<dtype>;
   def DT_FillTensorOp_#dtype : FillTensorWithConstantOp<dtype>;
   def DT_SetTensorOp_#dtype : SetTensorOp<dtype>;
+  def DT_NaiveElementwiseAddOp_#dtype : NaiveElementwiseAddOp<dtype>;
+  def DT_NaiveMatmulOp_#dtype : NaiveMatmulOp<dtype>;
 }
 
 #endif  // DT_OPS
paddle/infrt/host_context/kernel_frame.h

@@ -37,6 +37,14 @@ class KernelFrame {
            (num_results_ == -1 ? 0 : num_results_);
   }
 
+  //! Get something at a specific position \p index. The element might be an
+  //! argument, an attribute or a result.
+  template <typename T>
+  T& GetElementAt(int index) {
+    CHECK_LT(index, GetNumArgs() + GetNumAttributes() + GetNumResults());
+    return value_or_attrs_[index]->template get_or_default<T>();
+  }
+
   template <typename T>
   T& GetArgAt(int index) {
     CHECK_LT(index, GetNumArgs());
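
The new accessor folds arguments, attributes, and results into one flat index space, and by routing through get_or_default a slot that has not been written yet (typically a result) comes back default-constructed instead of failing. A minimal standalone sketch of that access pattern, with std::any standing in for infrt's Value (FrameSketch and its members are illustrative, not the real classes):

#include <any>
#include <cassert>
#include <cstddef>
#include <vector>

// Sketch of the GetElementAt idea: one flat index space over arguments,
// attributes and results, default-constructing a slot on first access the
// way Value::get_or_default does. std::any stands in for infrt's Value.
class FrameSketch {
 public:
  explicit FrameSketch(std::size_t n) : slots_(n) {}

  template <typename T>
  T& GetElementAt(std::size_t index) {
    assert(index < slots_.size());      // mirrors the CHECK_LT bounds check
    std::any& slot = slots_[index];
    if (!slot.has_value()) slot = T{};  // mirrors get_or_default<T>()
    return *std::any_cast<T>(&slot);
  }

 private:
  std::vector<std::any> slots_;  // args, then attributes, then results
};

int main() {
  FrameSketch frame(3);
  frame.GetElementAt<int>(0) = 42;          // argument slot
  frame.GetElementAt<float>(2) = 1.5f;      // result slot, written by kernel
  assert(frame.GetElementAt<int>(0) == 42);
  assert(frame.GetElementAt<int>(1) == 0);  // untouched slot -> default value
  return 0;
}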
paddle/infrt/host_context/kernel_utils.h

@@ -244,7 +244,7 @@ struct KernelImpl<Return (*)(Args...), impl_fn> {
       static_assert(out_idx == 0, "Arguments should appear before results");
       static_assert(const_idx == 0,
                     "Arguments and results should appear before attributes.");
-      auto* arg = &frame->GetArgAt<Head>(in_idx);
+      auto* arg = &frame->template GetElementAt<Head>(in_idx);
       KernelCallHelper<Tail...>::template Invoke<in_idx + 1, out_idx, const_idx>(
           frame, pargs...,
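
Besides switching from GetArgAt to the unified GetElementAt, the replacement line adds the template disambiguator, which C++ requires when a member template is called through an expression of dependent type. A reduced illustration with hypothetical types (not infrt's real classes):

// Why `frame->template GetElementAt<Head>(...)` is spelled with `template`:
// when the frame's type is a template parameter, the compiler cannot know
// GetElementAt names a member template, so `GetElementAt < Head` would be
// parsed as a less-than comparison. Illustrative reduction.
struct Frame {
  template <typename T>
  T GetElementAt(int) { return T{}; }
};

template <typename FrameT, typename Head>
Head Invoke(FrameT* frame) {
  // return frame->GetElementAt<Head>(0);   // ill-formed: parsed as `<`
  return frame->template GetElementAt<Head>(0);
}

int main() {
  Frame f;
  return Invoke<Frame, int>(&f);  // returns 0
}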
paddle/infrt/host_context/value.h

@@ -28,6 +28,9 @@
 #include "paddle/infrt/tensor/dense_tensor_view.h"
 #include "paddle/infrt/tensor/tensor_map.h"
 #include "paddle/infrt/tensor/tensor_shape.h"
+// Disabled temporarily for failed compile, will enable latter.
+// #include "paddle/pten/backends/cpu/cpu_context.h"
+// #include "paddle/pten/core/dense_tensor.h"
 
 namespace infrt {
 namespace host_context {

@@ -82,13 +85,25 @@ class Value : public common::Object {
   template <typename T>
   const T& get() const {
     CHECK(data.template is<T>());
     return data.get<T>();
   }
 
   template <typename T>
   T& get() {
     CHECK(data.template is<T>());
     return data.get<T>();
   }
 
+  //! Get the value if assigned before or return a default value instead.
+  template <class T>
+  T& get_or_default() {
+    if (!data.template is<T>()) {
+      this->set(T{});
+    }
+    return get<T>();
+  }
+
   template <typename T>
   void set(T&& v) {
     data = std::move(v);

@@ -124,6 +139,7 @@ class ValueRef : common::Shared<Value> {
   using common::Shared<Value>::Reset;
   using common::Shared<Value>::operator->;
   using common::Shared<Value>::operator*;
+  //! Get a readonly data.
   template <typename T>
   const T& get() const {
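
get_or_default is what backs KernelFrame::GetElementAt above: if the variant inside Value does not currently hold a T, a default-constructed T is installed first, so the caller always gets a usable reference. A self-contained approximation over std::variant with illustrative names (infrt's Value wraps its own variant and CHECK machinery):

#include <cassert>
#include <string>
#include <variant>

// Approximation of Value::get_or_default over std::variant: install a
// default-constructed T when the variant holds something else, then return
// a reference to the held T.
struct ValueSketch {
  std::variant<std::monostate, int, float, std::string> data;

  template <class T>
  T& get_or_default() {
    if (!std::holds_alternative<T>(data)) data = T{};  // lazy default init
    return std::get<T>(data);
  }
};

int main() {
  ValueSketch v;
  assert(v.get_or_default<int>() == 0);  // unset -> default-constructed int
  v.get_or_default<std::string>() += "hi";
  assert(v.get_or_default<std::string>() == "hi");
  return 0;
}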
paddle/infrt/host_context/value_test.cc

@@ -30,5 +30,15 @@ TEST(ValueRef, test) {
   ASSERT_EQ(z.get<bool>(), true);
 }
 
+// If the value is not assign, the get_or_default should return a default value.
+TEST(Value, init) {
+  Value x;
+  ASSERT_EQ(x.get_or_default<int>(), 0);
+
+  Value tensor;
+  auto& t = tensor.get_or_default<tensor::DenseHostTensor>();
+  ASSERT_EQ(t.shape().GetRank(), 0);
+}
+
 }  // namespace host_context
 }  // namespace infrt
paddle/infrt/kernel/tensor_kernels.cc

@@ -53,13 +53,62 @@ TensorMap LoadParams(const std::string &path) {
   return *(infrt::tensor::LoadParams(path));
 }
 
-DenseHostTensor GetParam(TensorMap map, Attribute<std::string> nameAttr) {
-  auto &name = nameAttr.get();
-  return *(map[name]);
+void TensorMapGetTensor(TensorMap map,
+                        const std::string &name,
+                        DenseHostTensor *out) {
+  auto it = map.find(name);
+  CHECK(it != map.end()) << "No tensor called " << name
+                         << " in the TensorMap";
+  *out = *it->second;
 }
 
+int32_t TensorMapGetSize(TensorMap map) { return map.size(); }
+
 DenseHostTensor ShallowCopyTensor(DenseHostTensor v) { return v; }
 
+template <typename T>
+void NaiveElementwiseAdd(const DenseHostTensor &x,
+                         const DenseHostTensor &y,
+                         DenseHostTensor *out) {
+  CHECK_EQ(x.shape().GetNumElements(), y.shape().GetNumElements());
+
+  // Infer shape
+  *out = DenseHostTensor(x.shape(), GetDType<T>());
+  const T *x_data = static_cast<T *>(x.raw_data());
+  const T *y_data = static_cast<T *>(y.raw_data());
+  T *out_data = static_cast<T *>(out->raw_data());
+  for (size_t i = 0, n = x.shape().GetNumElements(); i < n; i++) {
+    out_data[i] = x_data[i] + y_data[i];
+  }
+}
+
+//! A naive implementation for x matmul w
+template <typename T>
+void NaiveMatmul(const DenseHostTensor &x,
+                 const DenseHostTensor &w,
+                 DenseHostTensor *out) {
+  CHECK_EQ(x.shape().GetRank(), 2);
+  CHECK_EQ(w.shape().GetRank(), 2);
+  CHECK_EQ(x.shape().GetDim(x.shape().GetRank() - 1), w.shape().GetDim(0));
+  std::vector<int64_t> out_dims({x.shape().GetDim(0), w.shape().GetDim(1)});
+  *out = DenseHostTensor(TensorShape(out_dims), GetDType<T>());
+
+  auto *out_data = static_cast<T *>(out->raw_data());
+  auto *x_data = static_cast<const T *>(x.raw_data());
+  auto *w_data = static_cast<const T *>(w.raw_data());
+
+  const int M = x.shape().GetDim(0);
+  const int K = x.shape().GetDim(1);
+  const int N = w.shape().GetDim(1);
+  for (int i = 0; i < M; i++) {
+    for (int j = 0; j < N; j++) {
+      for (int k = 0; k < K; k++) {
+        out_data[i * N + j] += x_data[i * K + k] * w_data[k * N + j];
+      }
+    }
+  }
+}
+
 /// ===== Kernel end ====
 
 void RegisterTensorKernels(host_context::KernelRegistry *registry) {

@@ -71,10 +120,20 @@ void RegisterTensorKernels(host_context::KernelRegistry *registry) {
                       INFRT_KERNEL(FillTensorWithConstant<float>));
   registry->AddKernel("dt.fill_tensor_with_constant.f64",
                       INFRT_KERNEL(FillTensorWithConstant<double>));
+
+  // TensorMap related methods.
   registry->AddKernel("dt.load_params", INFRT_KERNEL(LoadParams));
-  registry->AddKernel("dt.get_param", INFRT_KERNEL(GetParam));
+  registry->AddKernel("dt.tensor_map_get_tensor",
+                      INFRT_KERNEL(TensorMapGetTensor));
+  registry->AddKernel("dt.tensor_map_get_size",
+                      INFRT_KERNEL(TensorMapGetSize));
   registry->AddKernel("dt.shallow_copy_tensor",
                       INFRT_KERNEL(ShallowCopyTensor));
+
+  // Naive kernels.
+  registry->AddKernel("dt.naive_elementwise_add.f32",
+                      INFRT_KERNEL(NaiveElementwiseAdd<float>));
+  registry->AddKernel("dt.naive_matmul.f32", INFRT_KERNEL(NaiveMatmul<float>));
 }
 
 }  // namespace kernel
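
NaiveMatmul accumulates into out_data with +=, so it assumes the freshly constructed DenseHostTensor buffer starts zeroed; with the inputs used by naive_kernels.mlir below (a 2x8 tensor of ones times an 8x4 tensor of twos) each output element is the sum over k of 1*2, i.e. 16. A standalone replay of the triple loop on plain vectors, checking that expectation:

#include <cassert>
#include <vector>

// Replay of NaiveMatmul's i/j/k loop on plain row-major vectors, verifying
// the value FileCheck expects in naive_kernels.mlir: ones(2x8) x twos(8x4)
// yields all 16s. The output is explicitly zero-initialized here, matching
// the += accumulation's assumption about the destination buffer.
int main() {
  const int M = 2, K = 8, N = 4;
  std::vector<float> x(M * K, 1.0f), w(K * N, 2.0f), out(M * N, 0.0f);
  for (int i = 0; i < M; i++)
    for (int j = 0; j < N; j++)
      for (int k = 0; k < K; k++)
        out[i * N + j] += x[i * K + k] * w[k * N + j];
  for (float v : out) assert(v == 16.0f);  // matches the CHECK line
  return 0;
}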
paddle/infrt/tests/.gitignore (new file, mode 100644)

.DS_Store
.idea
*.log
tmp/
Output
paddle/infrt/tests/CMakeLists.txt

@@ -2,3 +2,5 @@ configure_file(lit.cfg.py.in "${CMAKE_SOURCE_DIR}/paddle/infrt/tests/lit.cfg.py"
 add_test(NAME test_infrt_by_lit COMMAND sh -c
          "lit -v ${CMAKE_SOURCE_DIR}/paddle/infrt/tests --filter-out \"disabled_*\""
          DEPENDS infrtopt infrtexec)
+
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/dialect/tensor/tensor_map.mlir.in ${CMAKE_CURRENT_SOURCE_DIR}/dialect/tensor/tensor_map.mlir)
paddle/infrt/tests/dialect/tensor/.gitignore (new file, mode 100644)

.DS_Store
.idea
*.log
tmp/
tensor_map.mlir
paddle/infrt/tests/dialect/tensor/dense_tensor.mlir (new file, mode 100644)

// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: dense_shape0
func @dense_shape0() {
%shape = ts.build_shape [1:i64, 57:i64]
%a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.tensor<X86, NCHW, F32>
infrt.return
}
func @predict(%a: !infrt.tensor<X86, NCHW, F32>, %b: !infrt.tensor<X86, NCHW, F32>) -> (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) {
%a0 = dt.shallow_copy_tensor %a : !infrt.tensor<X86, NCHW, F32> -> !infrt.tensor<X86, NCHW, F32>
%b0 = dt.shallow_copy_tensor %b : !infrt.tensor<X86, NCHW, F32> -> !infrt.tensor<X86, NCHW, F32>
infrt.return %a0, %b0: !infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>
}
func @main() {
%shape = ts.build_shape [1:i64, 57:i64]
%a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.tensor<X86, NCHW, F32>
%b, %c = infrt.call @predict(%a, %a) : (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>)
infrt.return
}
paddle/infrt/tests/dialect/tensor/naive_kernels.mlir (new file, mode 100644)

// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: naive_elementwise_add
func @naive_elementwise_add() {
// create a
%a = dt.create_uninit_tensor.f32 [2:i64, 8:i64] -> !infrt.tensor<X86, NCHW, F32>
dt.fill_tensor_with_constant.f32 (%a : !infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
// create b
%b = dt.create_uninit_tensor.f32 [2:i64, 8:i64] -> !infrt.tensor<X86, NCHW, F32>
dt.fill_tensor_with_constant.f32 (%b : !infrt.tensor<X86, NCHW, F32>) {value=2.0:f32}
// get c
%c = dt.naive_elementwise_add.f32(%a, %b) {} : (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> !infrt.tensor<X86, NCHW, F32>
// CHECK: tensor: shape=shape[2,8], values=[3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
dt.print_tensor (%c : !infrt.tensor<X86, NCHW, F32>)
infrt.return
}
// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: naive_matmul
func @naive_matmul() {
// create a
%a = dt.create_uninit_tensor.f32 [2:i64, 8:i64] -> !infrt.tensor<X86, NCHW, F32>
dt.fill_tensor_with_constant.f32 (%a : !infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
// create b
%b = dt.create_uninit_tensor.f32 [8:i64, 4:i64] -> !infrt.tensor<X86, NCHW, F32>
dt.fill_tensor_with_constant.f32 (%b : !infrt.tensor<X86, NCHW, F32>) {value=2.0:f32}
// get c
%c = dt.naive_matmul.f32(%a, %b) {} : (!infrt.tensor<X86, NCHW, F32>, !infrt.tensor<X86, NCHW, F32>) -> !infrt.tensor<X86, NCHW, F32>
// CHECK: tensor: shape=shape[2,4], values=[16, 16, 16, 16, 16, 16, 16, 16]
dt.print_tensor (%c : !infrt.tensor<X86, NCHW, F32>)
infrt.return
}
paddle/infrt/tests/dialect/tensor/tensor_map.mlir.in (new file, mode 100644)

// RUN: infrtexec -i %s | FileCheck %s
func @load_tensor_map() {
%path = infrt.get_string("@CMAKE_BINARY_DIR@/multi_fc_model")
%map = dt.load_params(%path)
%size = dt.tensor_map_get_size(%map) -> i32
infrt.print.i32 %size
%tensor_name = infrt.get_string("fc_bias")
%a = dt.tensor_map_get_tensor(%map, %tensor_name) -> !infrt.tensor<X86, NCHW, F32>
// CHECK: tensor: shape=shape[2], values=[0, 0]
dt.print_tensor (%a : !infrt.tensor<X86, NCHW, F32>)
infrt.return
}
paddle/infrt/tests/dialect/tensor/tensor_shape.mlir (new file, mode 100644)

// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: @build_tensor1
func @build_tensor1() {
%a = ts.build_shape [1:i64, 57:i64, 92:i64]
// CHECK: shape[1,57,92]
ts.print_shape %a
infrt.return
}
paddle/infrt/tests/dialect/tensor/tensor_type.mlir (new file, mode 100644)

// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: test_tensor_type
func @test_tensor_type() {
%a = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.tensor<X86, NCHW, F32>
dt.fill_tensor_with_constant.f32 (%a : !infrt.tensor<X86, NCHW, F32>) {value=1.0:f32}
// CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
dt.print_tensor (%a : !infrt.tensor<X86, NCHW, F32>)
infrt.return
}
paddle/scripts/infrt_build.sh

@@ -100,7 +100,17 @@ function infrt_gen_and_build() {
     echo "ipipe_log_param_Infrt_Build_Time: $[ $endTime_s - $startTime_s ]s" >> ${PADDLE_ROOT}/build/infrt_summary.txt
 }
 
+function create_fake_models() {
+    cd ${PADDLE_ROOT}/build
+    # create multi_fc model, this will generate "multi_fc_model"
+    python3 -m pip uninstall -y paddlepaddle
+    python3 -m pip install paddlepaddle
+    python3 ${PADDLE_ROOT}/tools/infrt/fake_models/multi_fc.py
+}
+
 function test_infrt() {
+    create_fake_models
+
     # install llvm-lit toolkit
     python3 -m pip install lit
tools/infrt/fake_models/multi_fc.py (new file, mode 100644)

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A fake model with multiple FC layers to test CINN on a more complex model.
"""
import numpy
import sys, os
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.backward import append_backward

size = 2
num_layers = 4
paddle.enable_static()

a = fluid.layers.data(name="A", shape=[-1, size], dtype='float32')
label = fluid.layers.data(name="label", shape=[size], dtype='float32')

fc_out = fluid.layers.fc(input=a,
                         size=size,
                         act="relu",
                         bias_attr=fluid.ParamAttr(name="fc_bias"),
                         num_flatten_dims=1)

for i in range(num_layers - 1):
    fc_out = fluid.layers.fc(input=fc_out,
                             size=size,
                             act="relu",
                             bias_attr=fluid.ParamAttr(name="fc_bias"),
                             num_flatten_dims=1)

cost = fluid.layers.square_error_cost(fc_out, label)
avg_cost = fluid.layers.mean(cost)

optimizer = fluid.optimizer.SGD(learning_rate=0.001)
optimizer.minimize(avg_cost)

cpu = fluid.core.CPUPlace()
loss = exe = fluid.Executor(cpu)

exe.run(fluid.default_startup_program())

fluid.io.save_inference_model("./multi_fc_model", [a.name], [fc_out], exe)

print('output name', fc_out.name)