Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
be523baa
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
be523baa
编写于
3月 01, 2019
作者:
F
flame
提交者:
root
3月 20, 2019
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Add anakin conv2d/relu/sigmoid/tanh converter (#15997)
* add activation op * test conv2d relu sigmoid tanh
上级
d0ce6a90
变更
9
隐藏空白更改
内联
并排
Showing
9 changed files
with
301 additions
and
40 deletions
+301
-40
paddle/fluid/inference/anakin/convert/CMakeLists.txt
paddle/fluid/inference/anakin/convert/CMakeLists.txt
+3
-1
paddle/fluid/inference/anakin/convert/activation.cc
paddle/fluid/inference/anakin/convert/activation.cc
+59
-0
paddle/fluid/inference/anakin/convert/activation.h
paddle/fluid/inference/anakin/convert/activation.h
+23
-24
paddle/fluid/inference/anakin/convert/conv2d.cc
paddle/fluid/inference/anakin/convert/conv2d.cc
+87
-0
paddle/fluid/inference/anakin/convert/conv2d.h
paddle/fluid/inference/anakin/convert/conv2d.h
+11
-10
paddle/fluid/inference/anakin/convert/fc.cc
paddle/fluid/inference/anakin/convert/fc.cc
+0
-4
paddle/fluid/inference/anakin/convert/op_converter.h
paddle/fluid/inference/anakin/convert/op_converter.h
+0
-1
paddle/fluid/inference/anakin/convert/test_activation_op.cc
paddle/fluid/inference/anakin/convert/test_activation_op.cc
+56
-0
paddle/fluid/inference/anakin/convert/test_conv2d_op.cc
paddle/fluid/inference/anakin/convert/test_conv2d_op.cc
+62
-0
未找到文件。
paddle/fluid/inference/anakin/convert/CMakeLists.txt
浏览文件 @
be523baa
# Converter library: one translation unit per supported Paddle op.
cc_library(anakin_op_converter SRCS fc.cc conv2d.cc activation.cc
           DEPS anakin_engine framework_proto scope operator op_registry)

cc_test(test_anakin_fc SRCS test_fc_op.cc DEPS anakin_op_converter mul_op)
cc_test(test_anakin_conv2d SRCS test_conv2d_op.cc DEPS
        ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS}
        anakin_op_converter conv_op im2col vol2col depthwise_conv SERIAL)
cc_test(test_anakin_activation SRCS test_activation_op.cc DEPS
        ${FLUID_CORE_MODULES} ${GLOB_OPERATOR_DEPS}
        activation_op anakin_op_converter SERIAL)
paddle/fluid/inference/anakin/convert/activation.cc
0 → 100644
浏览文件 @
be523baa
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/anakin/convert/activation.h"
#include <algorithm>
#include <map>
using
anakin
::
graph
::
GraphGlobalMem
;
using
anakin
::
AK_FLOAT
;
using
anakin
::
saber
::
NV
;
using
anakin
::
saber
::
Shape
;
namespace
paddle
{
namespace
inference
{
namespace
anakin
{
// Maps a Paddle activation op type ("relu"/"tanh"/"sigmoid") onto the
// corresponding Anakin activation name at construction time.
// Fails fast when the op type has no Anakin equivalent.
ActivationOpConverter::ActivationOpConverter(const std::string &op_type)
    : op_type_(op_type) {
  auto it = anakin_ops_type_.find(op_type_);
  // Improved diagnostic: fix grammar ("is not support") and include the
  // offending op type so the failure is actionable.
  PADDLE_ENFORCE(it != anakin_ops_type_.end(),
                 "activation op type [%s] is not supported", op_type_);
  anakin_op_type_ = it->second;
}
// Converts one Paddle activation op (relu/tanh/sigmoid) into an Anakin
// "Activation" layer. Reads input "X", writes output "Out".
void ActivationOpConverter::operator()(const framework::proto::OpDesc &op,
                                       const framework::Scope &scope,
                                       bool test_mode) {
  framework::OpDesc op_desc(op, nullptr);
  // Activation ops are strictly single-input / single-output.
  // Use 1UL to match size()'s unsigned return type — consistent with the
  // conv2d converter and avoids signed/unsigned comparison warnings.
  PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1UL);
  PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1UL);

  // Unique engine op name: "<op type>:<output var name>".
  auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front();
  auto input_name = op_desc.Input("X").front();
  auto output_name = op_desc.Output("Out").front();

  engine_->AddOp(op_name, "Activation", {input_name}, {output_name});
  engine_->AddOpAttr(op_name, "type", anakin_op_type_);
  if (op_type_ == "relu") {
    // Plain ReLU: zero slope on the negative half (no leaky behavior).
    engine_->AddOpAttr(op_name, "alpha", 0);
  }
}
}
// namespace anakin
}
// namespace inference
}
// namespace paddle
// Register one converter per supported Paddle activation op type.
REGISTER_ANAKIN_OP_CONVERTER(relu, ReluOpConverter);
REGISTER_ANAKIN_OP_CONVERTER(sigmoid, SigmoidOpConverter);
REGISTER_ANAKIN_OP_CONVERTER(tanh, TanhOpConverter);
paddle/fluid/inference/anakin/convert/
registrar
.h
→
paddle/fluid/inference/anakin/convert/
activation
.h
浏览文件 @
be523baa
...
@@ -14,45 +14,44 @@
...
@@ -14,45 +14,44 @@
#pragma once
#pragma once
#include <functional>
#include <map>
#include <map>
#include <memory>
#include <string>
#include <string>
#include
<utility>
#include
"paddle/fluid/inference/anakin/convert/op_converter.h"
namespace
paddle
{
namespace
paddle
{
namespace
inference
{
namespace
inference
{
namespace
anakin
{
namespace
anakin
{
class
AnakinOpConverter
;
class
ActivationOpConverter
:
public
AnakinOpConverter
{
class
OpRegister
{
public:
public:
OpRegister
()
=
default
;
explicit
ActivationOpConverter
(
const
std
::
string
&
op_type
);
std
::
shared_ptr
<
AnakinOpConverter
>
Get
(
const
std
::
string
&
name
);
static
OpRegister
*
instance
();
virtual
void
operator
()(
const
framework
::
proto
::
OpDesc
&
op
,
void
OpRegisterFn
(
const
std
::
string
&
name
,
const
framework
::
Scope
&
scope
,
std
::
function
<
std
::
shared_ptr
<
AnakinOpConverter
>
()
>
fn
)
{
bool
test_mode
)
override
;
registry_
[
name
]
=
fn
;
virtual
~
ActivationOpConverter
()
{}
}
private:
private:
using
RegisterFnType
=
std
::
function
<
std
::
shared_ptr
<
AnakinOpConverter
>
()
>
;
std
::
string
op_type_
;
std
::
map
<
std
::
string
,
std
::
function
<
std
::
shared_ptr
<
AnakinOpConverter
>
()
>>
std
::
string
anakin_op_type_
;
registry_
;
std
::
map
<
std
::
string
,
std
::
string
>
anakin_ops_type_
{
{
"relu"
,
"Relu"
},
{
"tanh"
,
"TanH"
},
{
"sigmoid"
,
"Sigmoid"
}};
};
};
template
<
typename
T
,
typename
...
Args
>
class
ReluOpConverter
:
public
ActivationOpConverter
{
class
Registrar
{
public:
public:
Registrar
(
const
std
::
string
&
name
,
Args
...
args
)
{
ReluOpConverter
()
:
ActivationOpConverter
(
"relu"
)
{}
std
::
shared_ptr
<
AnakinOpConverter
>
converter
=
std
::
make_shared
<
T
>
(
std
::
move
(
args
)...);
OpRegister
::
instance
()
->
OpRegisterFn
(
name
,
[
converter
]()
{
return
converter
;
});
}
};
};
// Activation converter specialized for the "tanh" op.
class TanhOpConverter : public ActivationOpConverter {
 public:
  TanhOpConverter() : ActivationOpConverter("tanh") {}
};
// Activation converter specialized for the "sigmoid" op.
class SigmoidOpConverter : public ActivationOpConverter {
 public:
  // BUG FIX: the original constructor passed "tanh", which registered the
  // sigmoid converter as an Anakin TanH activation. Pass "sigmoid" so it
  // maps to Anakin's "Sigmoid" type (see anakin_ops_type_ in the base
  // class), matching the TEST(sigm_op, test) expectation.
  SigmoidOpConverter() : ActivationOpConverter("sigmoid") {}
};
}
// namespace anakin
}
// namespace anakin
}
// namespace inference
}
// namespace inference
}
// namespace paddle
}
// namespace paddle
paddle/fluid/inference/anakin/convert/conv2d.cc
0 → 100644
浏览文件 @
be523baa
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/inference/anakin/convert/conv2d.h"
#include <algorithm>
#include <memory>
#include <vector>
using
anakin
::
graph
::
GraphGlobalMem
;
using
anakin
::
AK_FLOAT
;
using
anakin
::
saber
::
NV
;
using
anakin
::
saber
::
Shape
;
using
anakin
::
PTuple
;
namespace
paddle
{
namespace
inference
{
namespace
anakin
{
// Converts a Paddle conv2d op into an Anakin "Convolution" layer, copying
// the filter weights to the CPU and attaching them as "weight_1".
// Bias is not handled here ("bias_term" is always false).
void Conv2dOpConverter::operator()(const framework::proto::OpDesc &op,
                                   const framework::Scope &scope,
                                   bool test_mode) {
  framework::OpDesc op_desc(op, nullptr);
  PADDLE_ENFORCE_EQ(op_desc.Input("Input").size(), 1UL);
  PADDLE_ENFORCE_EQ(op_desc.Input("Filter").size(), 1UL);
  PADDLE_ENFORCE_EQ(op_desc.Output("Output").size(), 1UL);

  auto input_name = op_desc.Input("Input").front();
  auto output_name = op_desc.Output("Output").front();
  // Unique engine op name: "<op type>:<output var name>".
  auto op_name = op_desc.Type() + ":" + op_desc.Output("Output").front();
  engine_->AddOp(op_name, "Convolution", {input_name}, {output_name});

  // Fetch the filter variable from the scope and copy it to host memory so
  // the raw float weights can be handed to Anakin.
  auto *filter_v = scope.FindVar(op_desc.Input("Filter").front());
  PADDLE_ENFORCE_NOT_NULL(filter_v);
  auto *filter_t = filter_v->GetMutable<framework::LoDTensor>();
  std::unique_ptr<framework::LoDTensor> weight_tensor(
      new framework::LoDTensor());
  weight_tensor->Resize(filter_t->dims());
  TensorCopySync((*filter_t), platform::CPUPlace(), weight_tensor.get());

  // Filter layout is NCHW: [n_output, n_input, filter_h, filter_w].
  PADDLE_ENFORCE_EQ(weight_tensor->dims().size(), 4UL);
  const int n_output = weight_tensor->dims()[0];
  const int filter_h = weight_tensor->dims()[2];
  const int filter_w = weight_tensor->dims()[3];

  // BUG FIX: Anakin's "filter_num" attribute is the number of output
  // channels of the convolution, i.e. dims()[0] of the filter. The
  // original code sent n_input * filter_h * filter_w (the per-filter
  // element count), which disagrees with the declared output shape
  // (e.g. filter {3,2,3,3} -> output channels 3, not 18).
  engine_->AddOpAttr<int>(op_name, "filter_num", n_output);
  engine_->AddOpAttr<PTuple<int>>(op_name, "kernel_size",
                                  {filter_h, filter_w});

  auto strides = boost::get<std::vector<int>>(op_desc.GetAttr("strides"));
  engine_->AddOpAttr<PTuple<int>>(op_name, "strides", strides);
  auto paddings = boost::get<std::vector<int>>(op_desc.GetAttr("paddings"));
  engine_->AddOpAttr<PTuple<int>>(op_name, "padding", paddings);
  auto dilations = boost::get<std::vector<int>>(op_desc.GetAttr("dilations"));
  engine_->AddOpAttr<PTuple<int>>(op_name, "dilation_rate", dilations);
  const int groups = boost::get<int>(op_desc.GetAttr("groups"));
  engine_->AddOpAttr(op_name, "group", groups);
  engine_->AddOpAttr(op_name, "axis", 1);
  engine_->AddOpAttr(op_name, "bias_term", false);

  // Mirror the weights into an Anakin block (host side first, then device)
  // and attach them to the op. The unused `weight_data` local from the
  // original was removed; TensorCopySync already materialized the buffer.
  auto weight_shape = framework::vectorize2int(filter_t->dims());
  Shape anakin_shape(weight_shape);
  auto *weight1 =
      GraphGlobalMem<NV>::Global().template new_block<AK_FLOAT>(anakin_shape);
  float *cpu_data = static_cast<float *>(weight1->h_tensor().mutable_data());
  std::copy_n(weight_tensor->data<float>(), weight_tensor->numel(), cpu_data);
  weight1->d_tensor().set_shape(anakin_shape);
  weight1->d_tensor().copy_from(weight1->h_tensor());
  engine_->AddOpAttr(op_name, "weight_1", *weight1);
}
}
// namespace anakin
}
// namespace inference
}
// namespace paddle
// Make the conv2d converter discoverable through the converter registry.
REGISTER_ANAKIN_OP_CONVERTER(conv2d, Conv2dOpConverter);
paddle/fluid/inference/anakin/convert/
registrar.cc
→
paddle/fluid/inference/anakin/convert/
conv2d.h
浏览文件 @
be523baa
...
@@ -12,22 +12,23 @@
...
@@ -12,22 +12,23 @@
// See the License for the specific language governing permissions and
// See the License for the specific language governing permissions and
// limitations under the License.
// limitations under the License.
#include "paddle/fluid/inference/anakin/convert/registrar.h"
#pragma once
#include "paddle/fluid/inference/anakin/convert/op_converter.h"
namespace
paddle
{
namespace
paddle
{
namespace
inference
{
namespace
inference
{
namespace
anakin
{
namespace
anakin
{
std
::
shared_ptr
<
AnakinOpConverter
>
OpRegister
::
Get
(
const
std
::
string
&
name
)
{
class
Conv2dOpConverter
:
public
AnakinOpConverter
{
auto
it
=
registry_
.
find
(
name
);
public:
if
(
it
==
registry_
.
end
())
return
nullptr
;
Conv2dOpConverter
()
=
default
;
return
it
->
second
();
}
OpRegister
*
OpRegister
::
instance
()
{
virtual
void
operator
()(
const
framework
::
proto
::
OpDesc
&
op
,
static
OpRegister
factory
;
const
framework
::
Scope
&
scope
,
return
&
factory
;
bool
test_mode
)
override
;
}
virtual
~
Conv2dOpConverter
()
{}
};
}
// namespace anakin
}
// namespace anakin
}
// namespace inference
}
// namespace inference
...
...
paddle/fluid/inference/anakin/convert/fc.cc
浏览文件 @
be523baa
...
@@ -17,12 +17,8 @@
...
@@ -17,12 +17,8 @@
using
anakin
::
graph
::
GraphGlobalMem
;
using
anakin
::
graph
::
GraphGlobalMem
;
using
anakin
::
AK_FLOAT
;
using
anakin
::
AK_FLOAT
;
using
anakin
::
Precision
;
using
anakin
::
saber
::
NV
;
using
anakin
::
saber
::
NV
;
using
anakin
::
saber
::
X86
;
using
anakin
::
saber
::
Shape
;
using
anakin
::
saber
::
Shape
;
using
anakin
::
PBlock
;
using
anakin
::
PTuple
;
namespace
paddle
{
namespace
paddle
{
namespace
inference
{
namespace
inference
{
...
...
paddle/fluid/inference/anakin/convert/op_converter.h
浏览文件 @
be523baa
...
@@ -22,7 +22,6 @@
...
@@ -22,7 +22,6 @@
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/anakin/convert/registrar.h"
#include "paddle/fluid/inference/anakin/engine.h"
#include "paddle/fluid/inference/anakin/engine.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "saber/saber_types.h"
#include "saber/saber_types.h"
...
...
paddle/fluid/inference/anakin/convert/test_activation_op.cc
0 → 100644
浏览文件 @
be523baa
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include "paddle/fluid/inference/anakin/convert/activation.h"
#include "paddle/fluid/inference/anakin/convert/op_converter.h"
#include "paddle/fluid/inference/anakin/convert/ut_helper.h"
namespace
paddle
{
namespace
inference
{
namespace
anakin
{
// Builds a single-op program of the given activation type, runs it through
// the Anakin converter, and validates its output against Paddle's own
// execution of the same op.
static void test_activation_op(const std::string &op_type) {
  auto *converter = Registry<AnakinOpConverter>::Global().Lookup(op_type);
  PADDLE_ENFORCE(converter != nullptr);

  std::unordered_set<std::string> parameters;
  framework::Scope scope;
  AnakinConvertValidation validator(parameters, scope);
  validator.DeclInputVar("act-X", {10, 6, 1, 1});
  validator.DeclOutputVar("act-Out", {10, 6, 1, 1});

  // Describe the op: one input "X", one output "Out".
  framework::OpDesc desc;
  desc.SetType(op_type);
  desc.SetInput("X", {"act-X"});
  desc.SetOutput("Out", {"act-Out"});

  LOG(INFO) << "set OP";
  validator.SetOp(*desc.Proto());
  LOG(INFO) << "execute";
  validator.Execute(5);
}
// One test case per supported activation type.
TEST(relu_op, test) { test_activation_op("relu"); }
TEST(sigm_op, test) { test_activation_op("sigmoid"); }
TEST(tanh_op, test) { test_activation_op("tanh"); }

}  // namespace anakin
}  // namespace inference
}  // namespace paddle

// Pull in the Paddle op kernels and Anakin converters exercised above.
USE_OP(relu);
USE_OP(sigmoid);
USE_OP(tanh);
USE_ANAKIN_CONVERTER(relu);
USE_ANAKIN_CONVERTER(sigmoid);
USE_ANAKIN_CONVERTER(tanh);
paddle/fluid/inference/anakin/convert/test_conv2d_op.cc
0 → 100644
浏览文件 @
be523baa
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include "paddle/fluid/inference/anakin/convert/conv2d.h"
#include "paddle/fluid/inference/anakin/convert/op_converter.h"
#include "paddle/fluid/inference/anakin/convert/ut_helper.h"
namespace
paddle
{
namespace
inference
{
namespace
anakin
{
// Runs a small conv2d (1x2x5x5 input, 3x2x3x3 filter, stride/pad/dilation
// of 1) through the Anakin converter and validates against Paddle's own
// execution of the op.
TEST(conv2d_op, test) {
  auto *conv2d_converter =
      Registry<AnakinOpConverter>::Global().Lookup("conv2d");
  ASSERT_TRUE(conv2d_converter != nullptr);

  std::unordered_set<std::string> parameters({"conv2d-Y"});
  framework::Scope scope;
  AnakinConvertValidation validator(parameters, scope);
  validator.DeclInputVar("conv2d-X", {1, 2, 5, 5});
  validator.DeclParamVar("conv2d-Y", {3, 2, 3, 3});
  validator.DeclOutputVar("conv2d-Out", {1, 3, 5, 5});

  // Prepare Op description
  framework::OpDesc desc;
  desc.SetType("conv2d");
  desc.SetInput("Input", {"conv2d-X"});
  desc.SetInput("Filter", {"conv2d-Y"});
  desc.SetOutput("Output", {"conv2d-Out"});

  const std::vector<int> strides({1, 1});
  const std::vector<int> paddings({1, 1});
  const std::vector<int> dilations({1, 1});
  const int groups = 1;
  desc.SetAttr("strides", strides);
  desc.SetAttr("paddings", paddings);
  desc.SetAttr("dilations", dilations);
  desc.SetAttr("groups", groups);

  validator.SetOp(*desc.Proto());
  validator.Execute(3);
}

}  // namespace anakin
}  // namespace inference
}  // namespace paddle

// Pull in the Paddle conv2d kernel and its Anakin converter.
USE_OP(conv2d);
USE_ANAKIN_CONVERTER(conv2d);
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录