Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
Paddle-Lite
提交
49f2a5c8
P
Paddle-Lite
项目概览
PaddlePaddle
/
Paddle-Lite
通知
331
Star
4
Fork
1
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
271
列表
看板
标记
里程碑
合并请求
78
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle-Lite
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
271
Issue
271
列表
看板
标记
里程碑
合并请求
78
合并请求
78
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
49f2a5c8
编写于
7月 26, 2018
作者:
Z
zhangyang0701
提交者:
GitHub
7月 26, 2018
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #638 from zhangyang0701/develop
add new param definitions for FPGA ops
上级
7b70a56a
4214f32c
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
169 additions
and
8 deletions
+169
-8
src/operators/op_param.h
src/operators/op_param.h
+169
-8
未找到文件。
src/operators/op_param.h
浏览文件 @
49f2a5c8
...
...
@@ -232,7 +232,6 @@ class ConvParam : OpParam {
Print
&
operator
<<
(
Print
&
printer
,
const
ConvParam
&
conv_param
);
#endif
#ifdef ELEMENTWISEADD_OP
class
ElementwiseAddParam
:
OpParam
{
public:
ElementwiseAddParam
(
const
VariableNameMap
&
inputs
,
...
...
@@ -259,6 +258,8 @@ class ElementwiseAddParam : OpParam {
int
axis_
;
};
#ifdef FUSION_ELEMENTWISEADDRELU_OP
using
ElementwiseAddReluParam
=
ElementwiseAddParam
;
#endif
#ifdef MUL_OP
...
...
@@ -421,7 +422,7 @@ class PoolParam : public OpParam {
strides_
=
GetAttr
<
vector
<
int
>>
(
"strides"
,
attrs
);
paddings_
=
GetAttr
<
vector
<
int
>>
(
"paddings"
,
attrs
);
ceil_mode_
=
GetAttr
<
bool
>
(
"ceil_mode"
,
attrs
);
glo
a
bal_pooling_
=
GetAttr
<
bool
>
(
"global_pooling"
,
attrs
);
global_pooling_
=
GetAttr
<
bool
>
(
"global_pooling"
,
attrs
);
}
const
Tensor
*
Input
()
const
{
return
input_
;
}
...
...
@@ -438,7 +439,7 @@ class PoolParam : public OpParam {
bool
isCeilMode
()
const
{
return
ceil_mode_
;
}
bool
isGlobalPooling
()
const
{
return
glo
a
bal_pooling_
;
}
bool
isGlobalPooling
()
const
{
return
global_pooling_
;
}
private:
Tensor
*
input_
;
...
...
@@ -448,9 +449,82 @@ class PoolParam : public OpParam {
vector
<
int
>
strides_
;
vector
<
int
>
paddings_
;
bool
ceil_mode_
;
bool
glo
a
bal_pooling_
=
false
;
bool
global_pooling_
=
false
;
};
#endif
#ifdef FUSION_POOLBN_OP
// Parameters for the fused pooling + batch-norm operator: the pooling
// attributes (type, ksize, strides, paddings, ceil/global flags) plus the
// batch-norm inputs (bias, mean, scale, variance) and epsilon/momentum.
class FusionPoolBNParam : OpParam {
 public:
  FusionPoolBNParam(const VariableNameMap &inputs,
                    const VariableNameMap &outputs, const AttributeMap &attrs,
                    const Scope &scope) {
    input_ = InputXFrom<LoDTensor>(inputs, scope);
    pooling_type_ = GetAttr<string>("pooling_type", attrs);
    ksize_ = GetAttr<vector<int>>("ksize", attrs);
    strides_ = GetAttr<vector<int>>("strides", attrs);
    paddings_ = GetAttr<vector<int>>("paddings", attrs);
    ceil_mode_ = GetAttr<bool>("ceil_mode", attrs);
    global_pooling_ = GetAttr<bool>("global_pooling", attrs);
    output_y_ = OutputYFrom<LoDTensor>(outputs, scope);
    input_bias_ = InputBiasFrom<LoDTensor>(inputs, scope);
    input_mean_ = InputMeanFrom<LoDTensor>(inputs, scope);
    input_scale_ = InputScaleFrom<LoDTensor>(inputs, scope);
    input_variance_ = InputVarianceFrom<LoDTensor>(inputs, scope);
    epsilon_ = GetAttr<float>("epsilon", attrs);
    momentum_ = GetAttr<float>("momentum", attrs);
    // is_test_ = GetAttr<bool>("is_test", attrs);
  }

  const Tensor *Input() const { return input_; }

  const string &PoolingType() const { return pooling_type_; }

  const vector<int> &Ksize() const { return ksize_; }

  const vector<int> &Strides() const { return strides_; }

  const vector<int> &Paddings() const { return paddings_; }

  bool isCeilMode() const { return ceil_mode_; }

  bool isGlobalPooling() const { return global_pooling_; }

  Tensor *OutputY() const { return output_y_; }

  const Tensor *InputBias() const { return input_bias_; }

  const Tensor *InputMean() const { return input_mean_; }

  const Tensor *InputScale() const { return input_scale_; }

  const Tensor *InputVariance() const { return input_variance_; }

  const float &Epsilon() const { return epsilon_; }

  const float &Momentum() const { return momentum_; }

  // NOTE(review): is_test_ is never assigned (the attribute read above is
  // commented out), and data_format_ is never assigned here either; both
  // getters previously returned references to uninitialized members. The
  // in-class initializers below give them defined defaults — confirm the
  // intended defaults with the op authors.
  const bool &IsTest() const { return is_test_; }

  const string &DataFormat() const { return data_format_; }

 private:
  Tensor *input_ = nullptr;
  string pooling_type_;
  vector<int> ksize_;
  vector<int> strides_;
  vector<int> paddings_;
  bool ceil_mode_ = false;
  bool global_pooling_ = false;
  Tensor *output_y_ = nullptr;
  Tensor *input_bias_ = nullptr;
  Tensor *input_mean_ = nullptr;
  Tensor *input_scale_ = nullptr;
  Tensor *input_variance_ = nullptr;
  float epsilon_ = 0.0f;
  float momentum_ = 0.0f;
  bool is_test_ = false;  // fix: was uninitialized yet returned by IsTest()
  string data_format_;    // never set by this ctor; empty until set elsewhere
};
#endif
#ifdef PRIORBOX_OP
...
...
@@ -875,7 +949,6 @@ class PReluParam : public OpParam {
};
#endif
#ifdef FUSION_FC_OP
class
FusionFcParam
:
public
OpParam
{
public:
FusionFcParam
(
const
VariableNameMap
&
inputs
,
const
VariableNameMap
&
outputs
,
...
...
@@ -911,9 +984,11 @@ class FusionFcParam : public OpParam {
int
y_num_col_dims_
;
int
axis_
;
};
#ifdef FUSION_FCRELU_OP
using
FusionFcReluParam
=
FusionFcParam
;
#endif
#ifdef FUSION_CONVADD_OP
class
FusionConvAddParam
:
public
OpParam
{
public:
FusionConvAddParam
(
const
VariableNameMap
&
inputs
,
...
...
@@ -960,7 +1035,6 @@ class FusionConvAddParam : public OpParam {
};
Print
&
operator
<<
(
Print
&
printer
,
const
FusionConvAddParam
&
conv_param
);
#endif
#ifdef FUSION_CONVADDRELU_OP
class
FusionConvAddReluParam
:
public
FusionConvAddParam
{
...
...
@@ -1055,8 +1129,91 @@ class FusionConvAddBNReluParam : public OpParam {
Tensor
*
new_bias_
;
Tensor
*
new_scale_
;
};
#endif
// NOTE(review): this stream-operator re-declaration references
// FusionConvAddParam, which is only declared when FUSION_CONVADD_OP is
// defined; leaving it unguarded breaks builds that do not enable that op,
// so wrap it in the same guard as the class. (It duplicates the declaration
// inside the FUSION_CONVADD_OP block — consider removing it entirely.)
#ifdef FUSION_CONVADD_OP
Print &operator<<(Print &printer, const FusionConvAddParam &conv_param);
#endif
#ifdef FUSION_CONVADDBN_OP
// Parameters for the fused conv + elementwise-add + batch-norm operator:
// the conv inputs/attributes (input, filter, strides, paddings, dilations,
// groups), the add bias and axis, and the batch-norm inputs
// (bias/mean/scale/variance) with epsilon/momentum. The folded BN
// scale/bias tensors are injected later via SetNewScale()/SetNewBias().
class FusionConvAddBNParam : public OpParam {
 public:
  FusionConvAddBNParam(const VariableNameMap &inputs,
                       const VariableNameMap &outputs,
                       const AttributeMap &attrs, const Scope &scope) {
    bias_ = InputYFrom<LoDTensor>(inputs, scope);
    axis_ = GetAttr<int>("axis", attrs);
    filter_ = FilterFrom<LoDTensor>(inputs, scope);
    input_ = InputFrom<LoDTensor>(inputs, scope);
    output_y_ = OutputYFrom<LoDTensor>(outputs, scope);
    strides_ = GetAttr<vector<int>>("strides", attrs);
    paddings_ = GetAttr<vector<int>>("paddings", attrs);
    dilations_ = GetAttr<vector<int>>("dilations", attrs);
    groups = GetAttr<int>("groups", attrs);
    input_bias_ = InputBiasFrom<LoDTensor>(inputs, scope);
    input_mean_ = InputMeanFrom<LoDTensor>(inputs, scope);
    input_scale_ = InputScaleFrom<LoDTensor>(inputs, scope);
    input_variance_ = InputVarianceFrom<LoDTensor>(inputs, scope);
    epsilon_ = GetAttr<float>("epsilon", attrs);
    momentum_ = GetAttr<float>("momentum", attrs);
    // is_test_ = GetAttr<bool>("is_test", attrs);
  }

  Tensor *Bias() const { return bias_; }

  const int &Axis() const { return axis_; }

  const Tensor *Input() const { return input_; }

  const Tensor *Filter() const { return filter_; }

  Tensor *OutputY() const { return output_y_; }

  const vector<int> &Strides() const { return strides_; }

  const vector<int> &Paddings() const { return paddings_; }

  const vector<int> &Dilations() const { return dilations_; }

  const int &Groups() const { return groups; }

  const Tensor *InputBias() const { return input_bias_; }

  const Tensor *InputMean() const { return input_mean_; }

  const Tensor *InputScale() const { return input_scale_; }

  const Tensor *InputVariance() const { return input_variance_; }

  const float &Epsilon() const { return epsilon_; }

  const float &Momentum() const { return momentum_; }

  // NOTE(review): is_test_ is never assigned (the attribute read above is
  // commented out); it previously returned a reference to an uninitialized
  // member. Initialized to false below — confirm the intended default.
  const bool &IsTest() const { return is_test_; }

  void SetNewScale(Tensor *new_scale) { new_scale_ = new_scale; }

  void SetNewBias(Tensor *new_bias) { new_bias_ = new_bias; }

  const Tensor *NewScale() const { return new_scale_; }

  const Tensor *NewBias() const { return new_bias_; }

 protected:
  Tensor *bias_ = nullptr;
  int axis_ = 0;
  Tensor *input_ = nullptr;
  Tensor *output_y_ = nullptr;
  Tensor *filter_ = nullptr;
  vector<int> strides_;
  vector<int> paddings_;
  vector<int> dilations_;
  int groups = 0;  // NOTE(review): missing trailing underscore; kept as-is
                   // because derived classes may reference it by this name.
  Tensor *input_bias_ = nullptr;
  Tensor *input_mean_ = nullptr;
  Tensor *input_scale_ = nullptr;
  Tensor *input_variance_ = nullptr;
  float epsilon_ = 0.0f;
  float momentum_ = 0.0f;
  bool is_test_ = false;      // fix: was uninitialized yet returned by IsTest()
  Tensor *new_bias_ = nullptr;   // fix: readable via NewBias() before SetNewBias()
  Tensor *new_scale_ = nullptr;  // fix: readable via NewScale() before SetNewScale()
};
#endif
#ifdef FUSION_DWCONVBNRELU_OP
...
...
@@ -1269,5 +1426,9 @@ class DropoutParam : public OpParam {
};
#endif
#ifdef REGION_OP
// Param type for the region op; it currently declares no members or
// accessors of its own and only inherits the OpParam helpers.
class RegionParam : public OpParam {};
#endif
}
// namespace operators
}
// namespace paddle_mobile
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录