Commit 4fdb49e8 (PaddlePaddle / PaddleDetection)
Authored Apr 11, 2019 by superjomn
make fc model works
load_model add scale, mul Op
Parent: 8c7c13f1
Showing 25 changed files with 512 additions and 25 deletions (+512, -25)
paddle/fluid/lite/CMakeLists.txt                      +1   -0
paddle/fluid/lite/api/CMakeLists.txt                  +3   -0
paddle/fluid/lite/api/cxx_api.cc                      +19  -0
paddle/fluid/lite/api/cxx_api.h                       +20  -0
paddle/fluid/lite/api/cxx_api_test.cc                 +49  -0
paddle/fluid/lite/core/executor.h                     +1   -0
paddle/fluid/lite/core/kernel.h                       +3   -0
paddle/fluid/lite/core/op_lite.cc                     +4   -3
paddle/fluid/lite/core/op_lite.h                      +2   -1
paddle/fluid/lite/core/tensor.cc                      +2   -2
paddle/fluid/lite/core/types.h                        +15  -0
paddle/fluid/lite/kernels/host/CMakeLists.txt         +6   -1
paddle/fluid/lite/kernels/host/mul_compute.cc         +70  -0
paddle/fluid/lite/kernels/host/scale_compute.cc       +54  -0
paddle/fluid/lite/model_parser/CMakeLists.txt         +1   -1
paddle/fluid/lite/model_parser/model_parser.cc        +15  -3
paddle/fluid/lite/model_parser/model_parser.h         +3   -1
paddle/fluid/lite/model_parser/model_parser_test.cc   +16  -0
paddle/fluid/lite/operators/CMakeLists.txt            +7   -1
paddle/fluid/lite/operators/fc_op.h                   +0   -5
paddle/fluid/lite/operators/mul_op.cc                 +58  -0
paddle/fluid/lite/operators/mul_op.h                  +66  -0
paddle/fluid/lite/operators/op_params.h               +22  -2
paddle/fluid/lite/operators/relu_op.h                 +0   -5
paddle/fluid/lite/operators/scale_op.cc               +75  -0

paddle/fluid/lite/CMakeLists.txt
@@ -5,3 +5,4 @@ add_subdirectory(operators)
 add_subdirectory(kernels)
 add_subdirectory(model_parser)
 add_subdirectory(utils)
+add_subdirectory(api)


paddle/fluid/lite/api/CMakeLists.txt (new file, mode 100644)
cc_library(cxx_api_lite SRCS cxx_api.h DEPS scope_lite executor_lite host_kernels ops_lite)
cc_test(test_cxx_api_lite SRCS cxx_api_test.cc DEPS cxx_api_lite model_parser_lite)


paddle/fluid/lite/api/cxx_api.cc (new file, mode 100644)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Created by chunwei on 19-4-11.
//
#include "paddle/fluid/lite/api/cxx_api.h"

paddle/fluid/lite/api/cxx_api.h (new file, mode 100644)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/lite/model_parser/model_parser.h"

namespace paddle {
namespace lite {}  // namespace lite
}  // namespace paddle


paddle/fluid/lite/api/cxx_api_test.cc (new file, mode 100644)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/lite/api/cxx_api.h"
#include <gtest/gtest.h>
#include "paddle/fluid/lite/core/executor.h"
#include "paddle/fluid/lite/core/op_registry.h"

namespace paddle {
namespace lite {

TEST(CXXApi, test) {
  Scope scope;
  framework::proto::ProgramDesc prog;
  LoadModel("/home/chunwei/project2/models/model2", &scope, &prog);
  framework::ProgramDesc prog_desc(prog);

  lite::Executor executor(&scope,
                          {OpLite::Place{TARGET(kHost), PRECISION(kFloat)}});

  auto x = scope.Var("a")->GetMutable<Tensor>();
  x->Resize({100, 100});
  x->mutable_data<float>();

  executor.PrepareWorkspace(prog_desc, &scope);
  executor.Build(prog_desc);
  executor.Run();
}

}  // namespace lite
}  // namespace paddle

USE_LITE_OP(mul);
USE_LITE_OP(fc);
USE_LITE_OP(scale);
USE_LITE_KERNEL(fc, kHost, kFloat);
USE_LITE_KERNEL(mul, kHost, kFloat);
USE_LITE_KERNEL(scale, kHost, kFloat);


paddle/fluid/lite/core/executor.h
@@ -47,6 +47,7 @@ class Executor {
    // Create operators.
    for (auto* op_desc : program.Block(0).AllOps()) {
      auto op_type = op_desc->Type();
      if (op_type == "feed" || op_type == "fetch") continue;
      LOG(INFO) << "create Op [" << op_type << "]";
      ops_.emplace_back(LiteOpRegistry::Global().Create(op_type));
      // pick initial kernel


paddle/fluid/lite/core/kernel.h
@@ -67,6 +67,9 @@ class OpKernel : public KernelBase {
   void Touch() {}

+  TargetType target() const override { return Target; }
+  PrecisionType precision() const override { return Precision; }
+
   OpKernel() = default;
   virtual ~OpKernel() = default;


paddle/fluid/lite/core/op_lite.cc
@@ -20,13 +20,14 @@ namespace paddle {
 namespace lite {

 std::vector<std::unique_ptr<KernelBase>> OpLite::CreateKernels(
-    const std::vector<OpLite::Place> &places) {
+    const std::vector<OpLite::Place> &places, const std::string &kernel_type) {
   std::vector<std::unique_ptr<KernelBase>> kernels;
   CHECK(!op_type_.empty()) << "op_type_ should be set first";

   for (auto place : places) {
-    kernels.emplace_back(KernelRegistry::Global().Create(op_type_, place.target, place.precision));
+    kernels.emplace_back(KernelRegistry::Global().Create(
+        (kernel_type.empty() ? op_type_ : kernel_type), place.target, place.precision));
   }
   return kernels;


paddle/fluid/lite/core/op_lite.h
@@ -119,7 +119,8 @@ class OpLite : public Registry {
   // Create all the kernels for the valid targets.
-  std::vector<std::unique_ptr<KernelBase>> CreateKernels(const std::vector<Place> &places);
+  std::vector<std::unique_ptr<KernelBase>> CreateKernels(
+      const std::vector<OpLite::Place> &places, const std::string &kernel_type = "");

  protected:
   std::unique_ptr<OpContext> op_context_;


paddle/fluid/lite/core/tensor.cc
@@ -32,8 +32,8 @@ std::ostream &operator<<(std::ostream &os, const DDim &dims) {
 }

 std::ostream &operator<<(std::ostream &os, const Tensor &tensor) {
-  os << "Tensor:" << std::endl;
-  os << "dim: " << tensor.dims();
+  os << "Tensor:" << '\n';
+  os << "dim: " << tensor.dims() << '\n';
   for (int i = 0; i < product(tensor.dims()); i++) {
     os << tensor.data<float>()[i] << " ";
   }


paddle/fluid/lite/core/types.h
@@ -25,6 +25,21 @@ using any_context_t = variant<Context<TARGET(kX86)>,  //
                               Context<TARGET(kCUDA)>  //
                               >;

+struct dim2 {
+  int x{};
+  int y{};
+
+  dim2(int x, int y) : x(x), y(y) {}
+};
+
+struct dim3 {
+  int x{};
+  int y{};
+  int z{};
+
+  dim3(int x, int y, int z) : x(x), y(y), z(z) {}
+};
+
 }  // namespace core
 }  // namespace lite
 }  // namespace paddle


paddle/fluid/lite/kernels/host/CMakeLists.txt
 cc_library(fc_compute_host SRCS fc_compute.cc DEPS tensor_lite)
 cc_library(relu_compute_host SRCS relu_compute.cc DEPS tensor_lite)
+cc_library(mul_compute_host SRCS mul_compute.cc DEPS tensor_lite)
+cc_library(scale_compute_host SRCS scale_compute.cc DEPS tensor_lite)

 cc_library(host_kernels DEPS
     fc_compute_host
-    relu_compute_host)
+    relu_compute_host
+    mul_compute_host
+    scale_compute_host
+    )

 cc_test(test_fc_compute SRCS fc_compute_test.cc DEPS fc_compute_host fc_op_lite)


paddle/fluid/lite/kernels/host/mul_compute.cc (new file, mode 100644)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <Eigen/Core>
#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle/fluid/lite/core/types.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace host {

template <typename T>
void mul_compute_eigen(const T* x, int x_h, int x_w, const T* y, int y_h,
                       int y_w, T* out) {
  using matrix_t =
      Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

  Eigen::Map<const matrix_t> X(x, x_h, x_w);
  Eigen::Map<const matrix_t> Y(y, y_h, y_w);
  Eigen::Map<matrix_t> Out(out, x_h, y_w);

  Out = X * Y;
}

class MulCompute : public OpKernel<TARGET(kHost), PRECISION(kFloat)> {
 public:
  using param_t = operators::MulParam;

  void Run() override {
    auto& theparam = param<operators::MulParam>();
    core::dim2 x_shape(
        {product(theparam.x->dims().begin(),
                 theparam.x->dims().begin() + theparam.x_num_col_dims),
         product(theparam.x->dims().begin() + theparam.x_num_col_dims,
                 theparam.x->dims().end())});
    core::dim2 y_shape(
        {product(theparam.y->dims().begin(),
                 theparam.y->dims().begin() + theparam.x_num_col_dims),
         product(theparam.y->dims().begin() + theparam.x_num_col_dims,
                 theparam.y->dims().end())});

    mul_compute_eigen(theparam.x->data<float>(), x_shape.x, x_shape.y,  //
                      theparam.y->data<float>(), y_shape.x, y_shape.y,  //
                      theparam.output->mutable_data<float>());
  }

  virtual ~MulCompute() = default;
};

}  // namespace host
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

REGISTER_LITE_KERNEL(mul, kHost, kFloat,
                     paddle::lite::kernels::host::MulCompute);

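For reference, a minimal standalone sketch of the Eigen::Map technique that mul_compute_eigen above relies on: raw row-major buffers are viewed as matrices without copying, and the product is written directly into the output buffer. The 2x3 and 3x2 sample buffers here are made up for illustration and are not part of the commit.

#include <Eigen/Core>

#include <vector>

int main() {
  using matrix_t =
      Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

  // Made-up buffers: X is 2x3, Y is 3x2, both stored row-major.
  std::vector<float> x = {1, 2, 3, 4, 5, 6};
  std::vector<float> y = {1, 0, 0, 1, 1, 1};
  std::vector<float> out(2 * 2);

  // Map the raw memory as matrices (no copy), then multiply into `out`.
  Eigen::Map<const matrix_t> X(x.data(), 2, 3);
  Eigen::Map<const matrix_t> Y(y.data(), 3, 2);
  Eigen::Map<matrix_t> Out(out.data(), 2, 2);

  Out = X * Y;  // out now holds {4, 5, 10, 11} in row-major order.
  return 0;
}
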

paddle/fluid/lite/kernels/host/scale_compute.cc (new file, mode 100644)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <Eigen/Core>
#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle/fluid/lite/core/types.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace host {

template <typename T>
void scale_compute(const T* x, T* out, int size, float scale, float bias,
                   bool bias_before) {
  if (bias_before) bias *= scale;
  for (int i = 0; i < size; i++) {
    out[i] = x[i] * scale + bias;
  }
}

class ScaleCompute : public OpKernel<TARGET(kHost), PRECISION(kFloat)> {
 public:
  using param_t = operators::MulParam;

  void Run() override {
    auto& theparam = param<operators::ScaleParam>();
    scale_compute(theparam.x->data<float>(), theparam.x->mutable_data<float>(),
                  product(theparam.x->dims()), theparam.scale, theparam.bias,
                  theparam.bias_after_scale);
  }

  virtual ~ScaleCompute() = default;
};

}  // namespace host
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

REGISTER_LITE_KERNEL(scale, kHost, kFloat,
                     paddle::lite::kernels::host::ScaleCompute);

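A note on the semantics scale_compute implements: the kernel always evaluates x[i] * scale + bias, and when the op's bias is applied before the scale it folds that case in by pre-multiplying the bias with the scale, since (x + bias) * scale == x * scale + bias * scale. A minimal standalone sketch of that equivalence, with made-up sample values and no Lite types:

#include <cassert>
#include <cmath>
#include <vector>

// Same contract as scale_compute above: bias_before means the bias is added
// before the scale is applied, which is folded into the bias term.
std::vector<float> scale_ref(const std::vector<float>& x, float scale,
                             float bias, bool bias_before) {
  if (bias_before) bias *= scale;
  std::vector<float> out(x.size());
  for (size_t i = 0; i < x.size(); ++i) out[i] = x[i] * scale + bias;
  return out;
}

int main() {
  std::vector<float> x = {1.f, 2.f, 3.f};
  // Bias after scale: 1 * 2 + 0.5 = 2.5
  assert(std::fabs(scale_ref(x, 2.f, 0.5f, false)[0] - 2.5f) < 1e-6f);
  // Bias before scale: (1 + 0.5) * 2 = 3.0
  assert(std::fabs(scale_ref(x, 2.f, 0.5f, true)[0] - 3.0f) < 1e-6f);
  return 0;
}
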

paddle/fluid/lite/model_parser/CMakeLists.txt
-cc_library(model_parser_lite SRCS model_parser.cc DEPS variable_lite scope_lite)
+cc_library(model_parser_lite SRCS model_parser.cc DEPS variable_lite scope_lite tensor_lite scope_lite)
 cc_library(runtime_lite SRCS runtime.cc)
 cc_test(test_model_parser_lite SRCS model_parser_test.cc DEPS model_parser_lite)


paddle/fluid/lite/model_parser/model_parser.cc
@@ -138,15 +138,27 @@ void LoadParam(const std::string &path, Variable *out) {
   LoadLoDTensor(fin, out);
 }

-void LoadModel(const std::string &model_dir, Scope *scope) {
+void LoadModel(const std::string &model_dir, Scope *scope,
+               framework::proto::ProgramDesc *prog) {
   const std::string prog_path = model_dir + "/__model__";
-  auto prog = LoadProgram(prog_path);
+  *prog = *LoadProgram(prog_path);

   auto main_block = prog->blocks(0);
   for (auto &var : main_block.vars()) {
     if (var.name() == "feed" || var.name() == "fetch" || !var.persistable())
       continue;
     std::string file_path = model_dir + "/" + var.name();
     LOG(INFO) << "reading weight " << var.name();

     std::ifstream file(file_path);
-    LoadLoDTensor(file, scope->Var(var.name()));
+    switch (var.type().type()) {
+      case framework::proto::VarType_Type_LOD_TENSOR:
+        LoadLoDTensor(file, scope->Var(var.name()));
+        break;
+      default:
+        CHECK(false) << "unknown weight type";
+    }
   }
 }


paddle/fluid/lite/model_parser/model_parser.h
@@ -19,6 +19,7 @@
 #include <string>
 #include <vector>
 #include "paddle/fluid/framework/framework.pb.h"
+#include "paddle/fluid/lite/core/scope.h"
 #include "paddle/fluid/lite/core/tensor.h"
 #include "paddle/fluid/lite/core/variable.h"
@@ -36,7 +37,8 @@ void LoadParams(const std::string& path);
 void LoadParam(const std::string& path, Variable* out);

 // Read a model and files of parameters.
-void LoadModel(const std::string& model_dir);
+void LoadModel(const std::string& model_dir, Scope* scope,
+               framework::proto::ProgramDesc* prog);

 }  // namespace lite
 }  // namespace paddle


paddle/fluid/lite/model_parser/model_parser_test.cc
@@ -14,6 +14,7 @@
 #include "paddle/fluid/lite/model_parser/model_parser.h"
 #include <gtest/gtest.h>
+#include "paddle/fluid/lite/core/scope.h"

 namespace paddle {
 namespace lite {
@@ -23,5 +24,20 @@ TEST(ModelParser, LoadProgram) {
       "/home/chunwei/project2/models/fc/fluid_checkpoint/__model__");
 }

+TEST(ModelParser, LoadParam) {
+  Scope scope;
+  auto* v = scope.Var("xxx");
+  LoadParam("/home/chunwei/project2/models/fc/fluid_checkpoint/b1", v);
+  const auto& t = v->Get<Tensor>();
+  LOG(INFO) << "loaded\n";
+  LOG(INFO) << t;
+}
+
+TEST(ModelParser, LoadModel) {
+  Scope scope;
+  framework::proto::ProgramDesc prog;
+  LoadModel("/home/chunwei/project2/models/fc/fluid_checkpoint", &scope, &prog);
+}
+
 }  // namespace lite
 }  // namespace paddle


paddle/fluid/lite/operators/CMakeLists.txt
 cc_library(fc_op_lite SRCS fc_op.cc DEPS op_lite op_params_lite tensor_lite proto_desc)
 cc_library(relu_op_lite SRCS relu_op.cc DEPS op_lite)
+cc_library(mul_op_lite SRCS mul_op.cc DEPS op_lite)
+cc_library(scale_op_lite SRCS scale_op.cc DEPS op_lite)
 cc_library(op_params_lite SRCS op_params.cc DEPS tensor_lite)

 cc_library(ops_lite DEPS
     fc_op_lite
-    relu_op_lite)
+    relu_op_lite
+    mul_op_lite
+    scale_op_lite
+    )

 cc_test(test_fc_op_lite SRCS fc_op_test.cc DEPS fc_op_lite fc_compute_host)


paddle/fluid/lite/operators/fc_op.h
@@ -66,11 +66,6 @@ class FcOpLite : public OpLite {
   std::string DebugString() const override { return "fc"; }

-  void StaticPickKernel(const std::vector<Place> &valid_targets) override {
-    auto kernels = CreateKernels(valid_targets);
-    kernel_ = std::move(kernels.front());
-  }
-
  private:
   mutable FcParam param_;
 };


paddle/fluid/lite/operators/mul_op.cc (new file, mode 100644)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/lite/operators/mul_op.h"
#include "paddle/fluid/lite/core/op_registry.h"

namespace paddle {
namespace lite {
namespace operators {

bool MulOpLite::CheckShape() const {
  CHECK_OR_FALSE(param_.x);
  CHECK_OR_FALSE(param_.y);
  CHECK_OR_FALSE(param_.output);
  // bias is optional.

  const auto x_dims = param_.x->dims();
  const auto y_dims = param_.y->dims();

  CHECK_EQ_OR_FALSE(y_dims.size(), 2UL);
  CHECK_GT_OR_FALSE(x_dims.size(), static_cast<size_t>(param_.x_num_col_dims));

  return true;
}

bool MulOpLite::InferShape() const {
  const auto x_dims = param_.x->dims();
  const auto y_dims = param_.y->dims();

  // Set output dims
  std::vector<int64_t> out_dims(param_.x_num_col_dims + 1, 0);
  for (int i = 0; i < param_.x_num_col_dims; ++i) {
    out_dims[i] = x_dims[i];
  }
  out_dims.back() = y_dims[1];
  param_.output->Resize(out_dims);

  // share LoD
  // param_.output->set_lod(param_.input->lod());
  return true;
}

}  // namespace operators
}  // namespace lite
}  // namespace paddle

REGISTER_LITE_OP(mul, paddle::lite::operators::MulOpLite);

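To make the shape rule in MulOpLite::InferShape concrete: the first x_num_col_dims dimensions of x are kept and the final output dimension comes from y's second dimension, so an x of [3, 4, 5] with x_num_col_dims = 1 (flattened to a 3 x 20 matrix) against a y of [20, 7] produces an output of [3, 7]. A minimal standalone sketch of that rule, using plain vectors and made-up shapes rather than the Lite tensor types:

#include <cassert>
#include <cstdint>
#include <vector>

// Mirrors the loop in MulOpLite::InferShape above, on plain dimension vectors.
std::vector<int64_t> mul_out_dims(const std::vector<int64_t>& x_dims,
                                  const std::vector<int64_t>& y_dims,
                                  int x_num_col_dims) {
  std::vector<int64_t> out_dims(x_num_col_dims + 1, 0);
  for (int i = 0; i < x_num_col_dims; ++i) out_dims[i] = x_dims[i];
  out_dims.back() = y_dims[1];  // y is required to be 2-D by CheckShape().
  return out_dims;
}

int main() {
  // x: [3, 4, 5] flattened as a 3 x 20 matrix, y: [20, 7]  ->  out: [3, 7]
  assert((mul_out_dims({3, 4, 5}, {20, 7}, 1) == std::vector<int64_t>{3, 7}));
  return 0;
}
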

paddle/fluid/lite/operators/mul_op.h (new file, mode 100644)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string>
#include <vector>
#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/op_lite.h"
#include "paddle/fluid/lite/core/scope.h"
#include "paddle/fluid/lite/core/tensor.h"
#include "paddle/fluid/lite/operators/op_params.h"
#include "paddle/fluid/lite/utils/all.h"

namespace paddle {
namespace lite {
namespace operators {

class MulOpLite : public OpLite {
 public:
  MulOpLite() {}

  explicit MulOpLite(const std::string &type) : OpLite(type) {}

  bool CheckShape() const override;

  bool InferShape() const override;

  // TODO(Superjomn) replace framework::OpDesc with a lite one.
  bool Attach(const framework::OpDesc &op_desc, lite::Scope *scope) override {
    auto input = op_desc.Input("X").front();
    auto W = op_desc.Input("Y").front();
    auto out = op_desc.Output("Out").front();

    param_.x = scope->FindVar(input)->GetMutable<Tensor>();
    param_.y = scope->FindVar(W)->GetMutable<Tensor>();
    CHECK(scope->FindVar(out));
    param_.output = scope->FindVar(out)->GetMutable<Tensor>();
    param_.x_num_col_dims = boost::get<int>(op_desc.GetAttr("x_num_col_dims"));
    param_.y_num_col_dims = boost::get<int>(op_desc.GetAttr("y_num_col_dims"));

    CHECK(kernel_);
    kernel_->SetParam(param_);

    return true;
  }

  std::string DebugString() const override { return "mul"; }

 private:
  mutable MulParam param_;
};

}  // namespace operators
}  // namespace lite
}  // namespace paddle


paddle/fluid/lite/operators/op_params.h
@@ -30,7 +30,7 @@ struct FcParam {
   Tensor* bias{};
   Tensor* output{};
   DDim in_mat_dims;
-  int in_num_col_dims{0};
+  int in_num_col_dims{1};
 };

 struct ReluParam {
@@ -38,7 +38,27 @@ struct ReluParam {
   Tensor* output{};
 };

-using param_t = variant<FcParam, ReluParam>;
+// For Mul Op
+struct MulParam {
+  Tensor* x{};
+  Tensor* y{};
+  Tensor* output{};
+  int x_num_col_dims{1};
+  int y_num_col_dims{1};
+};
+
+// For Scale Op
+struct ScaleParam {
+  Tensor* x{};
+  Tensor* output{};
+
+  float scale{1.};
+  float bias{};
+  bool bias_after_scale{true};
+};
+
+using param_t = variant<FcParam, ReluParam, MulParam, ScaleParam>;

 }  // namespace operators
 }  // namespace lite


paddle/fluid/lite/operators/relu_op.h
@@ -36,11 +36,6 @@ class ReluOp : public OpLite {
   std::string DebugString() const override { return "tanh"; }

-  void StaticPickKernel(
-      const std::vector<OpLite::Place> &valid_targets) override {
-    kernel_ = std::move(CreateKernels(valid_targets).front());
-  }
-
  private:
   mutable ReluParam param_;
 };


paddle/fluid/lite/operators/scale_op.cc (new file, mode 100644)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string>
#include <vector>
#include "paddle/fluid/lite/core/kernel.h"
#include "paddle/fluid/lite/core/op_lite.h"
#include "paddle/fluid/lite/core/op_registry.h"
#include "paddle/fluid/lite/core/scope.h"
#include "paddle/fluid/lite/core/tensor.h"
#include "paddle/fluid/lite/operators/op_params.h"
#include "paddle/fluid/lite/utils/all.h"

namespace paddle {
namespace lite {
namespace operators {

class ScaleOp : public OpLite {
 public:
  ScaleOp() {}

  explicit ScaleOp(const std::string &type) : OpLite(type) {}

  bool CheckShape() const override {
    CHECK_OR_FALSE(param_.x);
    CHECK_OR_FALSE(param_.output);
    return true;
  }

  bool InferShape() const override {
    param_.output->Resize(param_.x->dims());
    return true;
  }

  // TODO(Superjomn) replace framework::OpDesc with a lite one.
  bool Attach(const framework::OpDesc &op_desc, lite::Scope *scope) override {
    auto x = op_desc.Input("X").front();
    auto out = op_desc.Output("Out").front();

    param_.x = scope->FindVar(x)->GetMutable<Tensor>();
    CHECK(scope->FindVar(out));
    param_.output = scope->FindVar(out)->GetMutable<Tensor>();
    param_.scale = boost::get<float>(op_desc.GetAttr("scale"));
    param_.bias = boost::get<float>(op_desc.GetAttr("bias"));
    param_.bias_after_scale =
        boost::get<bool>(op_desc.GetAttr("bias_after_scale"));

    CHECK(kernel_);
    kernel_->SetParam(param_);

    return true;
  }

  std::string DebugString() const override { return op_type_; }

 private:
  mutable ScaleParam param_;
};

}  // namespace operators
}  // namespace lite
}  // namespace paddle

REGISTER_LITE_OP(scale, paddle::lite::operators::ScaleOp);