Commit 53b401d5
Authored May 07, 2018 by Luo Tao

refine io_convert and op_convert

Parent: 2a2c83b9

Showing 9 changed files with 37 additions and 39 deletions (+37 −39)
paddle/fluid/inference/tensorrt/CMakeLists.txt                  +0  −1
paddle/fluid/inference/tensorrt/convert/CMakeLists.txt          +3  −2
paddle/fluid/inference/tensorrt/convert/io_converter.cc         +2  −2
paddle/fluid/inference/tensorrt/convert/io_converter.h          +2  −1
paddle/fluid/inference/tensorrt/convert/op_converter.h          +14 −24
paddle/fluid/inference/tensorrt/convert/test_activation_op.cc   +3  −3
paddle/fluid/inference/tensorrt/convert/test_io_converter.cc    +3  −3
paddle/fluid/inference/tensorrt/convert/test_op_converter.cc    +1  −1
paddle/fluid/inference/utils/singleton.h                        +9  −2
paddle/fluid/inference/tensorrt/CMakeLists.txt  (+0 −1)

 nv_test(test_tensorrt SRCS test_tensorrt.cc DEPS dynload_cuda device_context dynamic_loader)
 nv_test(test_tensorrt_engine SRCS test_engine.cc engine.cc DEPS dynload_cuda)
-nv_test(test_io_converter SRCS test_io_converter.cc io_converter.cc DEPS dynload_cuda dynamic_loader lod_tensor)
 set(ENGINE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/engine.cc)
 add_subdirectory(convert)
paddle/fluid/inference/tensorrt/convert/CMakeLists.txt  (+3 −2)

-nv_test(test_tensorrt_op_converter SRCS test_op_converter.cc mul_op.cc conv2d_op.cc DEPS ${FLUID_CORE_MODULES})
-nv_test(test_tensorrt_activation_op SRCS test_activation_op.cc ${ENGINE_FILE} activation_op.cc
+nv_test(test_op_converter SRCS test_op_converter.cc mul_op.cc conv2d_op.cc DEPS ${FLUID_CORE_MODULES})
+nv_test(test_trt_activation_op SRCS test_activation_op.cc ${ENGINE_FILE} activation_op.cc
         DEPS ${FLUID_CORE_MODULES} activation_op)
+nv_test(test_io_converter SRCS test_io_converter.cc io_converter.cc DEPS dynload_cuda dynamic_loader lod_tensor)
paddle/fluid/inference/tensorrt/io_converter.cc → paddle/fluid/inference/tensorrt/convert/io_converter.cc  (+2 −2)

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/fluid/inference/tensorrt/io_converter.h"
+#include "paddle/fluid/inference/tensorrt/convert/io_converter.h"
 #include <cuda.h>
 #include "paddle/fluid/platform/enforce.h"

@@ -50,7 +50,7 @@ class DefaultInputConverter : public EngineInputConverter {
   }
 };

-REGISTER_TENSORRT_INPUT_CONVERTER(mul, DefaultInputConverter);
+REGISTER_TENSORRT_INPUT_CONVERTER(default, DefaultInputConverter);

 }  // namespace tensorrt
 }  // namespace inference
paddle/fluid/inference/tensorrt/io_converter.h → paddle/fluid/inference/tensorrt/convert/io_converter.h  (+2 −1)

@@ -40,7 +40,8 @@ class EngineInputConverter {
   static void Run(const std::string& in_op_type, const LoDTensor& in, void* out,
                   size_t max_size, cudaStream_t* stream) {
     PADDLE_ENFORCE(stream != nullptr);
-    auto* converter = Registry<EngineInputConverter>::Lookup(in_op_type);
+    auto* converter = Registry<EngineInputConverter>::Lookup(
+        in_op_type, "default" /* default_type */);
     PADDLE_ENFORCE_NOT_NULL(converter);
     converter->SetStream(stream);
     (*converter)(in, out, max_size);
paddle/fluid/inference/tensorrt/convert/op_converter.h  (+14 −24)

@@ -19,6 +19,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/block_desc.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/inference/tensorrt/engine.h"
+#include "paddle/fluid/inference/utils/singleton.h"

 namespace paddle {
 namespace inference {

@@ -32,34 +33,23 @@ class OpConverter {
   OpConverter() {}
   virtual void operator()(const framework::OpDesc& op) {}

-  void Execute(const framework::OpDesc& op, TensorRTEngine* engine) {
+  void Run(const framework::OpDesc& op, TensorRTEngine* engine) {
     std::string type = op.Type();
-    auto it = converters_.find(type);
-    PADDLE_ENFORCE(it != converters_.end(), "no OpConverter for optype [%s]",
-                   type);
-    it->second->SetEngine(engine);
-    (*it->second)(op);
-  }
-
-  static OpConverter& Global() {
-    static auto* x = new OpConverter;
-    return *x;
-  }
-
-  template <typename T>
-  void Register(const std::string& key) {
-    converters_[key] = new T;
+    auto* it = Registry<OpConverter>::Lookup(type);
+    PADDLE_ENFORCE_NOT_NULL(it, "no OpConverter for optype [%s]", type);
+    it->SetEngine(engine);
+    (*it)(op);
   }

   // convert fluid op to tensorrt layer
   void ConvertOp(const framework::OpDesc& op, TensorRTEngine* engine) {
-    OpConverter::Global().Execute(op, engine);
+    OpConverter::Run(op, engine);
   }

   // convert fluid block to tensorrt network
   void ConvertBlock(const framework::BlockDesc& block, TensorRTEngine* engine) {
     for (auto op : block.AllOps()) {
-      OpConverter::Global().Execute(*op, engine);
+      OpConverter::Run(*op, engine);
     }
   }

@@ -78,12 +68,12 @@ class OpConverter {
   framework::Scope* scope_{nullptr};
 };

-#define REGISTER_TRT_OP_CONVERTER(op_type__, Converter__)           \
-  struct trt_##op_type__##_converter {                              \
-    trt_##op_type__##_converter() {                                 \
-      OpConverter::Global().Register<Converter__>(#op_type__);      \
-    }                                                                \
-  };                                                                 \
+#define REGISTER_TRT_OP_CONVERTER(op_type__, Converter__)           \
+  struct trt_##op_type__##_converter {                              \
+    trt_##op_type__##_converter() {                                 \
+      Registry<OpConverter>::Register<Converter__>(#op_type__);     \
+    }                                                                \
+  };                                                                 \
   trt_##op_type__##_converter trt_##op_type__##_converter__;

 }  // namespace tensorrt
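Note on the macro change above: the registration mechanism itself is unchanged. It is still the usual static-registrar trick (a file-scope struct whose constructor registers the converter when the translation unit is initialized); only the target moves from the OpConverter-local Global() map to the shared Registry<OpConverter>. A minimal, self-contained sketch of that trick follows; all names in it are invented for illustration and are not Paddle's actual API.

    // Illustrative sketch of the static-registrar macro pattern; hypothetical
    // names, not Paddle's real classes.
    #include <iostream>
    #include <string>
    #include <unordered_map>

    struct Converter {
      virtual ~Converter() = default;
      virtual void operator()(const std::string& op) = 0;
    };

    // A tiny registry keyed by op type (Paddle's version is the templated
    // Registry<OpConverter> declared in singleton.h).
    std::unordered_map<std::string, Converter*>& ConverterMap() {
      static std::unordered_map<std::string, Converter*> map;
      return map;
    }

    template <typename T>
    void RegisterConverter(const std::string& key) {
      ConverterMap()[key] = new T;
    }

    // Expands to a struct whose constructor performs the registration, plus one
    // global instance so that constructor runs during static initialization.
    #define REGISTER_CONVERTER(op_type__, Converter__)        \
      struct reg_##op_type__##_converter {                    \
        reg_##op_type__##_converter() {                       \
          RegisterConverter<Converter__>(#op_type__);         \
        }                                                     \
      };                                                      \
      static reg_##op_type__##_converter reg_##op_type__##_converter__;

    struct ReluConverter : Converter {
      void operator()(const std::string& op) override {
        std::cout << "converting op [" << op << "]\n";
      }
    };
    REGISTER_CONVERTER(relu, ReluConverter);

    int main() {
      // Dispatch mirrors the new OpConverter::Run: look the converter up by type.
      (*ConverterMap().at("relu"))("relu");
      return 0;
    }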
paddle/fluid/inference/tensorrt/convert/test_activation_op.cc  (+3 −3)

@@ -26,7 +26,7 @@ namespace paddle {
 namespace inference {
 namespace tensorrt {

-void compare(float input, float expect) {
+void Compare(float input, float expect) {
   framework::Scope scope;
   platform::CUDAPlace place;
   platform::CUDADeviceContext ctx(place);

@@ -85,8 +85,8 @@ void compare(float input, float expect) {
 }

 TEST(OpConverter, ConvertRelu) {
-  compare(1, 1);   // relu(1) = 1
-  compare(-5, 0);  // relu(-5) = 0
+  Compare(1, 1);   // relu(1) = 1
+  Compare(-5, 0);  // relu(-5) = 0
 }

 }  // namespace tensorrt
paddle/fluid/inference/tensorrt/test_io_converter.cc → paddle/fluid/inference/tensorrt/convert/test_io_converter.cc  (+3 −3)

@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/fluid/framework/lod_tensor.h"
-#include "paddle/fluid/inference/tensorrt/io_converter.h"
+#include "paddle/fluid/inference/tensorrt/convert/io_converter.h"

 #include <gtest/gtest.h>

@@ -34,7 +34,7 @@ TEST_F(EngineInputConverterTester, DefaultCPU) {
   ASSERT_EQ(cudaMalloc(&buffer, tensor.memory_size()), 0);

   cudaStream_t stream;
-  EngineInputConverter::Run("mul", tensor, buffer, tensor.memory_size(),
+  EngineInputConverter::Run("test", tensor, buffer, tensor.memory_size(),
                             &stream);
 }

@@ -44,7 +44,7 @@ TEST_F(EngineInputConverterTester, DefaultGPU) {
   ASSERT_EQ(cudaMalloc(&buffer, tensor.memory_size()), 0);

   cudaStream_t stream;
-  EngineInputConverter::Run("mul", tensor, buffer, tensor.memory_size(),
+  EngineInputConverter::Run("test", tensor, buffer, tensor.memory_size(),
                             &stream);
 }
paddle/fluid/inference/tensorrt/convert/test_op_converter.cc  (+1 −1)

@@ -20,7 +20,7 @@ namespace paddle {
 namespace inference {
 namespace tensorrt {

-TEST(BlockConverter, ConvertBlock) {
+TEST(OpConverter, ConvertBlock) {
   framework::ProgramDesc prog;
   auto* block = prog.MutableBlock(0);
   auto* mul_op = block->AppendOp();
paddle/fluid/inference/utils/singleton.h  (+9 −2)

@@ -14,6 +14,7 @@ limitations under the License. */
 #pragma once

 #include <string>
 #include <unordered_map>
+#include "paddle/fluid/platform/enforce.h"

@@ -49,9 +50,15 @@ struct Registry {
     items_[name] = new ItemChild;
   }

-  static ItemParent* Lookup(const std::string& name) {
+  static ItemParent* Lookup(const std::string& name,
+                            const std::string& default_name = "") {
     auto it = items_.find(name);
-    if (it == items_.end()) return nullptr;
+    if (it == items_.end()) {
+      if (default_name == "")
+        return nullptr;
+      else
+        return items_.find(default_name)->second;
+    }
     return it->second;
   }
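For context, here is a simplified, standalone sketch of the Lookup contract after this change: when the requested name is missing and a non-empty default_name was supplied, the entry registered under the default name is returned instead of nullptr. This is an illustrative stand-in, not the actual singleton.h (for one, the real Registry keeps its map in a static data member).

    // Simplified stand-in for the updated Registry; not Paddle's actual header.
    #include <cassert>
    #include <string>
    #include <unordered_map>

    template <typename ItemParent>
    struct Registry {
      template <typename ItemChild>
      static void Register(const std::string& name) {
        items()[name] = new ItemChild;
      }

      static ItemParent* Lookup(const std::string& name,
                                const std::string& default_name = "") {
        auto& map = items();
        auto it = map.find(name);
        if (it == map.end()) {
          if (default_name.empty()) return nullptr;  // old behavior
          return map.at(default_name);               // new: fall back to default
        }
        return it->second;
      }

     private:
      // Simplification: a function-local static instead of a static data member.
      static std::unordered_map<std::string, ItemParent*>& items() {
        static std::unordered_map<std::string, ItemParent*> map;
        return map;
      }
    };

    struct InputConverter {
      virtual ~InputConverter() = default;
    };
    struct DefaultInputConverter : InputConverter {};

    int main() {
      Registry<InputConverter>::Register<DefaultInputConverter>("default");
      // An unknown type with no default still yields nullptr.
      assert(Registry<InputConverter>::Lookup("mul") == nullptr);
      // An unknown type with a default resolves to the "default" converter,
      // which is how EngineInputConverter::Run now finds a converter for any op.
      assert(Registry<InputConverter>::Lookup("mul", "default") != nullptr);
      return 0;
    }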