机器未来 / Paddle (forked from PaddlePaddle / Paddle, in sync with upstream)
Commit 4f5f0be7
Authored May 14, 2018 by Luo Tao

use the latest buffer to update the convert

Parent a3ba264c
Showing 5 changed files with 9 additions and 9 deletions.
paddle/fluid/inference/engine.h                                +2 -2
paddle/fluid/inference/tensorrt/CMakeLists.txt                 +0 -1
paddle/fluid/inference/tensorrt/convert/CMakeLists.txt         +1 -1
paddle/fluid/inference/tensorrt/convert/io_converter.cc        +2 -1
paddle/fluid/inference/tensorrt/convert/test_activation_op.cc  +4 -4
paddle/fluid/inference/engine.h

@@ -59,8 +59,8 @@ class EngineBase {
   struct Buffer {
     void* buffer{nullptr};  // buffer should be allocated only once.
-    int max_size;           // buffer allocated space.
-    int size;               // data size.
+    size_t max_size;        // buffer allocated space.
+    size_t size;            // data size.
     DeviceType device{DeviceType::UNK};  // tells which device this buffer is on.
   };
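The switch from int to size_t keeps the Buffer fields consistent with sizeof arithmetic and with the size parameters of the CUDA allocation and copy APIs, and avoids signed overflow for buffers larger than 2 GB. A minimal sketch of how such a struct might be managed; the EnsureCapacity helper below is hypothetical, not part of this commit:

#include <cstddef>
#include <cuda_runtime.h>

// Hypothetical mirror of the Buffer fields after this commit.
struct Buffer {
  void* buffer{nullptr};  // allocated only once per name
  size_t max_size{0};     // capacity actually allocated
  size_t size{0};         // bytes of valid data
};

// Hypothetical helper: grow the device allocation to hold `bytes` bytes.
inline void EnsureCapacity(Buffer* buf, size_t bytes) {
  if (bytes > buf->max_size) {
    cudaFree(buf->buffer);            // no-op on nullptr
    cudaMalloc(&buf->buffer, bytes);  // cudaMalloc takes a size_t, not an int
    buf->max_size = bytes;
  }
  buf->size = bytes;
}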
paddle/fluid/inference/tensorrt/CMakeLists.txt

nv_library(tensorrt_engine SRCS engine.cc DEPS framework_proto)
nv_test(test_tensorrt SRCS test_tensorrt.cc DEPS dynload_cuda device_context dynamic_loader)
nv_test(test_tensorrt_engine SRCS test_engine.cc DEPS dynload_cuda tensorrt_engine)
add_subdirectory(convert)
paddle/fluid/inference/tensorrt/convert/CMakeLists.txt

 nv_test(test_op_converter SRCS test_op_converter.cc mul_op.cc conv2d_op.cc DEPS
   ${FLUID_CORE_MODULES})
-nv_test(test_trt_activation_op SRCS test_activation_op.cc activation_op.cc
+nv_test(test_trt_activation_op SRCS test_activation_op.cc activation_op.cc io_converter.cc
     DEPS ${FLUID_CORE_MODULES} activation_op tensorrt_engine)
 nv_test(test_io_converter SRCS test_io_converter.cc io_converter.cc DEPS dynload_cuda dynamic_loader lod_tensor)
paddle/fluid/inference/tensorrt/convert/io_converter.cc

@@ -58,7 +58,7 @@ class DefaultIOConverter : public EngineIOConverter {
                                            cudaMemcpyDeviceToHost, *stream_));
     } else if (is_gpu_place(place)) {
       PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(out->data<float>(), in, size,
-                                           cudaMemcpyHostToHost, *stream_));
+                                           cudaMemcpyDeviceToDevice, *stream_));
     } else {
       PADDLE_THROW("Unknown device for converter");
     }

@@ -66,6 +66,7 @@ class DefaultIOConverter : public EngineIOConverter {
   }
 };

+// fluid LodTensor <-> tensorrt ITensor
 REGISTER_TENSORRT_IO_CONVERTER(default, DefaultIOConverter);

 }  // namespace tensorrt
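The one-word fix matters because the cudaMemcpyKind argument tells the runtime which address spaces the two pointers live in: for a GPU-resident output being filled from a GPU buffer, the correct kind is cudaMemcpyDeviceToDevice, not cudaMemcpyHostToHost. An illustrative helper (not from the commit) that derives the kind from the two placements:

#include <cuda_runtime.h>

// Illustrative only: choose the memcpy kind from where the source and
// destination pointers live. Passing the wrong kind (e.g. cudaMemcpyHostToHost
// for two device pointers) makes the runtime copy from the wrong address space.
inline cudaMemcpyKind KindFor(bool src_on_gpu, bool dst_on_gpu) {
  if (src_on_gpu && dst_on_gpu) return cudaMemcpyDeviceToDevice;
  if (src_on_gpu) return cudaMemcpyDeviceToHost;
  if (dst_on_gpu) return cudaMemcpyHostToDevice;
  return cudaMemcpyHostToHost;
}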
paddle/fluid/inference/tensorrt/convert/test_activation_op.cc

@@ -74,13 +74,13 @@ void Compare(const std::string op_type, float input, float expect) {
   // convert LoDTensor to ITensor
   size_t size = x_tensor->memory_size();
-  EngineIOConverter::ConvertInput(op_type, *x_tensor, engine->buffer("X"),
-                                  size, &stream);
+  EngineIOConverter::ConvertInput(op_type, *x_tensor,
+                                  engine->buffer("X").buffer, size, &stream);
   // run tensorrt op
   engine->Execute(1);
   // convert ITensor to LoDTensor
-  EngineIOConverter::ConvertOutput(op_type, engine->buffer("Out"), out_tensor,
-                                   size, &stream);
+  EngineIOConverter::ConvertOutput(op_type, engine->buffer("Out").buffer,
+                                   out_tensor, size, &stream);

   // get tensorrt output
   std::vector<float> out2;
   framework::TensorToVector(*out_tensor, ctx, &out2);
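Both test call sites change the same way: engine->buffer(name) now returns the Buffer struct from engine.h rather than a raw pointer, so the tests pass its .buffer field to the converters. A simplified sketch of the new call shape, with a stub standing in for the real EngineIOConverter API:

#include <cstddef>
#include <cstdio>

// Simplified stand-ins for the types in this commit.
struct Buffer {
  void* buffer{nullptr};
  size_t max_size{0};
  size_t size{0};
};

// Stub with the pointer-plus-size shape the updated call sites use.
void ConvertInput(void* dst, size_t size) {
  std::printf("would convert %zu bytes into %p\n", size, dst);
}

int main() {
  Buffer x;                    // in the test this comes from engine->buffer("X")
  ConvertInput(x.buffer, 64);  // pass Buffer::buffer, not the Buffer itself
  return 0;
}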