BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 55f0d840 (unverified)
Authored May 02, 2018 by Abhinav Arora · committed by GitHub on May 02, 2018

Fix Cpplint Issues in fluid/inference/tensorrt/ (#10318)

* Fix CPPLint issues in fluid/inference/tensorrt/
* Fix compile errors

Parent: 0bc44c18
Showing 3 changed files with 19 additions and 19 deletions (+19 −19):

- paddle/fluid/inference/tensorrt/engine.h (+1 −1)
- paddle/fluid/inference/tensorrt/helper.h (+5 −5)
- paddle/fluid/inference/tensorrt/test_tensorrt.cc (+13 −13)
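Most of the diff is mechanical: cpplint's `runtime/references` check (from the Google C++ Style Guide) flags non-const reference parameters, so the logger and execution-context parameters become pointers, and every call site gains or loses an `&`/`*` accordingly. A minimal standalone sketch of the pattern, with hypothetical names (not Paddle code):

```cpp
// Standalone sketch of the cpplint runtime/references fix: a parameter
// the callee may mutate is passed as a pointer so the mutation is
// visible at the call site.
#include <iostream>

// Before (flagged by cpplint): void Scale(int& value, int factor);
// After: take a pointer instead of a non-const reference.
void Scale(int* value, int factor) { *value *= factor; }

int main() {
  int v = 21;
  Scale(&v, 2);            // The `&` at the call site signals mutation.
  std::cout << v << "\n";  // Prints 42.
  return 0;
}
```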
paddle/fluid/inference/tensorrt/engine.h

```diff
@@ -65,7 +65,7 @@ class TensorRTEngine : public EngineBase {
   // Initialize the inference network, so that TensorRT layers can add to this
   // network.
   void InitNetwork() {
-    infer_builder_.reset(createInferBuilder(logger_));
+    infer_builder_.reset(createInferBuilder(&logger_));
     infer_network_.reset(infer_builder_->createNetwork());
   }
   // After finishing adding ops, freeze this network and creates the executation
```
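With `createInferBuilder` now taking `nvinfer1::ILogger*`, `InitNetwork` passes `&logger_`. The factory's raw result is stored through `.reset()` on a smart-pointer member; since TensorRT objects are released via `destroy()` rather than `delete`, a custom deleter is the usual way to hold them. A hypothetical sketch of that pattern (illustrative names, not Paddle's actual member types):

```cpp
// Hypothetical sketch of holding a destroy()-style factory result in a
// std::unique_ptr with a custom deleter, as infer_builder_.reset(...) does.
#include <memory>

struct Builder {                   // Stand-in for nvinfer1::IBuilder.
  void destroy() { delete this; }  // TensorRT-style release method.
 private:
  ~Builder() = default;            // Force release through destroy().
};

struct Destroyer {
  void operator()(Builder* b) const {
    if (b != nullptr) b->destroy();
  }
};

int main() {
  std::unique_ptr<Builder, Destroyer> infer_builder;
  infer_builder.reset(new Builder);  // Mirrors infer_builder_.reset(...).
  return 0;                          // destroy() runs when the pointer dies.
}
```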
paddle/fluid/inference/tensorrt/helper.h

```diff
@@ -46,13 +46,13 @@ const int kDataTypeSize[] = {
 // The following two API are implemented in TensorRT's header file, cannot load
 // from the dynamic library. So create our own implementation and directly
 // trigger the method from the dynamic library.
-static nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger& logger) {
+static nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger* logger) {
   return static_cast<nvinfer1::IBuilder*>(
-      dy::createInferBuilder_INTERNAL(&logger, NV_TENSORRT_VERSION));
+      dy::createInferBuilder_INTERNAL(logger, NV_TENSORRT_VERSION));
 }
-static nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger& logger) {
+static nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger* logger) {
   return static_cast<nvinfer1::IRuntime*>(
-      dy::createInferRuntime_INTERNAL(&logger, NV_TENSORRT_VERSION));
+      dy::createInferRuntime_INTERNAL(logger, NV_TENSORRT_VERSION));
 }
 
 // A logger for create TensorRT infer builder.
@@ -80,7 +80,7 @@ class NaiveLogger : public nvinfer1::ILogger {
     return *x;
   }
 
-  virtual ~NaiveLogger() override {}
+  ~NaiveLogger() override {}
 };
 
 }  // namespace tensorrt
```
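The second hunk drops a redundant `virtual`: since C++11, `override` already requires the function to override a virtual one, so `virtual ... override` says the same thing twice and style checkers flag it. A minimal sketch (not Paddle code):

```cpp
// Minimal sketch of the `virtual ~X() override` -> `~X() override` cleanup.
struct Base {
  virtual ~Base() = default;  // The base class makes the destructor virtual.
};

struct Derived : Base {
  // Redundant spelling: virtual ~Derived() override {}
  ~Derived() override {}  // `override` alone documents and checks the intent.
};

int main() {
  Base* b = new Derived;
  delete b;  // Still dispatches to ~Derived(): virtual-ness is inherited.
  return 0;
}
```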
paddle/fluid/inference/tensorrt/test_tensorrt.cc

```diff
@@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include <cuda.h>
+#include <cuda_runtime_api.h>
 #include <glog/logging.h>
 #include <gtest/gtest.h>
 #include "NvInfer.h"
-#include "cuda.h"
-#include "cuda_runtime_api.h"
 #include "paddle/fluid/platform/dynload/tensorrt.h"
 
 namespace dy = paddle::platform::dynload;
```
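The include changes above apply what appears to be cpplint's include-style convention: the CUDA SDK headers are system headers, so `"cuda.h"` and `"cuda_runtime_api.h"` become `<cuda.h>` and `<cuda_runtime_api.h>` and move ahead of the quoted project header. A generic sketch of the convention, with standard-library headers standing in for the CUDA ones:

```cpp
// Generic sketch of the include convention: headers you don't own use
// angle brackets; your own project's headers use quotes.
#include <cstdio>  // System / third-party header: <...>
#include <vector>  // System / third-party header: <...>
// #include "my_project/util.h"  // Hypothetical project header: "..."

int main() {
  std::vector<int> v{1, 2, 3};
  std::printf("%zu\n", v.size());  // Prints 3.
  return 0;
}
```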
```diff
@@ -43,7 +43,7 @@ class Logger : public nvinfer1::ILogger {
 
 class ScopedWeights {
  public:
-  ScopedWeights(float value) : value_(value) {
+  explicit ScopedWeights(float value) : value_(value) {
     w.type = nvinfer1::DataType::kFLOAT;
     w.values = &value_;
     w.count = 1;
```
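Marking the one-argument constructor `explicit` addresses cpplint's `runtime/explicit` check: without it, a bare `float` could silently convert to a `ScopedWeights`. A standalone sketch with a hypothetical type (not Paddle's):

```cpp
// Sketch of why single-argument constructors get `explicit`.
struct Weights {
  explicit Weights(float value) : value_(value) {}
  float value_;
};

void Consume(const Weights& w) { (void)w.value_; }

int main() {
  Consume(Weights(2.0f));  // OK: the conversion is spelled out.
  // Consume(2.0f);        // Compiled before `explicit`; now a hard error.
  return 0;
}
```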
```diff
@@ -58,13 +58,13 @@ class ScopedWeights {
 // The following two API are implemented in TensorRT's header file, cannot load
 // from the dynamic library. So create our own implementation and directly
 // trigger the method from the dynamic library.
-nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger& logger) {
+nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger* logger) {
   return static_cast<nvinfer1::IBuilder*>(
-      dy::createInferBuilder_INTERNAL(&logger, NV_TENSORRT_VERSION));
+      dy::createInferBuilder_INTERNAL(logger, NV_TENSORRT_VERSION));
 }
-nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger& logger) {
+nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger* logger) {
   return static_cast<nvinfer1::IRuntime*>(
-      dy::createInferRuntime_INTERNAL(&logger, NV_TENSORRT_VERSION));
+      dy::createInferRuntime_INTERNAL(logger, NV_TENSORRT_VERSION));
 }
 
 const char* kInputTensor = "input";
```
```diff
@@ -74,7 +74,7 @@ const char* kOutputTensor = "output";
 
 nvinfer1::IHostMemory* CreateNetwork() {
   Logger logger;
   // Create the engine.
-  nvinfer1::IBuilder* builder = createInferBuilder(logger);
+  nvinfer1::IBuilder* builder = createInferBuilder(&logger);
   ScopedWeights weights(2.);
   ScopedWeights bias(3.);
```
```diff
@@ -103,9 +103,9 @@ nvinfer1::IHostMemory* CreateNetwork() {
   return model;
 }
 
-void Execute(nvinfer1::IExecutionContext& context, const float* input,
+void Execute(nvinfer1::IExecutionContext* context, const float* input,
              float* output) {
-  const nvinfer1::ICudaEngine& engine = context.getEngine();
+  const nvinfer1::ICudaEngine& engine = context->getEngine();
   // Two binds, input and output
   ASSERT_EQ(engine.getNbBindings(), 2);
   const int input_index = engine.getBindingIndex(kInputTensor);
```
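Changing `Execute` to take `nvinfer1::IExecutionContext*` ripples through the body (`.` becomes `->`) and through the call site, where `Execute(*context, ...)` becomes `Execute(context, ...)`. A standalone sketch of that mechanical conversion, with hypothetical types:

```cpp
// Sketch of a reference parameter converted to a pointer parameter,
// mirroring the Execute() change in this hunk.
#include <cassert>

struct Context {
  int value = 0;
};

// Before: void Execute(Context& context, int input) { context.value = ...; }
void Execute(Context* context, int input) {
  assert(context != nullptr);  // Pointer parameters deserve a null check.
  context->value = input;      // Member access changes from `.` to `->`.
}

int main() {
  Context ctx;
  Context* context = &ctx;
  Execute(context, 42);  // Was Execute(*context, 42) with the old signature.
  assert(ctx.value == 42);
  return 0;
}
```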
```diff
@@ -119,7 +119,7 @@ void Execute(nvinfer1::IExecutionContext& context, const float* input,
   // Copy the input to the GPU, execute the network, and copy the output back.
   ASSERT_EQ(0, cudaMemcpyAsync(buffers[input_index], input, sizeof(float),
                                cudaMemcpyHostToDevice, stream));
-  context.enqueue(1, buffers, stream, nullptr);
+  context->enqueue(1, buffers, stream, nullptr);
   ASSERT_EQ(0, cudaMemcpyAsync(output, buffers[output_index], sizeof(float),
                                cudaMemcpyDeviceToHost, stream));
   cudaStreamSynchronize(stream);
```
```diff
@@ -136,7 +136,7 @@ TEST(TensorrtTest, BasicFunction) {
   // Use the model to create an engine and an execution context.
   Logger logger;
-  nvinfer1::IRuntime* runtime = createInferRuntime(logger);
+  nvinfer1::IRuntime* runtime = createInferRuntime(&logger);
   nvinfer1::ICudaEngine* engine =
       runtime->deserializeCudaEngine(model->data(), model->size(), nullptr);
   model->destroy();
```
```diff
@@ -145,7 +145,7 @@ TEST(TensorrtTest, BasicFunction) {
   // Execute the network.
   float input = 1234;
   float output;
-  Execute(*context, &input, &output);
+  Execute(context, &input, &output);
   EXPECT_EQ(output, input * 2 + 3);
   // Destroy the engine.
```
登录