BaiXuePrincess / Paddle · forked from PaddlePaddle / Paddle
Commit 8ce2482b (unverified)
Authored Jan 11, 2021 by 石晓伟 · committed by GitHub on Jan 11, 2021
fix header file paths of gflags, commit 1, test=develop (#30271)
Parent: c7371b7b

Showing 15 changed files with 36 additions and 33 deletions (+36 -33)
paddle/fluid/distributed/service/communicator.cc           +12 -12
paddle/fluid/distributed/service/env.h                      +9  -8
paddle/fluid/distributed/table/depends/dense.h              +1  -1
paddle/fluid/distributed/table/depends/initializers.h       +1  -1
paddle/fluid/distributed/table/depends/large_scale_kv.h     +1  -1
paddle/fluid/distributed/table/depends/sparse.h             +1  -1
paddle/fluid/framework/operator.cc                          +1  -1
paddle/fluid/framework/unused_var_check.cc                  +1  -1
paddle/fluid/framework/unused_var_check.h                   +1  -1
paddle/fluid/imperative/profiler.cc                         +1  -1
paddle/fluid/inference/analysis/analyzer.h                  +1  -1
paddle/fluid/inference/analysis/flags.h                     +3  -1
paddle/fluid/inference/analysis/ut_helper.h                 +1  -1
paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc    +1  -1
paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc    +1  -1
paddle/fluid/distributed/service/communicator.cc

@@ -13,10 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/distributed/service/communicator.h"
-#include <gflags/gflags.h>
-#include <google/protobuf/text_format.h>
-#include "paddle/fluid/distributed/table/table.h"
+#include <google/protobuf/text_format.h>
 #include <paddle/fluid/framework/program_desc.h>
 #include <algorithm>
@@ -25,6 +23,8 @@ limitations under the License. */
 #include <thread>  // NOLINT
 #include <unordered_set>
+#include "gflags/gflags.h"
+#include "paddle/fluid/distributed/table/table.h"
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/selected_rows.h"
 #include "paddle/fluid/framework/tensor_util.h"
@@ -64,7 +64,7 @@ void Communicator::init_gflag(const std::string &gflags) {
   flags.insert(it, "exe default");
   char *flags_ptr[flags.size()];
   for (size_t i = 0; i < flags.size(); ++i) {
-    flags_ptr[i] = (char *)(flags[i].c_str());
+    flags_ptr[i] = (char *)(flags[i].c_str());  // NOLINT
   }
   int params_cnt = flags.size();
   char **params_ptr = &(flags_ptr[0]);
@@ -225,7 +225,7 @@ void Communicator::RpcSendDense(const CommContext &ctx, const Scope &scope) {
   DownpourBrpcClosure *closure = new DownpourBrpcClosure(
       request_call_num, [this, request_call_num](void *done) {
         int ret = 0;
-        auto *closure = (DownpourBrpcClosure *)done;
+        auto *closure = (DownpourBrpcClosure *)done;  // NOLINT
         for (size_t i = 0; i < request_call_num; ++i) {
           if (closure->check_response(i, PS_PUSH_DENSE_TABLE) != 0) {
             ret = -1;
@@ -262,7 +262,7 @@ void Communicator::RpcSendSparseParam(const std::string &varname, int table_id,
   DownpourBrpcClosure *closure = new DownpourBrpcClosure(
       request_call_num, [this, request_call_num](void *done) {
         int ret = 0;
-        auto *closure = (DownpourBrpcClosure *)done;
+        auto *closure = (DownpourBrpcClosure *)done;  // NOLINT
         for (size_t i = 0; i < request_call_num; ++i) {
           if (closure->check_response(i, PS_PUSH_SPARSE_PARAM) != 0) {
             ret = -1;
@@ -300,7 +300,7 @@ void Communicator::RpcSendSparse(const std::string &var_name, int table_id,
   DownpourBrpcClosure *closure = new DownpourBrpcClosure(
       request_call_num, [this, request_call_num](void *done) {
         int ret = 0;
-        auto *closure = (DownpourBrpcClosure *)done;
+        auto *closure = (DownpourBrpcClosure *)done;  // NOLINT
         for (size_t i = 0; i < request_call_num; ++i) {
           if (closure->check_response(i, PS_PUSH_SPARSE_TABLE) != 0) {
             ret = -1;
@@ -333,9 +333,9 @@ void Communicator::RpcRecvSparse(const std::string &varname, int table_id,
     push_g_vec.push_back(tensor->data<float>() + i * dim);
   }
 
-  auto status = _worker_ptr->pull_sparse((float **)push_g_vec.data(), table_id,
-                                         sparse_push_keys.data(),
-                                         sparse_push_keys.size());
+  auto status = _worker_ptr->pull_sparse(
+      (float **)push_g_vec.data(), table_id,  // NOLINT
+      sparse_push_keys.data(), sparse_push_keys.size());
   status.wait();
   return;
 }
@@ -397,7 +397,7 @@ void Communicator::SendGlobalStep(const CommContext &ctx, int batches,
   DownpourBrpcClosure *closure = new DownpourBrpcClosure(
       request_call_num, [this, request_call_num](void *done) {
         int ret = 0;
-        auto *closure = (DownpourBrpcClosure *)done;
+        auto *closure = (DownpourBrpcClosure *)done;  // NOLINT
         for (size_t i = 0; i < request_call_num; ++i) {
           if (closure->check_response(i, PS_PUSH_GLOBAL_STEP) != 0) {
             ret = -1;
@@ -1106,7 +1106,7 @@ void GeoCommunicator::SendSparse(const std::string &varname,
   ++_async_call_num;
   DownpourBrpcClosure *closure = new DownpourBrpcClosure(1, [this](void *done) {
     int ret = 0;
-    auto *closure = (DownpourBrpcClosure *)done;
+    auto *closure = (DownpourBrpcClosure *)done;  // NOLINT
     if (closure->check_response(0, PS_PUSH_SPARSE_TABLE) != 0) {
       ret = -1;
     }
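
The init_gflag hunk above only touches the line that copies each flag string into a raw char * array, but the surrounding pattern is worth spelling out: the routine builds a synthetic argv so a runtime flag string can be handed to the gflags parser. A hedged sketch of that pattern follows; the helper name is invented for illustration, and the ParseCommandLineFlags call is an assumption about what happens after the lines shown in the hunk:

#include <string>
#include <vector>

#include "gflags/gflags.h"

// Illustrative helper (not Paddle API): mimics the argv-building seen in
// Communicator::init_gflag. Each flag string becomes one argv entry, with a
// dummy program name inserted at the front ("exe default" in the diff).
void ParseFlagStrings(std::vector<std::string> flags) {
  flags.insert(flags.begin(), "exe default");
  std::vector<char *> flags_ptr(flags.size());
  for (size_t i = 0; i < flags.size(); ++i) {
    // const_cast here instead of the C-style cast used in the diff.
    flags_ptr[i] = const_cast<char *>(flags[i].c_str());
  }
  int params_cnt = static_cast<int>(flags.size());
  char **params_ptr = flags_ptr.data();
  // Assumed continuation of the routine (outside the hunk shown above).
  gflags::ParseCommandLineFlags(&params_cnt, &params_ptr, false);
}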
paddle/fluid/distributed/service/env.h

@@ -15,7 +15,6 @@
 #pragma once
 
 #include <arpa/inet.h>
-#include <gflags/gflags.h>
 #include <glog/logging.h>
 #include <netinet/in.h>
 #include <stdio.h>
@@ -24,6 +23,7 @@
 #include <string>
 #include <unordered_set>
 #include <vector>
+#include "gflags/gflags.h"
 
 namespace paddle {
 namespace distributed {
@@ -55,7 +55,7 @@ struct PSHost {
     rank = host_label & rank_label_mask;
     port = (host_label >> 12) & port_label_mask;
     uint32_t ip_addr = (host_label >> 32);
-    ip = inet_ntoa(*(in_addr *)&ip_addr);
+    ip = inet_ntoa(*(in_addr *)&ip_addr);  // NOLINT
   }
 
   std::string to_string() {
@@ -108,7 +108,7 @@ struct PSHost {
 class PSEnvironment {
  public:
-  explicit PSEnvironment() {}
+  explicit PSEnvironment() {}  // NOLINT
   virtual ~PSEnvironment() {}
 
   virtual int32_t set_ps_servers(uint64_t *host_sign_list, int node_num) {
@@ -162,10 +162,11 @@ class PSEnvironment {
   }
 
  protected:
-  //注册一个host
-  virtual int32_t registe_ps_host(const std::string &ip, uint32_t port,
-                                  int32_t rank, std::vector<PSHost> &host_list,
-                                  std::unordered_set<uint64_t> &sign_set) {
+  //注册一个host  // NOLINT
+  virtual int32_t registe_ps_host(
+      const std::string &ip, uint32_t port, int32_t rank,
+      std::vector<PSHost> &host_list,            // NOLINT
+      std::unordered_set<uint64_t> &sign_set) {  // NOLINT
     PSHost host;
     host.ip = ip;
     host.port = port;
@@ -198,7 +199,7 @@ class PSEnvironment {
 class PaddlePSEnvironment : public PSEnvironment {
  public:
-  explicit PaddlePSEnvironment() {}
+  explicit PaddlePSEnvironment() {}  // NOLINT
   virtual ~PaddlePSEnvironment() {}
 
   virtual int32_t set_ps_servers(uint64_t *host_sign_list, int node_num) {
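
Aside from the include move, every other hunk in env.h just appends a // NOLINT marker: it tells cpplint to skip that single line, which is how the C-style cast in inet_ntoa(*(in_addr *)&ip_addr) and the non-const reference parameters of registe_ps_host are kept as-is without tripping the linter. A small self-contained illustration of the mechanism, using a made-up helper rather than the Paddle code:

#include <cstdint>
#include <string>
#include <vector>

// Hypothetical helper, only to show the NOLINT pattern applied in env.h.
// cpplint would normally warn about the non-const reference parameter
// (runtime/references) and the C-style cast (readability/casting); the
// trailing comment suppresses the check for that one line.
void AppendEndpoint(std::vector<std::string> &endpoints,  // NOLINT
                    const std::string &ip, uint32_t port) {
  endpoints.push_back(ip + ":" + std::to_string((int)port));  // NOLINT
}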
paddle/fluid/distributed/table/depends/dense.h

@@ -14,13 +14,13 @@
 #pragma once
-#include <gflags/gflags.h>
 #include <math.h>  // for sqrt in CPU and CUDA
 #include <functional>
 #include <memory>
 #include <string>
 #include <utility>
 #include <vector>
+#include "gflags/gflags.h"
 #include "paddle/fluid/distributed/common/utils.h"
paddle/fluid/distributed/table/depends/initializers.h

@@ -14,12 +14,12 @@
 #pragma once
-#include <gflags/gflags.h>
 #include <functional>
 #include <memory>
 #include <string>
 #include <utility>
 #include <vector>
+#include "gflags/gflags.h"
 #include "paddle/fluid/framework/generator.h"
paddle/fluid/distributed/table/depends/large_scale_kv.h

@@ -15,7 +15,6 @@
 #pragma once
 
 #include <ThreadPool.h>
-#include <gflags/gflags.h>
 #include <functional>
 #include <future>  // NOLINT
 #include <memory>
@@ -25,6 +24,7 @@
 #include <unordered_set>
 #include <utility>
 #include <vector>
+#include "gflags/gflags.h"
 #include "paddle/fluid/distributed/common/utils.h"
 #include "paddle/fluid/distributed/table/depends/initializers.h"
paddle/fluid/distributed/table/depends/sparse.h

@@ -14,7 +14,6 @@
 #pragma once
-#include <gflags/gflags.h>
 #include <math.h>  // for sqrt in CPU and CUDA
 #include <functional>
 #include <memory>
@@ -22,6 +21,7 @@
 #include <unordered_map>
 #include <utility>
 #include <vector>
+#include "gflags/gflags.h"
 #include "paddle/fluid/distributed/common/utils.h"
 #include "paddle/fluid/distributed/table/depends/large_scale_kv.h"
paddle/fluid/framework/operator.cc

@@ -14,7 +14,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/operator.h"
-#include <gflags/gflags.h>
 #include <glog/logging.h>
 #include <algorithm>
@@ -23,6 +22,7 @@ limitations under the License. */
 #include <unordered_set>
 #include <vector>
+#include "gflags/gflags.h"
 #include "paddle/fluid/framework/data_transform.h"
 #include "paddle/fluid/framework/data_type_transform.h"
 #include "paddle/fluid/framework/details/nan_inf_utils.h"
paddle/fluid/framework/unused_var_check.cc

@@ -14,10 +14,10 @@ limitations under the License. */
 #include "paddle/fluid/framework/unused_var_check.h"
-#include <gflags/gflags.h>
 #include <glog/logging.h>
 #include <string>
 #include <vector>
+#include "gflags/gflags.h"
 #include "paddle/fluid/framework/no_need_buffer_vars_inference.h"
 #include "paddle/fluid/framework/op_info.h"
paddle/fluid/framework/unused_var_check.h

@@ -14,10 +14,10 @@ limitations under the License. */
 #pragma once
-#include <gflags/gflags.h>
 #include <glog/logging.h>
 #include <string>
 #include <unordered_set>
+#include "gflags/gflags.h"
 
 namespace paddle {
 namespace framework {
paddle/fluid/imperative/profiler.cc

@@ -17,9 +17,9 @@
 #ifdef WITH_GPERFTOOLS
 #include "gperftools/profiler.h"
 #endif
-#include <gflags/gflags.h>
 #include <glog/logging.h>
 #include <mutex>  // NOLINT
+#include "gflags/gflags.h"
 
 DEFINE_string(tracer_profile_fname, "xxgperf",
paddle/fluid/inference/analysis/analyzer.h

@@ -35,9 +35,9 @@ limitations under the License. */
  * phase in the inference service.
  */
-#include <gflags/gflags.h>
 #include <string>
 #include <vector>
+#include "gflags/gflags.h"
 #include "paddle/fluid/inference/analysis/analysis_pass.h"
 #include "paddle/fluid/inference/analysis/flags.h"
paddle/fluid/inference/analysis/flags.h

@@ -12,7 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include <gflags/gflags.h>
+#pragma once
+
+#include "gflags/gflags.h"
 
 // TODO(Superjomn) add a definition flag like PADDLE_WITH_TENSORRT and hide this
 // flag if not available.
paddle/fluid/inference/analysis/ut_helper.h

@@ -13,10 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #pragma once
-#include <gflags/gflags.h>
 #include <gtest/gtest.h>
 #include <fstream>
 #include <string>
+#include "gflags/gflags.h"
 #include "paddle/fluid/framework/executor.h"
 #include "paddle/fluid/inference/analysis/helper.h"
paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc

@@ -16,13 +16,13 @@ limitations under the License. */
  * This file contains a simple demo for how to take a model for inference.
  */
-#include <gflags/gflags.h>
 #include <glog/logging.h>
 #include <algorithm>
 #include <memory>
 #include <thread>  //NOLINT
+#include "gflags/gflags.h"
 
 #include "utils.h"  // NOLINT
 
 DEFINE_string(dirname, "", "Directory of the inference model.");
paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc

@@ -16,8 +16,8 @@ limitations under the License. */
  * This file contains demo of mobilenet for tensorrt.
  */
-#include <gflags/gflags.h>
 #include <glog/logging.h>  // use glog instead of CHECK to avoid importing other paddle header files.
+#include "gflags/gflags.h"
 
 #include "utils.h"  // NOLINT
 
 DECLARE_double(fraction_of_gpu_memory_to_use);