Paddle (forked from PaddlePaddle / Paddle)
Commit ec4155d7 (unverified)
Authored Sep 24, 2020 by Wilber; committed via GitHub on Sep 24, 2020

windows lib size crop from 5.4G to 3.9G (#27477)

Parent: b6ecf356
Showing 6 changed files with 112 additions and 16 deletions (+112 −16):
cmake/generic.cmake                          +9   −0
cmake/inference_lib.cmake                    +7   −4
cmake/init.cmake                             +3   −0
cmake/paddle_win.props                       +91  −0
paddle/fluid/inference/CMakeLists.txt        +2   −7
paddle/fluid/inference/api/demo_ci/run.sh    +0   −5
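In outline, the reduction comes from three pieces shown in the diffs below: a new MSBuild property sheet (cmake/paddle_win.props) that controls how NVCC builds CUDA sources under Visual Studio, a WIN_PROPS variable (cmake/init.cmake) that the nv_library/nv_binary/nv_test helpers attach to their targets through the VS_USER_PROPS property (cmake/generic.cmake), and the removal of the Windows-GPU special case so that paddle_fluid is produced by create_static_lib on every platform (paddle/fluid/inference/CMakeLists.txt and demo_ci/run.sh), with the C API copy path in cmake/inference_lib.cmake adjusted for the Windows output layout.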
cmake/generic.cmake

@@ -446,6 +446,9 @@ function(nv_library TARGET_NAME)
         message(FATAL "Please specify source file or library in nv_library.")
       endif()
     endif(nv_library_SRCS)
+    if(WIN32)
+      set_target_properties(${TARGET_NAME} PROPERTIES VS_USER_PROPS ${WIN_PROPS})
+    endif(WIN32)
   endif()
 endfunction(nv_library)

@@ -461,6 +464,9 @@ function(nv_binary TARGET_NAME)
       add_dependencies(${TARGET_NAME} ${nv_binary_DEPS})
       common_link(${TARGET_NAME})
     endif()
+    if(WIN32)
+      set_target_properties(${TARGET_NAME} PROPERTIES VS_USER_PROPS ${WIN_PROPS})
+    endif(WIN32)
   endif()
 endfunction(nv_binary)

@@ -482,6 +488,9 @@ function(nv_test TARGET_NAME)
     set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_cpu_deterministic=true)
     set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_init_allocated_mem=true)
     set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_cudnn_deterministic=true)
+    if(WIN32)
+      set_target_properties(${TARGET_NAME} PROPERTIES VS_USER_PROPS ${WIN_PROPS})
+    endif(WIN32)
   endif()
 endfunction(nv_test)
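For context, VS_USER_PROPS is the CMake target property (honored by the Visual Studio generators) that injects an MSBuild .props file into the generated project, which is how the new paddle_win.props sheet reaches every CUDA target built through these helpers. A minimal stand-alone sketch of the same mechanism; the project, target and source names here are hypothetical and not part of this commit:

# Hypothetical stand-alone example of the VS_USER_PROPS mechanism used above.
cmake_minimum_required(VERSION 3.10)
project(props_demo LANGUAGES CXX CUDA)

add_library(demo_kernels STATIC kernels.cu)   # hypothetical CUDA source
if(WIN32)
  # Same pattern as nv_library/nv_binary/nv_test: let the property sheet,
  # not per-target compile options, decide how NVCC handles device code.
  set_target_properties(demo_kernels PROPERTIES
    VS_USER_PROPS "${CMAKE_SOURCE_DIR}/cmake/paddle_win.props")
endif()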
cmake/inference_lib.cmake

@@ -19,9 +19,8 @@ set(PADDLE_INSTALL_DIR "${CMAKE_BINARY_DIR}/paddle_install_dir" CACHE STRING
 set(PADDLE_INFERENCE_INSTALL_DIR "${CMAKE_BINARY_DIR}/paddle_inference_install_dir" CACHE STRING
   "A path setting paddle inference shared and static libraries")
-# TODO(zhaolong)
-# At present, the size of static lib in Windows exceeds the system limit,
-# so the generation of static lib is temporarily turned off.
+# At present, the size of static lib in Windows is very large,
+# so we need to crop the library size.
 if(WIN32)
     #todo: remove the option
     option(WITH_STATIC_LIB "Compile demo with static/shared library, default use dynamic." OFF)

@@ -196,7 +195,11 @@ set(PADDLE_INFERENCE_C_INSTALL_DIR "${CMAKE_BINARY_DIR}/paddle_inference_c_insta
 copy_part_of_thrid_party(inference_lib_dist ${PADDLE_INFERENCE_C_INSTALL_DIR})

 set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid")
-set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/libpaddle_fluid_c.*)
+if(WIN32)
+    set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/${CMAKE_BUILD_TYPE}/paddle_fluid_c.*)
+else(WIN32)
+    set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/libpaddle_fluid_c.*)
+endif(WIN32)

 copy(inference_lib_dist
   SRCS ${src_dir}/inference/capi/paddle_c_api.h ${paddle_fluid_c_lib}
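The WIN32 branch above exists because the MSVC generator writes build outputs into a per-configuration subdirectory (for example Release) and does not add the lib prefix. A hedged sketch of the same selection logic, with example values rather than this project's real paths, that can be dropped into any CMakeLists.txt for a quick check:

# Illustrative only: how the C API library glob differs per platform.
set(PADDLE_BINARY_DIR "/tmp/paddle-build")   # example value, not the real build dir
set(CMAKE_BUILD_TYPE  "Release")             # example value
if(WIN32)
  # MSVC: per-configuration subdirectory, no "lib" prefix.
  set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/${CMAKE_BUILD_TYPE}/paddle_fluid_c.*)
else()
  set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/libpaddle_fluid_c.*)
endif()
message(STATUS "C API library glob: ${paddle_fluid_c_lib}")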
cmake/init.cmake

@@ -26,4 +26,7 @@ if(WITH_GPU)
     set(CMAKE_CUDA_FLAGS_MINSIZEREL "-O1 -DNDEBUG")
 endif()
+if(WIN32)
+    set(WIN_PROPS ${CMAKE_SOURCE_DIR}/cmake/paddle_win.props)
+endif()
cmake/paddle_win.props (new file, mode 100644)

<?xml version="1.0" encoding="utf-8"?>
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <ItemDefinitionGroup>
    <CudaCompile>
      <!-- Project schema: Host properties -->
      <UseHostDefines>true</UseHostDefines>
      <Emulation>false</Emulation>
      <HostDebugInfo Condition="'$(Configuration)' == 'Debug'">true</HostDebugInfo>
      <HostDebugInfo Condition="'$(Configuration)' != 'Debug'">false</HostDebugInfo>
      <FastMath>false</FastMath>
      <Optimization>InheritFromHost</Optimization>
      <Runtime>InheritFromHost</Runtime>
      <RuntimeChecks>InheritFromHost</RuntimeChecks>
      <TypeInfo>InheritFromHost</TypeInfo>
      <Warning>InheritFromHost</Warning>
      <BaseCommandLineTemplate>-ccbin "%(VCBinDir)" -x cu [GenerateRelocatableDeviceCode] [Include] [RequiredIncludes] [InterleaveSourceInPTX] [GPUDebugInfo] [GenerateLineInfo] [Keep] [KeepDir] [MaxRegCount] [PtxAsOptionV] [TargetMachinePlatform] [NvccCompilation] [CudaRuntime] [AdditionalOptions]</BaseCommandLineTemplate>
      <BuildCommandLineTemplate>--use-local-env</BuildCommandLineTemplate>
      <BuildDynamicCommandLineTemplate>[CodeGeneration]</BuildDynamicCommandLineTemplate>
      <CleanCommandLineTemplate>-clean</CleanCommandLineTemplate>
      <!-- <HostCommandLineTemplate>-Xcompiler "/EHsc [Warning] /nologo [Optimization] [ProgramDataBaseFileName] $(CudaForceSynchronousPdbWrites) /Zi [RuntimeChecks] [Runtime] [TypeInfo]"</HostCommandLineTemplate> -->
      <HostCommandLineTemplate>-Xcompiler "/EHsc [Warning] /nologo [Optimization] [ProgramDataBaseFileName] $(CudaForceSynchronousPdbWrites) [RuntimeChecks] [Runtime] [TypeInfo]"</HostCommandLineTemplate>
      <DriverApiCommandLineTemplate>%(BaseCommandLineTemplate) [CompileOut] "%(FullPath)"</DriverApiCommandLineTemplate>
      <RuntimeApiCommandLineTemplate>%(BaseCommandLineTemplate) [HostDebugInfo] [Emulation] [FastMath] [Defines] %(HostCommandLineTemplate) [CompileOut] "%(FullPath)"</RuntimeApiCommandLineTemplate>
      <CommandLineTemplate>
# (Approximate command-line. Settings inherited from host are not visible below.)
# (Please see the output window after a build for the full command-line)

# Driver API (NVCC Compilation Type is .cubin, .gpu, or .ptx)
set CUDAFE_FLAGS=--sdk_dir "$(WindowsSdkDir)"
"$(CudaToolkitNvccPath)" %(BuildCommandLineTemplate) %(DriverApiCommandLineTemplate)

# Runtime API (NVCC Compilation Type is hybrid object or .c file)
set CUDAFE_FLAGS=--sdk_dir "$(WindowsSdkDir)"
"$(CudaToolkitNvccPath)" %(BuildCommandLineTemplate) %(RuntimeApiCommandLineTemplate)
      </CommandLineTemplate>
      <ExecutionDescription>Compiling CUDA source file %(Identity)...</ExecutionDescription>
      <ExclusionDescription>Skipping CUDA source file %(Identity) (excluded from build).</ExclusionDescription>
      <!-- Miscellaneous -->
      <PropsCacheOutputFile>%(Filename)%(Extension).cache</PropsCacheOutputFile>
      <PropsCacheOutputPath>$(IntDir)%(PropsCacheOutputFile)</PropsCacheOutputPath>
      <CudaCompileCoreProject>$(MSBuildProjectFullPath)</CudaCompileCoreProject>
    </CudaCompile>
    <CudaLink>
      <PerformDeviceLink>true</PerformDeviceLink>
      <LinkOut>$(IntDir)$(TargetName).device-link.obj</LinkOut>
      <AdditionalLibraryDirectories></AdditionalLibraryDirectories>
      <UseHostLibraryDirectories>true</UseHostLibraryDirectories>
      <AdditionalDependencies></AdditionalDependencies>
      <UseHostLibraryDependencies>true</UseHostLibraryDependencies>
      <GPUDebugInfo>InheritFromProject</GPUDebugInfo>
      <Optimization>InheritFromProject</Optimization>
      <!-- Implicitly inherited from the project via @(CudaCompile) -->
      <CodeGeneration></CodeGeneration>
      <RuntimeChecks></RuntimeChecks>
      <Runtime></Runtime>
      <TargetMachinePlatform></TargetMachinePlatform>
      <TypeInfo></TypeInfo>
      <Warning></Warning>
      <Inputs></Inputs>
      <!-- <HostCommandLineTemplate>-Xcompiler "/EHsc [Warning] /nologo [Optimization] /Zi [RuntimeChecks] [Runtime] [TypeInfo]"</HostCommandLineTemplate> -->
      <HostCommandLineTemplate>-Xcompiler "/EHsc [Warning] /nologo [Optimization] [RuntimeChecks] [Runtime] [TypeInfo]"</HostCommandLineTemplate>
      <LinkCommandLineTemplate>"$(CudaToolkitNvccPath)" -dlink [LinkOut] %(HostCommandLineTemplate) [AdditionalLibraryDirectories] [AdditionalDependencies] [AdditionalOptions] [CodeGeneration] [GPUDebugInfo] [TargetMachinePlatform] [Inputs]</LinkCommandLineTemplate>
      <CommandLineTemplate>
# (Approximate command-line. Settings inherited from host are not visible below.)
# (Please see the output window after a build for the full command-line)

%(LinkCommandLineTemplate)
      </CommandLineTemplate>
    </CudaLink>
    <Link>
      <AdditionalLibraryDirectories>%(AdditionalLibraryDirectories);$(CudaToolkitLibDir)</AdditionalLibraryDirectories>
    </Link>
    <ClCompile>
      <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(CudaToolkitIncludeDir)</AdditionalIncludeDirectories>
    </ClCompile>
  </ItemDefinitionGroup>
</Project>
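Compared with the stock CUDA MSBuild template (kept above as comments for reference), the active HostCommandLineTemplate entries no longer pass /Zi to the host compiler, and HostDebugInfo is forced to false for every configuration except Debug, so host-side debug information is not generated for each CUDA translation unit; Optimization, Runtime, RuntimeChecks, TypeInfo and Warning are inherited from the Visual Studio project. Together with the -O1 -DNDEBUG device flags set in cmake/init.cmake, these appear to be the main knobs behind the reported drop from 5.4G to 3.9G.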
paddle/fluid/inference/CMakeLists.txt

@@ -44,14 +44,9 @@ add_subdirectory(api)
 set(STATIC_INFERENCE_API paddle_inference_api analysis_predictor
     zero_copy_tensor reset_tensor_array
     analysis_config paddle_pass_builder activation_functions ${mkldnn_quantizer_cfg})
-# TODO(xingzhaolong, jiweibo): remove this and create_static_lib(paddle_fluid) on windows GPU
-if(WIN32 AND WITH_GPU)
-    cc_library(paddle_fluid DEPS ${fluid_modules} ${STATIC_INFERENCE_API})
-else()
-    create_static_lib(paddle_fluid ${fluid_modules} ${STATIC_INFERENCE_API})
-endif()
+create_static_lib(paddle_fluid ${fluid_modules} ${STATIC_INFERENCE_API})

-if(NOT APPLE AND NOT WIN32)
+if(NOT APPLE)
     # TODO(liuyiqu: Temporarily disable the link flag because it is not support on Mac.
     set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.sym")
     set_target_properties(paddle_fluid PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
paddle/fluid/inference/api/demo_ci/run.sh

@@ -68,11 +68,6 @@ rm -rf *
 for WITH_STATIC_LIB in ON OFF; do
     if [ $(echo `uname` | grep "Win") != "" ]; then
-        # TODO(xingzhaolong, jiweibo): remove this if windows GPU library is ready.
-        if [ $TEST_GPU_CPU == ON] && [ $WITH_STATIC_LIB == ON ]; then
-            return 0
-        fi
         # -----simple_on_word2vec on windows-----
         cmake .. -G "Visual Studio 14 2015" -A x64 -DPADDLE_LIB=${inference_install_dir} \
             -DWITH_MKL=$TURN_ON_MKL \
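With paddle_fluid now built through create_static_lib on Windows GPU as well (see paddle/fluid/inference/CMakeLists.txt above), the early return that skipped the static-library demo for the Windows GPU configuration is no longer needed, which is why the TODO guard is deleted here and that demo path is exercised again.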