Unverified commit 0e30d56a, authored by huangjiyi, committed by GitHub

Add CMake option WITH_CPP_DIST for installing C++ distribution (#53282)

* rm paddle_install_dir

* update test=document_fix

* update test=document_fix

* update

* add test

* update

* update

* update

* update

* update

* update

* update

* update

* update

* add todo comment
Parent: 55c4eb8a
@@ -246,6 +246,7 @@ option(WITH_DISTRIBUTE "Compile with distributed support" OFF)
option(WITH_BRPC_RDMA "Use brpc rdma as the rpc protocol" OFF)
option(ON_INFER "Turn on inference optimization and inference-lib generation"
ON)
option(WITH_CPP_DIST "Install PaddlePaddle C++ distribution" OFF)
################################ Internal Configurations #######################################
option(WITH_NV_JETSON "Compile PaddlePaddle with NV JETSON" OFF)
option(WITH_PROFILER "Compile PaddlePaddle with GPU profiler and gperftools"
@@ -662,6 +663,21 @@ if(WITH_STRIP)
endif()
endif()
if(WITH_CPP_DIST)
# TODO(huangjiyi): Separate installing C++ distribution from python package
# installation and support for installing C++ distribution on more platforms.
if(NOT LINUX OR NOT WITH_PYTHON)
set(WITH_CPP_DIST
OFF
CACHE
STRING
"Currently C++ Distribution Generation is only available on Linux and compiling WITH_PYTHON=ON."
FORCE)
else()
include(paddle_lib)
endif()
endif()
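
Usage sketch (not from this patch): one way to turn the new option on, given the Linux/WITH_PYTHON guard above, is a CMake cache preload script passed via `cmake -C`; the file name cpp_dist.cmake below is illustrative.

# cpp_dist.cmake -- illustrative cache preload script, used as: cmake -C cpp_dist.cmake <paddle-src>
# Enables the C++ distribution together with the prerequisite checked by the guard above.
set(WITH_CPP_DIST ON CACHE BOOL "Install PaddlePaddle C++ distribution")
set(WITH_PYTHON ON CACHE BOOL "Currently required by WITH_CPP_DIST")
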
add_subdirectory(paddle)
if(WITH_PYTHON)
add_subdirectory(python)
......
# Paddle CMake configuration file
# -------
#
# Finds the Paddle library
#
# This will define the following variables:
#
# PADDLE_FOUND -- True if the system has the Paddle library
# PADDLE_INCLUDE_DIRS -- The include directories for Paddle
# PADDLE_LIBRARIES -- Libraries to link against
get_filename_component(PADDLE_INSTALL_PREFIX "${CMAKE_CURRENT_LIST_FILE}/../.." ABSOLUTE)
# include directories
set(PADDLE_INCLUDE_DIRS
${PADDLE_INSTALL_PREFIX}/include
${PADDLE_INSTALL_PREFIX}/include/third_party
)
# Library dependencies.
set(PADDLE_LIBRARIES_DIRS ${PADDLE_INSTALL_PREFIX}/lib)
link_directories(${PADDLE_LIBRARIES_DIRS})
file(GLOB PADDLE_LIBRARIES ${PADDLE_LIBRARIES_DIRS}/lib*)
find_package(PythonLibs @PY_VERSION@ REQUIRED)
list(APPEND PADDLE_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS})
list(APPEND PADDLE_LIBRARIES ${PYTHON_LIBRARIES})
if(@WITH_GPU@)
find_package(CUDA @CUDA_VERSION@ REQUIRED)
list(APPEND PADDLE_LIBRARIES ${CUDA_LIBRARIES})
endif()
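
For reference, a minimal downstream project consuming the generated PaddleConfig.cmake could look like the sketch below, modeled on the test project added later in this change; the project name, source file, and install path are illustrative assumptions.

cmake_minimum_required(VERSION 3.15)
project(paddle_consumer)  # illustrative project name
# Point CMake at the installed distribution; with this change it is generated
# under <paddle-build>/paddle_install_dir (the path below is a placeholder).
list(APPEND CMAKE_PREFIX_PATH "/path/to/paddle-build/paddle_install_dir")
find_package(Paddle REQUIRED)
include_directories(${PADDLE_INCLUDE_DIRS})
add_executable(paddle_consumer main.cc)  # main.cc is a placeholder source file
target_link_libraries(paddle_consumer ${PADDLE_LIBRARIES})
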
@@ -12,11 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# make package for paddle fluid shared and static library
set(PADDLE_INSTALL_DIR
"${CMAKE_BINARY_DIR}/paddle_install_dir"
CACHE STRING "A path setting paddle shared and static libraries")
# make package for paddle inference shared and static library
set(PADDLE_INFERENCE_INSTALL_DIR
"${CMAKE_BINARY_DIR}/paddle_inference_install_dir"
CACHE STRING "A path setting paddle inference shared and static libraries")
@@ -214,7 +210,7 @@ endfunction()
# inference library for only inference
set(inference_lib_deps third_party paddle_inference paddle_inference_c
paddle_inference_shared paddle_inference_c_shared)
add_custom_target(inference_lib_dist DEPENDS ${inference_lib_deps})
add_custom_target(inference_lib_dist ALL DEPENDS ${inference_lib_deps})
set(dst_dir "${PADDLE_INFERENCE_INSTALL_DIR}/third_party/threadpool")
copy(
@@ -387,158 +383,5 @@ if(WITH_STRIP AND NOT WIN32)
)
endif()
# fluid library for both train and inference
set(fluid_lib_deps inference_lib_dist)
add_custom_target(fluid_lib_dist ALL DEPENDS ${fluid_lib_deps})
set(dst_dir "${PADDLE_INSTALL_DIR}/paddle/fluid")
set(module "inference")
if(WIN32)
copy(
fluid_lib_dist
SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/api/paddle_*.h
${paddle_inference_lib}
DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module}
${dst_dir}/${module})
else()
copy(
fluid_lib_dist
SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/api/paddle_*.h
${paddle_inference_lib}
DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module})
endif()
set(module "framework")
set(framework_lib_deps framework_proto data_feed_proto trainer_desc_proto)
add_dependencies(fluid_lib_dist ${framework_lib_deps})
copy(
fluid_lib_dist
SRCS ${src_dir}/${module}/*.h
${src_dir}/${module}/details/*.h
${PADDLE_BINARY_DIR}/paddle/fluid/framework/trainer_desc.pb.h
${PADDLE_BINARY_DIR}/paddle/fluid/framework/framework.pb.h
${PADDLE_BINARY_DIR}/paddle/fluid/framework/data_feed.pb.h
${src_dir}/${module}/ir/memory_optimize_pass/*.h
${src_dir}/${module}/ir/*.h
${src_dir}/${module}/fleet/*.h
DSTS ${dst_dir}/${module}
${dst_dir}/${module}/details
${dst_dir}/${module}
${dst_dir}/${module}
${dst_dir}/${module}
${dst_dir}/${module}/ir/memory_optimize_pass
${dst_dir}/${module}/ir
${dst_dir}/${module}/fleet)
set(module "operators")
copy(
fluid_lib_dist
SRCS ${src_dir}/${module}/reader/blocking_queue.h
DSTS ${dst_dir}/${module}/reader/)
set(module "memory")
copy(
fluid_lib_dist
SRCS ${src_dir}/${module}/allocation/*.h
DSTS ${dst_dir}/${module}/allocation)
set(module "platform")
set(platform_lib_deps phi_profiler_proto errors)
if(WITH_GPU)
set(platform_lib_deps ${platform_lib_deps} external_error_proto)
endif()
add_dependencies(fluid_lib_dist ${platform_lib_deps})
copy(
fluid_lib_dist
SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/dynload/*.h
${PADDLE_BINARY_DIR}/paddle/phi/api/profiler/*.pb.h
DSTS ${dst_dir}/${module} ${dst_dir}/${module}/dynload ${dst_dir}/${module})
set(module "string")
copy(
fluid_lib_dist
SRCS ${PADDLE_SOURCE_DIR}/paddle/utils/${module}/*.h
${PADDLE_SOURCE_DIR}/paddle/utils/${module}/tinyformat/*.h
DSTS ${dst_dir}/${module} ${dst_dir}/${module}/tinyformat)
set(module "imperative")
copy(
fluid_lib_dist
SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/jit/*.h
DSTS ${dst_dir}/${module} ${dst_dir}/${module}/jit)
set(module "pybind")
copy(
fluid_lib_dist
SRCS ${CMAKE_CURRENT_BINARY_DIR}/paddle/fluid/${module}/pybind.h
DSTS ${dst_dir}/${module})
set(dst_dir "${PADDLE_INSTALL_DIR}/third_party/eigen3")
copy(
inference_lib_dist
SRCS ${EIGEN_INCLUDE_DIR}/Eigen/Core ${EIGEN_INCLUDE_DIR}/Eigen/src
${EIGEN_INCLUDE_DIR}/unsupported/Eigen
DSTS ${dst_dir}/Eigen ${dst_dir}/Eigen ${dst_dir}/unsupported)
set(dst_dir "${PADDLE_INSTALL_DIR}/third_party/dlpack")
copy(
inference_lib_dist
SRCS ${DLPACK_INCLUDE_DIR}/dlpack
DSTS ${dst_dir})
set(dst_dir "${PADDLE_INSTALL_DIR}/third_party/install/zlib")
copy(
inference_lib_dist
SRCS ${ZLIB_INCLUDE_DIR} ${ZLIB_LIBRARIES}
DSTS ${dst_dir} ${dst_dir}/lib)
# CMakeCache Info
copy(
fluid_lib_dist
SRCS ${PADDLE_INFERENCE_INSTALL_DIR}/third_party
${CMAKE_CURRENT_BINARY_DIR}/CMakeCache.txt
DSTS ${PADDLE_INSTALL_DIR} ${PADDLE_INSTALL_DIR})
# paddle fluid version
function(version version_file)
execute_process(
COMMAND ${GIT_EXECUTABLE} log --pretty=format:%H -1
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
OUTPUT_VARIABLE PADDLE_GIT_COMMIT)
file(
WRITE ${version_file}
"GIT COMMIT ID: ${PADDLE_GIT_COMMIT}\n" "WITH_MKL: ${WITH_MKL}\n"
"WITH_MKLDNN: ${WITH_MKLDNN}\n" "WITH_GPU: ${WITH_GPU}\n"
"WITH_ROCM: ${WITH_ROCM}\n" "WITH_IPU: ${WITH_IPU}\n")
if(WITH_GPU)
file(APPEND ${version_file}
"CUDA version: ${CUDA_VERSION}\n"
"CUDNN version: v${CUDNN_MAJOR_VERSION}.${CUDNN_MINOR_VERSION}\n")
endif()
if(WITH_ROCM)
file(APPEND ${version_file}
"HIP version: v${HIP_MAJOR_VERSION}.${HIP_MINOR_VERSION}\n"
"MIOpen version: v${MIOPEN_MAJOR_VERSION}.${MIOPEN_MINOR_VERSION}\n")
endif()
if(WITH_IPU)
file(APPEND ${version_file} "PopART version: ${POPART_VERSION}\n")
endif()
file(APPEND ${version_file}
"CXX compiler version: ${CMAKE_CXX_COMPILER_VERSION}\n")
if(TENSORRT_FOUND)
file(
APPEND ${version_file}
"WITH_TENSORRT: ${TENSORRT_FOUND}\n"
"TensorRT version: v${TENSORRT_MAJOR_VERSION}.${TENSORRT_MINOR_VERSION}.${TENSORRT_PATCH_VERSION}.${TENSORRT_BUILD_VERSION}\n"
)
endif()
if(WITH_LITE)
file(APPEND ${version_file} "WITH_LITE: ${WITH_LITE}\n"
"LITE_GIT_TAG: ${LITE_GIT_TAG}\n")
endif()
endfunction()
version(${PADDLE_INSTALL_DIR}/version.txt)
version(${PADDLE_INFERENCE_INSTALL_DIR}/version.txt)
version(${PADDLE_INFERENCE_C_INSTALL_DIR}/version.txt)
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# make package for paddle shared library
set(PADDLE_INSTALL_DIR ${PADDLE_BINARY_DIR}/paddle_install_dir)
set(PADDLE_LIB_TEST_DIR ${PADDLE_BINARY_DIR}/test/paddle_lib)
configure_file(${PADDLE_SOURCE_DIR}/cmake/PaddleConfig.cmake.in
${PADDLE_INSTALL_DIR}/cmake/PaddleConfig.cmake @ONLY)
configure_file(${PADDLE_SOURCE_DIR}/test/paddle_lib/CMakeLists.txt.in
${PADDLE_BINARY_DIR}/test/paddle_lib/CMakeLists.txt @ONLY)
version(${PADDLE_INSTALL_DIR}/version.txt)
@@ -71,3 +71,46 @@ math(EXPR PADDLE_VERSION_INTEGER "${PADDLE_MAJOR_VER} * 1000000
add_definitions(-DPADDLE_VERSION=${PADDLE_VERSION})
add_definitions(-DPADDLE_VERSION_INTEGER=${PADDLE_VERSION_INTEGER})
message(STATUS "Paddle version is ${PADDLE_VERSION}")
# write paddle version
function(version version_file)
execute_process(
COMMAND ${GIT_EXECUTABLE} log --pretty=format:%H -1
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
OUTPUT_VARIABLE PADDLE_GIT_COMMIT)
file(
WRITE ${version_file}
"Paddle version: ${PADDLE_VERSION}\n"
"GIT COMMIT ID: ${PADDLE_GIT_COMMIT}\n"
"WITH_MKL: ${WITH_MKL}\n"
"WITH_MKLDNN: ${WITH_MKLDNN}\n"
"WITH_GPU: ${WITH_GPU}\n"
"WITH_ROCM: ${WITH_ROCM}\n"
"WITH_IPU: ${WITH_IPU}\n")
if(WITH_GPU)
file(APPEND ${version_file}
"CUDA version: ${CUDA_VERSION}\n"
"CUDNN version: v${CUDNN_MAJOR_VERSION}.${CUDNN_MINOR_VERSION}\n")
endif()
if(WITH_ROCM)
file(APPEND ${version_file}
"HIP version: v${HIP_MAJOR_VERSION}.${HIP_MINOR_VERSION}\n"
"MIOpen version: v${MIOPEN_MAJOR_VERSION}.${MIOPEN_MINOR_VERSION}\n")
endif()
if(WITH_IPU)
file(APPEND ${version_file} "PopART version: ${POPART_VERSION}\n")
endif()
file(APPEND ${version_file}
"CXX compiler version: ${CMAKE_CXX_COMPILER_VERSION}\n")
if(TENSORRT_FOUND)
file(
APPEND ${version_file}
"WITH_TENSORRT: ${TENSORRT_FOUND}\n"
"TensorRT version: v${TENSORRT_MAJOR_VERSION}.${TENSORRT_MINOR_VERSION}.${TENSORRT_PATCH_VERSION}.${TENSORRT_BUILD_VERSION}\n"
)
endif()
if(WITH_LITE)
file(APPEND ${version_file} "WITH_LITE: ${WITH_LITE}\n"
"LITE_GIT_TAG: ${LITE_GIT_TAG}\n")
endif()
endfunction()
@@ -75,5 +75,8 @@ env_dict={
'PYBIND_INCLUDE_DIR':'@PYBIND_INCLUDE_DIR@',
'WITH_PYTHON':'@WITH_PYTHON@',
'WITH_CINN':'@WITH_CINN@',
'CINN_SOURCE_DIR':'@CINN_SOURCE_DIR@'
'CINN_SOURCE_DIR':'@CINN_SOURCE_DIR@',
'WITH_CPP_DIST':'@WITH_CPP_DIST@',
'PADDLE_INSTALL_DIR':'@PADDLE_INSTALL_DIR@',
'PADDLE_LIB_TEST_DIR':'@PADDLE_LIB_TEST_DIR@'
}
@@ -782,6 +782,24 @@ if '${WITH_XPU}' == 'ON':
headers += list(find_files('*.h', '${PYBIND_INCLUDE_DIR}', True)) # pybind headers
def get_header_install_dir(header):
if 'pb.h' in header:
install_dir = re.sub('${PADDLE_BINARY_DIR}/', '', header)
elif 'third_party' not in header:
# paddle headers
install_dir = re.sub('@PADDLE_SOURCE_DIR@/', '', header)
print('install_dir: ', install_dir)
if 'fluid/jit' in install_dir:
install_dir = re.sub('fluid/jit', 'jit', install_dir)
print('fluid/jit install_dir: ', install_dir)
else:
# third_party
install_dir = re.sub('${THIRD_PARTY_PATH}', 'third_party', header)
patterns = ['install/mkldnn/include', 'pybind/src/extern_pybind/include', 'third_party/xpu/src/extern_xpu/xpu/include/']
for pattern in patterns:
install_dir = re.sub(pattern, '', install_dir)
return install_dir
class InstallCommand(InstallCommandBase):
def finalize_options(self):
ret = InstallCommandBase.finalize_options(self)
@@ -813,21 +831,7 @@ class InstallHeaders(Command):
('force', 'force'))
def mkdir_and_copy_file(self, header):
if 'pb.h' in header:
install_dir = re.sub('${PADDLE_BINARY_DIR}/', '', header)
elif 'third_party' not in header:
# paddle headers
install_dir = re.sub('@PADDLE_SOURCE_DIR@/', '', header)
print('install_dir: ', install_dir)
if 'fluid/jit' in install_dir:
install_dir = re.sub('fluid/jit', 'jit', install_dir)
print('fluid/jit install_dir: ', install_dir)
else:
# third_party
install_dir = re.sub('${THIRD_PARTY_PATH}', 'third_party', header)
patterns = ['install/mkldnn/include', 'pybind/src/extern_pybind/include', 'third_party/xpu/src/extern_xpu/xpu/include/']
for pattern in patterns:
install_dir = re.sub(pattern, '', install_dir)
install_dir = get_header_install_dir(header)
install_dir = os.path.join(self.install_dir, os.path.dirname(install_dir))
if not os.path.exists(install_dir):
self.mkpath(install_dir)
@@ -885,6 +889,54 @@ if '${WITH_STRIP}' == 'ON':
if os.system(command) != 0:
raise Exception("strip *.so failed, command: %s" % command)
def install_cpp_dist_and_build_test(paddle_install_dir, paddle_lib_test_dir):
"""install cpp distribution and build test target
TODO(huangjiyi):
1. This function will be moved when separating C++ distribution
installation from python package installation.
2. Reduce the header and library files to be installed.
"""
if '${CMAKE_BUILD_TYPE}' != 'Release':
return
os.makedirs(paddle_install_dir, exist_ok=True)
# install C++ header files
for header in headers:
install_dir = get_header_install_dir(header)
install_dir = os.path.join(
paddle_install_dir, 'include', os.path.dirname(install_dir)
)
os.makedirs(install_dir, exist_ok=True)
shutil.copy(header, install_dir)
# install C++ shared libraries
lib_install_dir = os.path.join(paddle_install_dir, 'lib')
os.makedirs(lib_install_dir, exist_ok=True)
# install the core libpaddle shared library (libpaddle.*)
paddle_libs = glob.glob('${PADDLE_BINARY_DIR}/paddle/fluid/pybind/${FLUID_CORE_NAME}.*')
for lib in paddle_libs:
shutil.copy(lib, lib_install_dir)
# install dependent libraries
libs_path = package_dir['paddle.libs']
for lib in package_data['paddle.libs']:
lib_path = os.path.join(libs_path, lib)
shutil.copy(lib_path, lib_install_dir)
# build test target
cmake_args = ["cmake", paddle_lib_test_dir, "-B", paddle_lib_test_dir]
if os.getenv("GENERATOR") == "Ninja":
cmake_args.append("-GNinja")
subprocess.check_call(cmake_args)
subprocess.check_call(["cmake", "--build", paddle_lib_test_dir])
# install cpp distribution
if '${WITH_CPP_DIST}' == 'ON':
paddle_install_dir = '${PADDLE_INSTALL_DIR}'
paddle_lib_test_dir = '${PADDLE_LIB_TEST_DIR}'
install_cpp_dist_and_build_test(paddle_install_dir, paddle_lib_test_dir)
with redirect_stdout():
setup(name='${PACKAGE_NAME}',
version='${PADDLE_VERSION}',
......
@@ -1545,6 +1545,52 @@ Please run 'pip install -r python/requirements.txt' to make sure you have all th
raise RuntimeError(missing_modules.format(dependency=dependency))
def install_cpp_dist_and_build_test(install_dir, lib_test_dir, headers, libs):
"""install cpp distribution and build test target
TODO(huangjiyi):
1. This function will be moved when separating C++ distribution
installation from python package installation.
2. Reduce the header and library files to be installed.
"""
if env_dict.get("CMAKE_BUILD_TYPE") != 'Release':
return
os.makedirs(install_dir, exist_ok=True)
# install C++ header files
for header in headers:
header_install_dir = get_header_install_dir(header)
header_install_dir = os.path.join(
install_dir, 'include', os.path.dirname(header_install_dir)
)
os.makedirs(header_install_dir, exist_ok=True)
shutil.copy(header, header_install_dir)
# install C++ shared libraries
lib_install_dir = os.path.join(install_dir, 'lib')
os.makedirs(lib_install_dir, exist_ok=True)
# install the core libpaddle shared library (libpaddle.*)
paddle_libs = glob.glob(
paddle_binary_dir
+ '/paddle/fluid/pybind/'
+ env_dict.get("FLUID_CORE_NAME")
+ '.*'
)
for lib in paddle_libs:
shutil.copy(lib, lib_install_dir)
# install dependent libraries
libs_path = paddle_binary_dir + '/python/paddle/libs'
for lib in libs:
lib_path = os.path.join(libs_path, lib)
shutil.copy(lib_path, lib_install_dir)
# build test target
cmake_args = [CMAKE, lib_test_dir, "-B", lib_test_dir]
if os.getenv("GENERATOR") == "Ninja":
cmake_args.append("-GNinja")
subprocess.check_call(cmake_args)
subprocess.check_call([CMAKE, "--build", lib_test_dir])
def main():
# Parse the command line and check arguments before we proceed with building steps and setup
parse_input_command(filter_args_list)
@@ -1617,6 +1663,17 @@ def main():
if os.system(command) != 0:
raise Exception("strip *.so failed, command: %s" % command)
# install cpp distribution
if env_dict.get("WITH_CPP_DIST") == 'ON':
paddle_install_dir = env_dict.get("PADDLE_INSTALL_DIR")
paddle_lib_test_dir = env_dict.get("PADDLE_LIB_TEST_DIR")
install_cpp_dist_and_build_test(
paddle_install_dir,
paddle_lib_test_dir,
headers,
package_data['paddle.libs'],
)
setup(
name=package_name,
version=paddle_version,
......
@@ -135,6 +135,15 @@ if(WITH_TESTING)
endif()
endif()
if(WITH_CPP_DIST)
add_test(NAME test_paddle_lib
COMMAND ${PADDLE_BINARY_DIR}/test/paddle_lib/test_paddle_lib)
if(WITH_GPU)
add_test(NAME test_paddle_lib_gpu
COMMAND ${PADDLE_BINARY_DIR}/test/paddle_lib/test_paddle_lib_gpu)
endif()
endif()
get_property(test_srcs GLOBAL PROPERTY TEST_SRCS)
get_property(test_names GLOBAL PROPERTY TEST_NAMES)
......
cmake_minimum_required(VERSION 3.15)
project(test_paddle_lib)
list(APPEND CMAKE_PREFIX_PATH "@PADDLE_BINARY_DIR@/paddle_install_dir")
find_package(Paddle REQUIRED)
include_directories(${PADDLE_INCLUDE_DIRS})
add_executable(test_paddle_lib test_paddle_lib.cc)
target_link_libraries(test_paddle_lib ${PADDLE_LIBRARIES})
if(@WITH_GPU@)
add_executable(test_paddle_lib_gpu test_paddle_lib_gpu.cc)
target_link_libraries(test_paddle_lib_gpu ${PADDLE_LIBRARIES})
endif()
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cassert>
#include "paddle/extension.h"
int main() {
int data[] = {1, 2, 3, 4};
auto tensor = paddle::from_blob(data, {2, 2}, phi::DataType::INT32);
assert(tensor.numel() == 4);
assert(tensor.dtype() == phi::DataType::INT32);
assert(tensor.is_cpu());
assert(tensor.template data<int>() == data);
}
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cassert>
#include "paddle/extension.h"
int main() {
float data[] = {1., 2., 3., 4.};
auto tensor = paddle::from_blob(data, {2, 2}, phi::DataType::FLOAT32);
auto gpu_tensor =
paddle::experimental::copy_to(tensor, phi::GPUPlace(), false);
assert(gpu_tensor.is_gpu());
}