diff --git a/CMakeLists.txt b/CMakeLists.txt
index 00b034a765eefb963593029c0d77b5dc006a6f23..740a9cef1dda768cc225bebb9d52a15465177f39 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -31,9 +31,13 @@ option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_F
 option(WITH_TENSORRT "Compile PaddlePaddle with NVIDIA TensorRT" OFF)
 option(WITH_XPU "Compile PaddlePaddle with BAIDU KUNLUN XPU" OFF)
 option(WITH_WIN_DUMP_DBG "Compile with windows core dump debug mode" OFF)
+option(WITH_ASCEND "Compile PaddlePaddle with ASCEND" OFF)
 if (WITH_GPU AND WITH_XPU)
     message(FATAL_ERROR "Error when compile GPU and XPU at the same time")
 endif()
+if (WITH_GPU AND WITH_ASCEND)
+    message(FATAL_ERROR "Error when compile GPU and ASCEND at the same time")
+endif()
 # cmake 3.12, 3.13, 3.14 will append gcc link options to nvcc, and nvcc doesn't recognize them.
 if(WITH_GPU AND (${CMAKE_VERSION} VERSION_GREATER_EQUAL 3.12) AND (${CMAKE_VERSION} VERSION_LESS 3.15))
     message(FATAL_ERROR "cmake ${CMAKE_VERSION} is not supported when WITH_GPU=ON because of bug https://cmake.org/pipermail/cmake/2018-September/068195.html. "
diff --git a/cmake/configure.cmake b/cmake/configure.cmake
index a4b9ec4bef6289a90c985f812f473b68abe0b57e..f2510d1321c3e83dae2373c5d1f48cc7006e8be9 100644
--- a/cmake/configure.cmake
+++ b/cmake/configure.cmake
@@ -78,6 +78,10 @@ if(WITH_BOX_PS)
     add_definitions(-DPADDLE_WITH_BOX_PS)
 endif()
 
+if(WITH_ASCEND)
+    add_definitions(-DPADDLE_WITH_ASCEND)
+endif()
+
 if(WITH_XPU)
     message(STATUS "Compile with XPU!")
     add_definitions(-DPADDLE_WITH_XPU)
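Note: with the two changes above, configuring with -DWITH_ASCEND=ON defines
PADDLE_WITH_ASCEND for every translation unit, and that macro is what gates
all of the new C++ sources in this patch. A minimal sketch of the pattern,
where MaybeInitAscend is a hypothetical caller and not part of the patch:

    // Safe to include unconditionally: the header's body is itself guarded
    // by #ifdef PADDLE_WITH_ASCEND (see ascend_wrapper.h below).
    #include "paddle/fluid/framework/fleet/ascend_wrapper.h"

    void MaybeInitAscend() {
    #ifdef PADDLE_WITH_ASCEND
      // Only compiled when cmake ran with -DWITH_ASCEND=ON.
      paddle::framework::AscendInstance::GetInstance()->InitGlobalResources();
    #endif
    }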
diff --git a/cmake/external/ascend.cmake b/cmake/external/ascend.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..bcf0c0a0646fc386f41c4b1f35ba773d6a1adb6f
--- /dev/null
+++ b/cmake/external/ascend.cmake
@@ -0,0 +1,61 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+INCLUDE(ExternalProject)
+
+SET(ASCEND_PROJECT "extern_ascend")
+IF((NOT DEFINED ASCEND_VER) OR (NOT DEFINED ASCEND_URL))
+  MESSAGE(STATUS "use pre defined download url")
+  SET(ASCEND_VER "0.1.1" CACHE STRING "" FORCE)
+  SET(ASCEND_NAME "ascend" CACHE STRING "" FORCE)
+  SET(ASCEND_URL "http://paddle-ascend.bj.bcebos.com/ascend.tar.gz" CACHE STRING "" FORCE)
+ENDIF()
+MESSAGE(STATUS "ASCEND_NAME: ${ASCEND_NAME}, ASCEND_URL: ${ASCEND_URL}")
+SET(ASCEND_SOURCE_DIR "${THIRD_PARTY_PATH}/ascend")
+SET(ASCEND_DOWNLOAD_DIR "${ASCEND_SOURCE_DIR}/src/${ASCEND_PROJECT}")
+SET(ASCEND_DST_DIR "ascend")
+SET(ASCEND_INSTALL_ROOT "${THIRD_PARTY_PATH}/install")
+SET(ASCEND_INSTALL_DIR ${ASCEND_INSTALL_ROOT}/${ASCEND_DST_DIR})
+SET(ASCEND_ROOT ${ASCEND_INSTALL_DIR})
+SET(ASCEND_INC_DIR ${ASCEND_ROOT}/include)
+SET(ASCEND_LIB_DIR ${ASCEND_ROOT}/lib)
+SET(ASCEND_LIB ${ASCEND_LIB_DIR}/libge_runner.so)
+SET(ASCEND_GRAPH_LIB ${ASCEND_LIB_DIR}/libgraph.so)
+SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_RPATH}" "${ASCEND_ROOT}/lib")
+
+INCLUDE_DIRECTORIES(${ASCEND_INC_DIR})
+FILE(WRITE ${ASCEND_DOWNLOAD_DIR}/CMakeLists.txt
+  "PROJECT(ASCEND)\n"
+  "cmake_minimum_required(VERSION 3.0)\n"
+  "install(DIRECTORY ${ASCEND_NAME}/include ${ASCEND_NAME}/lib \n"
+  "        DESTINATION ${ASCEND_DST_DIR})\n")
+ExternalProject_Add(
+    ${ASCEND_PROJECT}
+    ${EXTERNAL_PROJECT_LOG_ARGS}
+    PREFIX ${ASCEND_SOURCE_DIR}
+    DOWNLOAD_DIR ${ASCEND_DOWNLOAD_DIR}
+    DOWNLOAD_COMMAND wget --no-check-certificate ${ASCEND_URL} -c -q -O ${ASCEND_NAME}.tar.gz
+                     && tar zxvf ${ASCEND_NAME}.tar.gz
+    DOWNLOAD_NO_PROGRESS 1
+    UPDATE_COMMAND ""
+    CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${ASCEND_INSTALL_ROOT}
+    CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${ASCEND_INSTALL_ROOT}
+)
+ADD_LIBRARY(ascend SHARED IMPORTED GLOBAL)
+SET_PROPERTY(TARGET ascend PROPERTY IMPORTED_LOCATION ${ASCEND_LIB})
+
+ADD_LIBRARY(ascend_graph SHARED IMPORTED GLOBAL)
+SET_PROPERTY(TARGET ascend_graph PROPERTY IMPORTED_LOCATION ${ASCEND_GRAPH_LIB})
+ADD_DEPENDENCIES(ascend ascend_graph ${ASCEND_PROJECT})
diff --git a/cmake/third_party.cmake b/cmake/third_party.cmake
index 3fc04b36a2c2f7d78ba4932f03503f25812aac24..65600ba7fb76ded2eced1735677db8d6c21752f6 100644
--- a/cmake/third_party.cmake
+++ b/cmake/third_party.cmake
@@ -280,6 +280,11 @@ if(WITH_BOX_PS)
     list(APPEND third_party_deps extern_box_ps)
 endif(WITH_BOX_PS)
 
+if(WITH_ASCEND)
+    include(external/ascend)
+    list(APPEND third_party_deps extern_ascend)
+endif (WITH_ASCEND)
+
 if (WITH_PSCORE)
     include(external/snappy)
     list(APPEND third_party_deps extern_snappy)
diff --git a/paddle/fluid/framework/fleet/CMakeLists.txt b/paddle/fluid/framework/fleet/CMakeLists.txt
index 106685cdd9d776f9bc13ab4f233a193160fb5af0..df6c4b264b737accf99214a0be1fc45297e8424a 100644
--- a/paddle/fluid/framework/fleet/CMakeLists.txt
+++ b/paddle/fluid/framework/fleet/CMakeLists.txt
@@ -31,3 +31,7 @@ endif(WITH_GLOO)
 cc_library(heter_wrapper SRCS heter_wrapper.cc DEPS framework_proto device_context heter_service_proto)
 
 cc_test(test_fleet SRCS test_fleet.cc DEPS fleet_wrapper gloo_wrapper fs shell)
+
+if(WITH_ASCEND)
+    cc_library(ascend_wrapper SRCS ascend_wrapper.cc DEPS framework_proto lod_tensor ascend ascend_graph)
+endif(WITH_ASCEND)
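Note on the wrapper these build rules compile: the next two files define a
process-wide AscendInstance singleton around a ge::Session. A minimal sketch
of the intended call order, assuming a hypothetical driver function (none of
this code is part of the patch itself):

    #include <vector>
    #include "paddle/fluid/framework/fleet/ascend_wrapper.h"

    namespace pf = paddle::framework;

    void LaunchSubgraph(const pf::AscendGraphDesc &graph,
                        const std::vector<const pf::Tensor *> &feeds,
                        std::vector<pf::Tensor *> *fetches) {
      auto ascend = pf::AscendInstance::GetInstance();
      ascend->InitGlobalResources();              // creates the ge::Session once
      ascend->AddAscendSubgraph(/*graph_idx=*/0, graph);
      ascend->RunAscendSubgraph(/*graph_idx=*/0, feeds, fetches);
    }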
diff --git a/paddle/fluid/framework/fleet/ascend_wrapper.cc b/paddle/fluid/framework/fleet/ascend_wrapper.cc
new file mode 100644
index 0000000000000000000000000000000000000000..d1b2f51f700363cf319344ab35b10af545c0373a
--- /dev/null
+++ b/paddle/fluid/framework/fleet/ascend_wrapper.cc
@@ -0,0 +1,22 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifdef PADDLE_WITH_ASCEND
+#include "paddle/fluid/framework/fleet/ascend_wrapper.h"
+namespace paddle {
+namespace framework {
+std::shared_ptr<AscendInstance> AscendInstance::ascend_instance_ = nullptr;
+}  // end namespace framework
+}  // end namespace paddle
+#endif
diff --git a/paddle/fluid/framework/fleet/ascend_wrapper.h b/paddle/fluid/framework/fleet/ascend_wrapper.h
new file mode 100644
index 0000000000000000000000000000000000000000..da79fccb8ca69fac0f34f8092f296b9923e5f849
--- /dev/null
+++ b/paddle/fluid/framework/fleet/ascend_wrapper.h
@@ -0,0 +1,183 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#ifdef PADDLE_WITH_ASCEND
+#include <glog/logging.h>
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "paddle/fluid/framework/data_type.h"
+#include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/platform/gpu_info.h"
+#include "paddle/fluid/platform/place.h"
+#include "paddle/fluid/platform/timer.h"
+
+#include "ge/ge_api.h"
+#include "ge/ge_api_types.h"
+#include "graph/attr_value.h"
+#include "graph/tensor.h"
+#include "graph/types.h"
+
+namespace paddle {
+namespace framework {
+
+// typedef std::vector<ge::Operator> AscendGraphDesc;
+typedef ge::Graph AscendGraphDesc;
+
+class AscendInstance {
+ public:
+  virtual ~AscendInstance() {}
+  AscendInstance() {}
+
+  std::map<std::string, std::string> GetDefaultInitSessionOptions() {
+    std::map<std::string, std::string> init_options;
+    init_options["a"] = "b";
+    init_options["ge.trainFlag"] = "1";
+    return init_options;
+  }
+
+  // add other parameters here to init
+  void InitGlobalResources() {
+    session_.reset(new ge::Session(GetDefaultInitSessionOptions()));
+    VLOG(1) << "InitGlobalResources Done";
+  }
+
+  static std::shared_ptr<AscendInstance> GetInstance() {
+    if (nullptr == ascend_instance_) {
+      ascend_instance_.reset(new paddle::framework::AscendInstance());
+      VLOG(1) << "Initialize AscendInstance Done";
+    }
+    return ascend_instance_;
+  }
+
+  void AddAscendSubgraph(int graph_idx, const AscendGraphDesc &graph) {
+    ge::Status status = session_->AddGraph(graph_idx, graph);
+    PADDLE_ENFORCE_EQ(status, ge::SUCCESS,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Calling AddGraph of graph engine failed, please "
+                          "check Ascend Log."));
+    VLOG(1) << "AddAscendSubgraph " << graph_idx << " Done";
+  }
+
+  ge::DataType VarTypeToGeType(proto::VarType::Type type) {
+    if (type == proto::VarType::FP16) {
+      return ge::DataType::DT_FLOAT16;
+    } else if (type == proto::VarType::FP32) {
+      return ge::DataType::DT_FLOAT;
+    } else if (type == proto::VarType::FP64) {
+      return ge::DataType::DT_DOUBLE;
+    } else if (type == proto::VarType::INT32) {
+      return ge::DataType::DT_INT32;
+    } else if (type == proto::VarType::INT64) {
+      return ge::DataType::DT_INT64;
+    } else {
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Not support %s as tensor type.", DataTypeToString(type)));
+    }
+  }
+
+  int GeTypeSize(proto::VarType::Type type) {
+    if (type == proto::VarType::FP16) {
+      return 2;
+    } else if (type == proto::VarType::FP32) {
+      return 4;
+    } else if (type == proto::VarType::FP64) {
+      return 8;
+    } else if (type == proto::VarType::INT32) {
+      return 4;
+    } else if (type == proto::VarType::INT64) {
+      return 8;
+    } else {
+      PADDLE_THROW(platform::errors::Unimplemented(
+          "Not support %s as tensor type.", DataTypeToString(type)));
+    }
+  }
+
+  ge::Tensor ConvertToGeTensor(const Tensor *tensor) {
+    auto numel = tensor->numel();
+    std::vector<int64_t> vec_dim;
+    auto dimen = arity(tensor->dims());
+    for (auto i = 0; i < dimen; ++i) {
+      vec_dim.push_back(tensor->dims()[i]);
+    }
+    // For Debug
+    // VLOG(1) << "input numel: " << numel << ", dimen is " << vec_dim.size()
+    //         << ", and shape is";
+    // for (const auto e : vec_dim) {
+    //   VLOG(0) << e;
+    // }
+
+    ge::Shape shape(vec_dim);
+    ge::TensorDesc tensor_desc(shape, ge::Format::FORMAT_ND,
+                               VarTypeToGeType(tensor->type()));
+    tensor_desc.SetRealDimCnt(vec_dim.size());
+
+    const uint8_t *data =
+        reinterpret_cast<const uint8_t *>(tensor->data<void>());
+    std::vector<uint8_t> dst(numel * GeTypeSize(tensor->type()));
+    memcpy(dst.data(), data, GeTypeSize(tensor->type()) * numel);
+    ge::Tensor ge_tensor(tensor_desc, dst);
+    return ge_tensor;
+  }
+
+  void RunAscendSubgraph(int graph_idx,
+                         const std::vector<const Tensor *> &inputs,
+                         std::vector<Tensor *> *outputs) {
+    VLOG(1) << "Ascend Graph[" << graph_idx << "] is about to run.";
+    // Convert paddle Tensor to GE Tensor
+    std::vector<ge::Tensor> ge_inputs;
+    for (const auto &e : inputs) {
+      ge_inputs.push_back(ConvertToGeTensor(e));
+    }
+
+    // Run Graph
+    std::vector<ge::Tensor> ge_outputs;
+    ge::Status status = session_->RunGraph(graph_idx, ge_inputs, ge_outputs);
+    PADDLE_ENFORCE_EQ(status, ge::SUCCESS,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Calling RunGraph of graph engine failed, please "
+                          "check Ascend Log."));
+    VLOG(1) << "Run Ascend Graph[" << graph_idx << "] Done";
+
+    // change tensor back, note all tensor's type computed in GE is uint8
+    for (size_t i = 0; i < ge_outputs.size(); ++i) {
+      const uint8_t *ret_data = ge_outputs[i].GetData();
+      size_t size = ge_outputs[i].GetSize();
+      VLOG(1) << "GE Tensor size of the " << i << "th output var is " << size;
+      auto *dst = (*outputs)[i]->mutable_data<uint8_t>({(int64_t)size},
+                                                       platform::CPUPlace());
+      memcpy(dst, ret_data, size);
+
+      // Following for debug:
+      // VLOG(0) << "output for " << i << " var: ";
+      // float *tmp = reinterpret_cast<float *>(dst);
+      // for (size_t j = 0; j < size / 4; ++j) {
+      //   printf("%f ", tmp[j]);
+      // }
+      // printf("\n");
+    }
+  }
+
+ protected:
+  std::shared_ptr<ge::Session> session_;
+
+ private:
+  static std::shared_ptr<AscendInstance> ascend_instance_;
+};
+}  // end namespace framework
+}  // end namespace paddle
+#endif
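Note the convention in RunAscendSubgraph above: fetched GE tensors come back
as flat uint8 buffers (numel() becomes a byte count), so callers reinterpret
them by dtype. A hypothetical read-back sketch for a graph known to produce
float32, not part of this patch:

    #include <glog/logging.h>
    #include "paddle/fluid/framework/tensor.h"

    void ReadBackFloat(const paddle::framework::Tensor &out) {
      // `out` was filled by RunAscendSubgraph, so its dtype is uint8 and
      // out.numel() counts bytes; 4 matches GeTypeSize(FP32) above.
      const auto *raw = out.data<uint8_t>();
      const auto *vals = reinterpret_cast<const float *>(raw);
      for (int64_t i = 0; i < out.numel() / 4; ++i) {
        VLOG(1) << "fetched[" << i << "] = " << vals[i];
      }
    }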
diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt
index 1e8fcd65d7283f22c19654fb2fafc6867b35996e..718026fe0aa4fa89e87210a22219f5438c38b2be 100644
--- a/paddle/fluid/operators/CMakeLists.txt
+++ b/paddle/fluid/operators/CMakeLists.txt
@@ -106,6 +106,9 @@ set(COMMON_OP_DEPS ${COMMON_OP_DEPS} device_memory_aligment)
 set(COMMON_OP_DEPS ${COMMON_OP_DEPS} layer)
 set(COMMON_OP_DEPS ${COMMON_OP_DEPS} tensor_formatter)
 set(COMMON_OP_DEPS ${COMMON_OP_DEPS} op_version_registry)
+if (WITH_ASCEND)
+    set(COMMON_OP_DEPS ${COMMON_OP_DEPS} ascend_wrapper)
+endif()
 
 # FIXME(typhoonzero): operator deps may not needed.
 # op_library(lod_tensor_to_array_op DEPS lod_rank_table_op)
diff --git a/paddle/fluid/operators/ascend_trigger_op.cc b/paddle/fluid/operators/ascend_trigger_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..b699ceec87190f8ae17d0d6137c02067bc4dfd10
--- /dev/null
+++ b/paddle/fluid/operators/ascend_trigger_op.cc
@@ -0,0 +1,52 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/ascend_trigger_op.h"
+
+namespace paddle {
+namespace operators {
+
+class AscendTriggerOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+  void InferShape(framework::InferShapeContext* ctx) const override {}
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(framework::proto::VarType::FP32,
+                                   ctx.device_context());
+  }
+};
+
+class AscendTriggerOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("FeedList", "FeedList of Ascend SubGraph").AsDuplicable();
+    AddOutput("FetchList", "FetchList of Ascend SubGraph").AsDuplicable();
+    AddAttr<int>("graph_idx", "(int) the graph index").SetDefault(-1);
+    AddComment(R"DOC(
+Trigger Ascend SubGraph
+
+)DOC");
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OPERATOR(ascend_trigger, ops::AscendTriggerOp,
+                  ops::AscendTriggerOpMaker);
+
+REGISTER_OP_CPU_KERNEL(ascend_trigger, ops::AscendTriggerCPUKernel<float>)
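For orientation: ascend_trigger carries no computation of its own; it simply
hands its FeedList/FetchList variables to the GE graph selected by graph_idx.
The Python unit test at the end of this patch wires it up via
block.append_op; the same wiring could hypothetically be done through the
C++ registry, roughly as in this sketch (the variable names "x" and "y" are
placeholders, not part of the patch):

    #include <memory>
    #include "paddle/fluid/framework/op_registry.h"

    std::unique_ptr<paddle::framework::OperatorBase> MakeAscendTrigger() {
      paddle::framework::AttributeMap attrs;
      attrs["graph_idx"] = 0;  // selects the subgraph registered earlier
      return paddle::framework::OpRegistry::CreateOp(
          "ascend_trigger", {{"FeedList", {"x"}}}, {{"FetchList", {"y"}}},
          attrs);
    }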
diff --git a/paddle/fluid/operators/ascend_trigger_op.h b/paddle/fluid/operators/ascend_trigger_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..eaa79da2ba8ee02e782befb5fddb40920c5ec6ff
--- /dev/null
+++ b/paddle/fluid/operators/ascend_trigger_op.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include <string>
+#include <vector>
+#include "paddle/fluid/framework/op_registry.h"
+#ifdef PADDLE_WITH_ASCEND
+#include "paddle/fluid/framework/fleet/ascend_wrapper.h"
+#include "paddle/fluid/framework/tensor.h"
+#endif
+
+namespace paddle {
+namespace operators {
+
+template <typename T>
+class AscendTriggerCPUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext &ctx) const override {
+#ifdef PADDLE_WITH_ASCEND
+    auto ascend_ptr = paddle::framework::AscendInstance::GetInstance();
+    auto graph_idx = ctx.Attr<int>("graph_idx");
+    VLOG(4) << "AscendTrigger Kernel, begin to run graph: " << graph_idx;
+    auto inputs = ctx.MultiInput<framework::Tensor>("FeedList");
+    auto outputs = ctx.MultiOutput<framework::Tensor>("FetchList");
+    ascend_ptr->RunAscendSubgraph(graph_idx, inputs, &outputs);
+#else
+    PADDLE_THROW(platform::errors::PreconditionNotMet(
+        "Please compile WITH_ASCEND option to enable ascend_trigger op"));
+#endif
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/python/paddle/fluid/tests/unittests/test_ascend_trigger.py b/python/paddle/fluid/tests/unittests/test_ascend_trigger.py
new file mode 100644
index 0000000000000000000000000000000000000000..644b550bc426ed07117bfe8a3fc2f72b1802c5ca
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_ascend_trigger.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.layers as layers
+import unittest
+
+
+class TestAscendTriggerOP(unittest.TestCase):
+    """TestCases for ascend_trigger op"""
+
+    def test_ascend_trigger_op(self):
+        paddle.enable_static()
+        program = fluid.Program()
+        block = program.global_block()
+        with fluid.program_guard(program):
+            x = fluid.data(name='x', shape=[1], dtype='int64', lod_level=0)
+            y = fluid.data(name='y', shape=[1], dtype='int64', lod_level=0)
+            block.append_op(
+                type="ascend_trigger",
+                inputs={"FeedList": [x]},
+                outputs={"FetchList": [y]},
+                attrs={'graph_idx': 0})
+
+        exe = paddle.static.Executor(paddle.CPUPlace())
+        try:
+            exe.run(program)
+        except RuntimeError:
+            # Expected on builds without WITH_ASCEND: the CPU kernel throws
+            # PreconditionNotMet, which surfaces as a RuntimeError.
+            pass
+        except Exception:
+            self.fail("ascend_trigger op raised an unexpected exception")
+
+        paddle.disable_static()
+
+
+if __name__ == '__main__':
+    unittest.main()