// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <sstream>
#include <vector>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device_memory_aligment.h"
#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/operators/npu_op_runner.h"
#endif

namespace paddle {
namespace operators {

template <typename DeviceContext>
struct FillConstantVisitor {
  FillConstantVisitor(const DeviceContext &dev_ctx,
                      framework::LoDTensor *tensor, const float value)
      : dev_ctx_(dev_ctx), tensor_(tensor), value_(value) {}

  template <typename T>
  void apply(typename std::enable_if<std::is_same<T, int8_t>::value ||
                                     std::is_same<T, int16_t>::value>::type * =
                 nullptr) const {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Data type is not supported for the set_constant attr."));
  }

  template <typename T>
  void apply(typename std::enable_if<!(std::is_same<T, int8_t>::value ||
                                       std::is_same<T, int16_t>::value)>::type
                 * = nullptr) const {
#ifdef PADDLE_WITH_ASCEND_CL
    if (platform::is_npu_place(dev_ctx_.GetPlace())) {
      FillNpuTensorWithConstant<T>(tensor_, static_cast<T>(value_));
    } else {
      math::SetConstant<DeviceContext, T> set_constant;
      set_constant(dev_ctx_, tensor_, static_cast<T>(value_));
    }
#else
    math::SetConstant<DeviceContext, T> set_constant;
    set_constant(dev_ctx_, tensor_, static_cast<T>(value_));
#endif
  }

  const DeviceContext &dev_ctx_;
  framework::LoDTensor *tensor_;
  float value_;
};

template <typename DeviceContext, typename T>
class CoalesceTensorOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto in_var_names = context.InputNames("Input");
    auto out_var_names = context.OutputNames("Output");
    auto &in_vars = context.MultiInputVar("Input");
    auto out_vars = context.MultiOutputVar("Output");

    PADDLE_ENFORCE_GT(in_var_names.size(), static_cast<size_t>(0),
                      platform::errors::InvalidArgument(
                          "The CoalesceTensor operator has no input."));
    PADDLE_ENFORCE_EQ(in_var_names.size(), out_var_names.size(),
                      platform::errors::InvalidArgument(
                          "The number of CoalesceTensor operator's inputs and "
                          "outputs does not match, "
                          "the input number is %u, the output number is %u.",
                          in_var_names.size(), out_var_names.size()));

    // Input & Output check: only LoDTensor is supported.
    for (size_t i = 0; i < in_var_names.size(); ++i) {
      PADDLE_ENFORCE_NOT_NULL(
          in_vars[i],
          platform::errors::NotFound("The input variable %s of CoalesceTensor "
                                     "operator does not exist.",
                                     in_var_names[i]));
      PADDLE_ENFORCE_NOT_NULL(
          out_vars[i], platform::errors::NotFound(
                           "The output variable %s of CoalesceTensor "
                           "operator does not exist.",
                           out_var_names[i]));
      PADDLE_ENFORCE_EQ(in_vars[i]->IsType<framework::LoDTensor>(), true,
                        platform::errors::InvalidArgument(
                            "The input variable %s of CoalesceTensor operator "
                            "is not a LoDTensor.",
                            in_var_names[i]));
      PADDLE_ENFORCE_EQ(out_vars[i]->IsType<framework::LoDTensor>(), true,
                        platform::errors::InvalidArgument(
                            "The output variable %s of CoalesceTensor "
                            "operator is not a LoDTensor.",
                            out_var_names[i]));
    }

    auto in_tensors = context.MultiInput<framework::LoDTensor>("Input");
    bool use_align = context.Attr<bool>("use_align");
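    // When use_align is true, each tensor's chunk inside the fused buffer is
    // padded up to an alignment boundary (platform default, or align_size if
    // it is positive). Illustrative numbers, not from the source: with 4-byte
    // float elements and a 1024-byte alignment, a tensor of 300 elements
    // occupies 1200 bytes and is padded to 2048 bytes, i.e. 512 element
    // slots, so the next tensor starts at an aligned address.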
context.Attr("use_align"); auto align_size = context.Attr("align_size"); auto size_of_dtype = context.Attr("user_defined_size_of_dtype"); if (context.Attr("check_name")) { for (size_t i = 0; i < in_var_names.size(); ++i) { PADDLE_ENFORCE_EQ( in_var_names[i], out_var_names[i], platform::errors::InvalidArgument( "The input and output variable of CoalesceTensor operator is " "different, %dth input is %s, %dth output is %s.", i, in_var_names[i], i, out_var_names[i])); } } else { // Init the output as input for (size_t i = 0; i < in_tensors.size(); ++i) { out_vars[i]->GetMutable()->Resize( in_tensors[i]->dims()); } } auto &dev_ctx = context.template device_context(); // Get numel and dtype size_t numel = 0; auto dtype = static_cast( context.Attr("dtype")); if (size_of_dtype == -1) { size_of_dtype = framework::SizeOfType(dtype); } GetMemSizeAndDtype(in_tensors, in_var_names, &numel, size_of_dtype, context.GetPlace(), use_align, align_size); // Alloc the continuous space auto fused_tensor = context.Output("FusedOutput"); fused_tensor->Resize(framework::make_ddim({static_cast(numel)})) .mutable_data(context.GetPlace(), dtype); // Init the continuous space auto out_tensors = context.MultiOutput("Output"); size_t offset = 0; if (context.Attr("copy_data")) { for (size_t i = 0; i < in_var_names.size(); ++i) { size_t len = static_cast(in_tensors[i]->numel()); auto sub_tensor = fused_tensor->Slice( static_cast(offset), static_cast(offset + len)); framework::TensorCopy(*in_tensors[i], context.GetPlace(), dev_ctx, &sub_tensor); offset += use_align ? platform::Alignment(len * size_of_dtype, context.GetPlace(), align_size) / size_of_dtype : len; } } else if (context.Attr("set_constant")) { framework::VisitDataType( dtype, FillConstantVisitor( dev_ctx, fused_tensor, context.Attr("constant"))); } else if (context.Attr("persist_output")) { for (size_t i = 0; i < out_var_names.size(); ++i) { size_t len = static_cast(out_tensors[i]->numel()); auto sub_tensor = fused_tensor->Slice( static_cast(offset), static_cast(offset + len)); // some var may not persistable, or persistable var may not init if (out_tensors[i]->IsInitialized()) { framework::TensorCopy(*out_tensors[i], context.GetPlace(), dev_ctx, &sub_tensor); } offset += use_align ? platform::Alignment(len * size_of_dtype, context.GetPlace(), align_size) / size_of_dtype : len; } } // Make the outputs point to the continuous space. offset = 0; std::stringstream ss; ss << "alloc_space_for_vars: "; for (size_t i = 0; i < out_tensors.size(); ++i) { size_t len = static_cast(out_tensors[i]->numel()); auto dim = out_tensors[i]->dims(); VLOG(4) << len << " " << dim << " " << offset; out_tensors[i] ->ShareDataWith(fused_tensor->Slice( static_cast(offset), static_cast(offset + len))) .Resize(dim); len = use_align ? 
      len = use_align
                ? platform::Alignment(len * size_of_dtype, context.GetPlace(),
                                      align_size) /
                      size_of_dtype
                : len;
      ss << "output(" << out_var_names[i] << ") dim:(" << dim << ")"
         << " address: " << out_tensors[i]->data<void>() << " len: " << len
         << ", ";
      offset += len;
    }
    PADDLE_ENFORCE_EQ(
        static_cast<int64_t>(offset), fused_tensor->numel(),
        platform::errors::InvalidArgument(
            "The alloc_space_for_vars's offset: %s does not equal the "
            "fused_tensor's numel: %s.",
            offset, fused_tensor->numel()));
    VLOG(10) << ss.str();
  }

 private:
  void GetMemSizeAndDtype(
      const std::vector<const framework::LoDTensor *> &lod_tensors,
      const std::vector<std::string> var_names, size_t *numel,
      const size_t &size_of_dtype, const platform::Place &place,
      const bool use_align = true, const int align_size = -1) const {
    PADDLE_ENFORCE_EQ(
        lod_tensors.size(), var_names.size(),
        platform::errors::InvalidArgument(
            "The number of input tensors and variables does not match, the "
            "number of input tensors is %u, the number of input variables is "
            "%u.",
            lod_tensors.size(), var_names.size()));
    *numel = 0;
    std::stringstream ss;
    ss << "alloc_space_for_vars: ";
    for (size_t i = 0; i < var_names.size(); ++i) {
      PADDLE_ENFORCE_EQ(lod_tensors[i]->IsInitialized(), true,
                        platform::errors::InvalidArgument(
                            "Tensor `%s` is not initialized.", var_names[i]));

      auto size = lod_tensors[i]->numel();
      PADDLE_ENFORCE_GT(
          size, 0,
          platform::errors::InvalidArgument(
              "The number of tensor `%s`'s elements is 0.", var_names[i]));
      auto len = use_align
                     ? platform::Alignment(
                           static_cast<size_t>(size) * size_of_dtype, place,
                           align_size) /
                           size_of_dtype
                     : static_cast<size_t>(size);
      VLOG(4) << size << " " << len;
      ss << "input(" << var_names[i] << ") dim:(" << lod_tensors[i]->dims()
         << ") "
         << " address: " << lod_tensors[i]->data<void>() << " len: " << len
         << ", ";
      *numel += len;
    }
    VLOG(10) << ss.str();
  }
};

class CoalesceTensorOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    if (ctx->IsRuntime()) {
      return;
    }
    auto use_align = ctx->Attrs().Get<bool>("use_align");
    auto align_size = ctx->Attrs().Get<int>("align_size");
    auto size_of_dtype = ctx->Attrs().Get<int>("user_defined_size_of_dtype");

    auto dtype = static_cast<framework::proto::VarType::Type>(
        ctx->Attrs().Get<int>("dtype"));
    if (size_of_dtype == -1) {
      size_of_dtype = framework::SizeOfType(dtype);
    }

    // Round size up to the next multiple of align_size.
    auto alignment = [](size_t size, size_t align_size) {
      size_t remaining = size % align_size;
      auto aligned_size =
          remaining == 0 ? size : size + (align_size - remaining);
      VLOG(4) << remaining << " " << size << " " << align_size << " "
              << aligned_size;
      return aligned_size;
    };
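    // A short trace of the lambda above (illustrative numbers): for
    // alignment(1200, 1024), remaining = 1200 % 1024 = 176, so
    // aligned_size = 1200 + (1024 - 176) = 2048; for alignment(2048, 1024),
    // remaining = 0 and the size is returned unchanged.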
    VLOG(4) << "align_size: " << align_size;
    if (use_align && align_size > 0) {
      int64_t numel = 0;
      auto dims = ctx->GetInputsDim("Input");
      for (const auto &dim : dims) {
        auto size = framework::product(dim);
        auto len = use_align
                       ? alignment(static_cast<size_t>(size) * size_of_dtype,
                                   align_size) /
                             size_of_dtype
                       : static_cast<size_t>(size);
        numel += len;
      }
      ctx->SetOutputDim("FusedOutput", framework::make_ddim({numel}));
      VLOG(4) << "FusedOutput size: " << framework::make_ddim({numel});
    }
  }

 protected:
  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   expected_kernel_type.place_,
                                   tensor.layout());
  }
};

class CoalesceTensorOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("Input",
             "(vector<LoDTensor>) The input tensors of"
             " coalesce_tensor operator.")
        .AsDuplicable();
    AddOutput("Output",
              "(vector<LoDTensor>) The output "
              "tensors of coalesce_tensor operator. The addresses "
              "of the output tensors are continuous; they are sliced from "
              "the tensor of FusedOutput.")
        .AsDuplicable();
    AddOutput("FusedOutput",
              "(LoDTensor) The output tensor "
              "of coalesce_tensor operator. The tensors of"
              " Output are sliced from the tensor of FusedOutput.");
    AddAttr<int>("dtype", "The output data type.");
    AddAttr<bool>("copy_data", "Whether to copy the Input value to Output.")
        .SetDefault(false);
    AddAttr<bool>("set_constant",
                  "Whether to set the Output with a constant value.")
        .SetDefault(false);
    AddAttr<bool>("persist_output",
                  "Whether to persist the original Output value.")
        .SetDefault(false);
    AddAttr<float>("constant",
                   "If set_constant is true, the constant value will be used "
                   "to set the Output.")
        .SetDefault(0.0);
    AddAttr<bool>("check_name",
                  "Whether to check the names of Input and Output to ensure "
                  "they match pairwise.")
        .SetDefault(false);
    AddAttr<bool>("use_align",
                  "Whether to consider memory chunk and take alignment into "
                  "account for inputs and outputs.")
        .SetDefault(true);
    AddAttr<int>("align_size", "The alignment size when use_align is true.")
        .SetDefault(-1);
    AddAttr<int>("user_defined_size_of_dtype",
                 "The user-defined size of dtype. This is used to coalesce "
                 "grad vars and merged_grad vars at the same time. For some "
                 "strategy, the dtype of fused_grad_vars and the dtype of "
                 "fused_grad_merged_vars are not identical, which would cause "
                 "the shapes of these two coalesced vars to differ. To "
                 "make sure the shapes of these two vars are identical with "
                 "each other, this attr is added.")
        .SetDefault(-1);
    AddComment(R"DOC(
CoalesceTensor Operator.

coalesce_tensor is used to make the addresses of the Output tensors
continuous according to the Input. This Op will alloc a big tensor
according to the tensors of Input: the dtype is the same as that of the
input tensors, the size is the sum of the input tensors' numel, and the
dim of the big tensor is {sum(numel)}. The big tensor is stored in
FusedOutput. The tensors of Output are sliced from the tensor of
FusedOutput.
Note that the dtypes of Input should all be the same, and the dims of
Input and Output should be equal.
The tensors of Input and Output could be the same or different. And
coalesce_tensor allows copying the value of Input to Output, or
setting the Output with a constant value, or persisting the original
Output value.
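
For example (an illustrative case, ignoring alignment padding): if Input
holds two float32 tensors with shapes [2, 3] and [4], FusedOutput is a 1-D
tensor of 10 elements, and the two Output tensors are views into FusedOutput
covering offsets [0, 6) and [6, 10).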
)DOC"); } }; } // namespace operators } // namespace paddle REGISTER_OPERATOR(coalesce_tensor, paddle::operators::CoalesceTensorOp, paddle::operators::CoalesceTensorOpMaker); namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CPU_KERNEL( coalesce_tensor, ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) REGISTER_OP_CUDA_KERNEL( coalesce_tensor, ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel); #endif #if defined(PADDLE_WITH_ASCEND_CL) REGISTER_OP_CUDA_KERNEL( coalesce_tensor, ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel); #endif #ifdef PADDLE_WITH_XPU REGISTER_OP_XPU_KERNEL( coalesce_tensor, ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel); #endif #if defined(PADDLE_WITH_ASCEND_CL) REGISTER_OP_NPU_KERNEL( coalesce_tensor, ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel, ops::CoalesceTensorOpKernel); #endif REGISTER_OP_VERSION(coalesce_tensor) .AddCheckpoint( R"ROC( Upgrade coalesce_tensor: add a new attribute [use_align].)ROC", paddle::framework::compatible::OpVersionDesc().NewAttr( "use_align", "In order to optionally take memory alignment into account when " "coalescing tensors. The default value is true to be compatible " "with before.", true)) .AddCheckpoint( R"ROC( Upgrade coalesce_tensor: add a new attribute [align_size].)ROC", paddle::framework::compatible::OpVersionDesc().NewAttr( "align_size", "In order to optionally take memory alignment into account when " "coalescing tensors. The default value is -1 and use the default " "align_size " "of each place to be compatible with before.", -1));