// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <sstream>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device_memory_aligment.h"

namespace paddle {
namespace operators {

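// kDefaultDtype (BOOL) acts as a sentinel meaning "dtype not yet known";
// GetMemSizeAndDtype replaces it with the dtype of the first input tensor.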
static framework::proto::VarType::Type kDefaultDtype =
    framework::proto::VarType::Type::VarType_Type_BOOL;

template <typename DeviceContext, typename T>
class CoalesceTensorOp : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto &in_var_names = context.Inputs("Input");
    auto &out_var_names = context.Outputs("Output");
    auto &in_vars = context.MultiInputVar("Input");
    auto out_vars = context.MultiOutputVar("Output");

    PADDLE_ENFORCE_GT(in_var_names.size(), static_cast<size_t>(0),
                      "The CoalesceTensor operator has no input.");
    PADDLE_ENFORCE_EQ(in_var_names.size(), out_var_names.size(),
                      "The number of inputs and outputs of CoalesceTensor "
                      "should be equal.");

    for (size_t i = 0; i < in_var_names.size(); ++i) {
      // Only LoDTensor inputs and outputs are supported.
      PADDLE_ENFORCE_NOT_NULL(in_vars[i], "%s should not be nullptr.",
                              in_var_names[i]);
      PADDLE_ENFORCE_NOT_NULL(out_vars[i], "%s should not be nullptr.",
                              out_var_names[i]);
      PADDLE_ENFORCE(in_vars[i]->IsType<framework::LoDTensor>());
      PADDLE_ENFORCE(out_vars[i]->IsType<framework::LoDTensor>());
    }

    auto in_tensors = context.MultiInput<framework::LoDTensor>("Input");

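    // If check_name is set, every output variable must have the same name as
    // its input (the op coalesces the variables in place); otherwise each
    // output is resized to the shape of its corresponding input.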
    if (context.Attr<bool>("check_name")) {
      for (size_t i = 0; i < in_var_names.size(); ++i) {
        PADDLE_ENFORCE_EQ(in_var_names[i], out_var_names[i],
                          "The name of Input and Output should be the same.");
      }
    } else {
      // Initialize each output with the shape of its corresponding input.
      for (size_t i = 0; i < in_tensors.size(); ++i) {
        out_vars[i]->GetMutable<framework::LoDTensor>()->Resize(
            in_tensors[i]->dims());
      }
    }

    auto &dev_ctx = context.template device_context<DeviceContext>();

    // Compute the total (aligned) numel and the common dtype of the inputs.
    size_t numel = 0;
    auto dtype = kDefaultDtype;
    GetMemSizeAndDtype(in_tensors, in_var_names, &numel, &dtype,
                       context.GetPlace());

    // Allocate the contiguous space.
    auto fused_tensor = context.Output<framework::LoDTensor>("FusedOutput");
    fused_tensor->Resize(framework::make_ddim({static_cast<int64_t>(numel)}))
        .mutable_data(context.GetPlace(), dtype);

    // Initialize the contiguous space.
    auto out_tensors = context.MultiOutput<framework::LoDTensor>("Output");
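    // offset walks through fused_tensor in units of elements; every tensor
    // occupies an alignment-padded segment that starts at the current offset.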
    size_t offset = 0;
    size_t size_of_dtype = framework::SizeOfType(dtype);
    if (context.Attr<bool>("copy_data")) {
      for (size_t i = 0; i < in_var_names.size(); ++i) {
        size_t len = static_cast<size_t>(in_tensors[i]->numel());
        auto sub_tensor = fused_tensor->Slice(
            static_cast<int64_t>(offset), static_cast<int64_t>(offset + len));
        framework::TensorCopy(*in_tensors[i], context.GetPlace(), dev_ctx,
                              &sub_tensor);

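        // Advance by the aligned length (in elements) so that the next
        // sub-tensor starts at an address aligned for this place.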
        offset += platform::Alignment(len * size_of_dtype, context.GetPlace()) /
                  size_of_dtype;
      }
    } else if (context.Attr<bool>("set_constant")) {
      math::SetConstant<DeviceContext, T> set_constant;
      set_constant(dev_ctx, fused_tensor,
                   static_cast<T>(context.Attr<float>("constant")));
    }

    // Make the outputs point to the contiguous space.
    offset = 0;
    std::stringstream ss;
    ss << "alloc_space_for_vars: ";
    for (size_t i = 0; i < out_tensors.size(); ++i) {
      size_t len = static_cast<size_t>(out_tensors[i]->numel());
      auto dim = out_tensors[i]->dims();
      out_tensors[i]
          ->ShareDataWith(fused_tensor->Slice(
              static_cast<int64_t>(offset), static_cast<int64_t>(offset + len)))
          .Resize(dim);
      len = platform::Alignment(len * size_of_dtype, context.GetPlace()) /
            size_of_dtype;
      offset += len;
      ss << "output(" << out_var_names[i] << ")  dim:(" << dim << ")"
         << " address: " << out_tensors[i]->data<void>() << ", ";
    }
    VLOG(10) << ss.str();
  }

 private:
  void GetMemSizeAndDtype(
      const std::vector<const framework::LoDTensor *> &lod_tensors,
      const std::vector<std::string> &var_names, size_t *numel,
      framework::proto::VarType::Type *dtype,
      const platform::Place &place) const {
    PADDLE_ENFORCE_EQ(lod_tensors.size(), var_names.size(),
                      "The number of input tensors and variable names "
                      "should be equal.");
    *numel = 0;
    size_t size_of_dtype = 0;

    std::stringstream ss;
    ss << "alloc_space_for_vars: ";
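    // Validate each input, deduce the common dtype from the first tensor,
    // and accumulate the alignment-padded number of elements.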
    for (size_t i = 0; i < var_names.size(); ++i) {
      PADDLE_ENFORCE(lod_tensors[i]->IsInitialized(), "%s is not initialized.",
                     var_names[i]);

      auto p_dtype = lod_tensors[i]->type();
      if (*dtype == kDefaultDtype) {
        PADDLE_ENFORCE_NE(p_dtype, kDefaultDtype, "%s's type should not be %s.",
                          var_names[i], kDefaultDtype);
        *dtype = p_dtype;
        size_of_dtype = framework::SizeOfType(p_dtype);
      }
      PADDLE_ENFORCE_EQ(p_dtype, *dtype,
                        "The dtype of input tensors should all be the same.");

      auto size = lod_tensors[i]->numel();
      PADDLE_ENFORCE_GT(size, 0);
      ss << "input(" << var_names[i] << ") dim:(" << lod_tensors[i]->dims()
         << "), ";
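      // A worked example of the padding (assuming a 256-byte alignment for
      // this place): a float tensor with numel = 10 occupies
      // Alignment(10 * 4, place) / 4 = 256 / 4 = 64 elements of FusedOutput.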
      *numel += platform::Alignment(static_cast<size_t>(size) * size_of_dtype,
                                    place) /
                size_of_dtype;
    }

    VLOG(10) << ss.str();
  }
};

class AllocContinuousSpaceOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

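  // InferShape is intentionally a no-op: the output shapes are set at run
  // time from the corresponding input tensors inside the kernel.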
  void InferShape(framework::InferShapeContext *ctx) const override {}
};

class AllocContinuousSpaceOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("Input",
             "(vector<LoDTensor>) The input tensors of"
169
             " coalesce_tensor operator.")
170 171 172
        .AsDuplicable();
    AddOutput("Output",
              "(vector<LoDTensor>) The output "
173
              "tensors of coalesce_tensor operator. And the address "
174 175 176 177 178
              "of output tensors are continuous, they are sliced from the "
              "tensor of FusedOutput.")
        .AsDuplicable();
    AddOutput("FusedOutput",
              "(LoDTensor) The output tensor "
179
              "of coalesce_tensor operator. And the tensors of"
180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196
              " Output is sliced from the tensor of FusedOutput.");
    AddAttr<bool>("copy_data", "Whether to copy the Input value to Output.")
        .SetDefault(false);
    AddAttr<bool>("set_constant",
                  "Whether to set the Output with a constant value.")
        .SetDefault(false);
    AddAttr<float>("constant",
                   "If set_constant is true, the constant value will be used "
                   "to set the Output.")
        .SetDefault(0.0);
    AddAttr<bool>("check_name",
                  "Whether to check the name of Input and Output to ensure "
                  "they are the same separately.")
        .SetDefault(false);
    AddComment(R"DOC(
AllocContinuousSpace Operator.

coalesce_tensor is used to make the addresses of the Output tensors
contiguous according to the Input. This op allocates one big tensor whose
dtype is the same as that of the input tensors, whose size is the sum of
the input tensors' numel, and whose dim is {sum(numel)}; this big tensor
is stored in FusedOutput. The tensors of Output are sliced from the tensor
of FusedOutput.
Note that all Input tensors must have the same dtype, and the dim of each
Input must equal the dim of the corresponding Output.
The tensors of Input and Output can be the same or different variables;
coalesce_tensor allows copying the values of Input to Output, or setting
the Output to a constant value.
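
For example, given two float32 inputs with dims [2, 3] and [4], FusedOutput
is a 1-D tensor of at least 6 + 4 = 10 elements (each input's segment is
padded up to the memory alignment of the place), Output[0] is a view of the
first padded segment resized to [2, 3], and Output[1] is a view of the
second padded segment resized to [4].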

)DOC");
  }
};

}  // namespace operators
}  // namespace paddle

REGISTER_OPERATOR(coalesce_tensor, paddle::operators::AllocContinuousSpaceOp,
                  paddle::operators::AllocContinuousSpaceOpMaker);
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CPU_KERNEL(
    coalesce_tensor,
    ops::CoalesceTensorOp<paddle::platform::CPUDeviceContext, plat::float16>,
    ops::CoalesceTensorOp<paddle::platform::CPUDeviceContext, int>,
    ops::CoalesceTensorOp<paddle::platform::CPUDeviceContext, float>,
    ops::CoalesceTensorOp<paddle::platform::CPUDeviceContext, double>);

#ifdef PADDLE_WITH_CUDA
REGISTER_OP_CUDA_KERNEL(
    coalesce_tensor,
    ops::CoalesceTensorOp<paddle::platform::CUDADeviceContext, plat::float16>,
    ops::CoalesceTensorOp<paddle::platform::CUDADeviceContext, int>,
    ops::CoalesceTensorOp<paddle::platform::CUDADeviceContext, float>,
    ops::CoalesceTensorOp<paddle::platform::CUDADeviceContext, double>);
#endif