coalesce_tensor_op.cc
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <sstream>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device_memory_aligment.h"

namespace paddle {
namespace operators {

template <typename DeviceContext, typename T>
class CoalesceTensorOpKernel : public framework::OpKernel<T> {
 public:
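  // Coalesce all Input LoDTensors into one contiguous FusedOutput buffer and
  // rebind every Output tensor to a slice of that buffer.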
  void Compute(const framework::ExecutionContext &context) const override {
    auto in_var_names = context.InputNames("Input");
    auto out_var_names = context.OutputNames("Output");
    auto &in_vars = context.MultiInputVar("Input");
    auto out_vars = context.MultiOutputVar("Output");

    PADDLE_ENFORCE_GT(in_var_names.size(), static_cast<size_t>(0),
                      "The CoalesceTensorOp has no input.");
    PADDLE_ENFORCE_EQ(
        in_var_names.size(), out_var_names.size(),
        "The number of CoalesceTensorOp's inputs and outputs does not match.");

    // Input & Output check: only support LoDTensor
    for (size_t i = 0; i < in_var_names.size(); ++i) {
      PADDLE_ENFORCE_NOT_NULL(
          in_vars[i],
          "The input variable %s of CoalesceTensorOp does not exist.",
          in_var_names[i]);
      PADDLE_ENFORCE_NOT_NULL(
          out_vars[i],
          "The output variable %s of CoalesceTensorOp does not exist.",
          out_var_names[i]);
      PADDLE_ENFORCE_EQ(
          in_vars[i]->IsType<framework::LoDTensor>(), true,
          "The input variable %s of CoalesceTensorOp is not LoDTensor.",
          in_var_names[i]);
      PADDLE_ENFORCE_EQ(
          out_vars[i]->IsType<framework::LoDTensor>(), true,
          "The output variable %s of CoalesceTensorOp is not LoDTensor.",
          out_var_names[i]);
    }

    auto in_tensors = context.MultiInput<framework::LoDTensor>("Input");

    if (context.Attr<bool>("check_name")) {
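      // When check_name is true, Input and Output must refer to the same
      // variables, so the outputs already have the right shapes.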
      for (size_t i = 0; i < in_var_names.size(); ++i) {
        PADDLE_ENFORCE_EQ(
            in_var_names[i], out_var_names[i],
            "The input and output variables of CoalesceTensorOp are "
            "different.");
      }
    } else {
      // Init the output as input
      for (size_t i = 0; i < in_tensors.size(); ++i) {
        out_vars[i]->GetMutable<framework::LoDTensor>()->Resize(
            in_tensors[i]->dims());
      }
    }

    auto &dev_ctx = context.template device_context<DeviceContext>();

    // Get numel and dtype
    size_t numel = 0;
    auto dtype = static_cast<framework::proto::VarType::Type>(
        context.Attr<int>("dtype"));
    size_t size_of_dtype = framework::SizeOfType(dtype);
    GetMemSizeAndDtype(in_tensors, in_var_names, &numel, size_of_dtype,
                       context.GetPlace());

    // Alloc the continuous space
    auto fused_tensor = context.Output<framework::LoDTensor>("FusedOutput");
    fused_tensor->Resize(framework::make_ddim({static_cast<int64_t>(numel)}))
        .mutable_data(context.GetPlace(), dtype);

    // Init the continuous space
    auto out_tensors = context.MultiOutput<framework::LoDTensor>("Output");
    size_t offset = 0;
    if (context.Attr<bool>("copy_data")) {
      for (size_t i = 0; i < in_var_names.size(); ++i) {
        size_t len = static_cast<size_t>(in_tensors[i]->numel());
        auto sub_tensor = fused_tensor->Slice(
            static_cast<int64_t>(offset), static_cast<int64_t>(offset + len));
        framework::TensorCopy(*in_tensors[i], context.GetPlace(), dev_ctx,
                              &sub_tensor);

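        // Advance by the aligned length (in elements) so each input is copied
        // to a properly aligned offset inside the fused buffer.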
        offset += platform::Alignment(len * size_of_dtype, context.GetPlace()) /
                  size_of_dtype;
      }
    } else if (context.Attr<bool>("set_constant")) {
      math::SetConstant<DeviceContext, T> set_constant;
      set_constant(dev_ctx, fused_tensor,
                   static_cast<T>(context.Attr<float>("constant")));
    }

    // Make the outputs point to the continuous space.
    offset = 0;
    std::stringstream ss;
    ss << "alloc_space_for_vars: ";
    for (size_t i = 0; i < out_tensors.size(); ++i) {
      size_t len = static_cast<size_t>(out_tensors[i]->numel());
      auto dim = out_tensors[i]->dims();
      out_tensors[i]
          ->ShareDataWith(fused_tensor->Slice(
              static_cast<int64_t>(offset), static_cast<int64_t>(offset + len)))
          .Resize(dim);
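      // Step to the next aligned slice, mirroring the padding used when the
      // fused buffer size was computed.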
      len = platform::Alignment(len * size_of_dtype, context.GetPlace()) /
            size_of_dtype;
      offset += len;
      ss << "output(" << out_var_names[i] << ") dim:(" << dim << ")"
         << " address: " << out_tensors[i]->data<void>() << ", ";
    }
    VLOG(10) << ss.str();
  }

 private:
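  // Compute the total number of elements (*numel) that the fused buffer must
  // hold, rounding each tensor's size up to the platform memory alignment.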
  void GetMemSizeAndDtype(
      const std::vector<const framework::LoDTensor *> &lod_tensors,
      const std::vector<std::string> var_names, size_t *numel,
      const size_t &size_of_dtype, const platform::Place &place) const {
    PADDLE_ENFORCE_EQ(lod_tensors.size(), var_names.size());
    *numel = 0;
    std::stringstream ss;
    ss << "alloc_space_for_vars: ";
    for (size_t i = 0; i < var_names.size(); ++i) {
      PADDLE_ENFORCE_EQ(lod_tensors[i]->IsInitialized(), true,
                        "%s is not initialized.", var_names[i]);

      auto size = lod_tensors[i]->numel();
      PADDLE_ENFORCE_GT(size, 0);
      ss << "input(" << var_names[i] << ") dim:(" << lod_tensors[i]->dims()
         << ") "
         << " address:" << lod_tensors[i]->data<void>() << ", ";
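      // Accumulate the aligned size (in elements) of this input; the fused
      // buffer must be able to hold every tensor at an aligned offset.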
      *numel += platform::Alignment(static_cast<size_t>(size) * size_of_dtype,
                                    place) /
                size_of_dtype;
    }

    VLOG(10) << ss.str();
  }
};

class CoalesceTensorOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

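  // The shapes of Output and FusedOutput depend on the runtime inputs, so
  // there is nothing to infer statically.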
  void InferShape(framework::InferShapeContext *ctx) const override {}

 protected:
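  // Use each tensor's own layout for the kernel type so that no data layout
  // transform is applied to the input variables.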
  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   expected_kernel_type.place_,
                                   tensor.layout());
  }
};

class CoalesceTensorOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("Input",
             "(vector<LoDTensor>) The input tensors of"
             " coalesce_tensor operator.")
        .AsDuplicable();
    AddOutput("Output",
              "(vector<LoDTensor>) The output "
              "tensors of coalesce_tensor operator. The addresses of the "
              "output tensors are contiguous; they are sliced from the "
              "tensor of FusedOutput.")
        .AsDuplicable();
    AddOutput("FusedOutput",
              "(LoDTensor) The output tensor "
              "of coalesce_tensor operator. The tensors of"
              " Output are sliced from the tensor of FusedOutput.");
    AddAttr<int>("dtype", "The output data type.");
    AddAttr<bool>("copy_data", "Whether to copy the Input value to Output.")
        .SetDefault(false);
    AddAttr<bool>("set_constant",
                  "Whether to set the Output with a constant value.")
        .SetDefault(false);
    AddAttr<float>("constant",
                   "If set_constant is true, the constant value will be used "
                   "to set the Output.")
        .SetDefault(0.0);
    AddAttr<bool>("check_name",
                  "Whether to check that each Input name matches the "
                  "corresponding Output name.")
        .SetDefault(false);
    AddComment(R"DOC(
CoalesceTensor Operator.

coalesce_tensor makes the addresses of the Output tensors contiguous
according to the Input. This Op allocates one big tensor whose dtype is
the same as that of the Input tensors, whose size is the sum of the Input
tensors' numel, and whose dim is therefore {sum(numel)}. This big tensor is
stored in FusedOutput, and the tensors of Output are sliced from it.
Note that all Input tensors must have the same dtype, and the dim of each
Input must equal the dim of the corresponding Output.
The variables of Input and Output may be the same or different. In addition,
coalesce_tensor can copy the values of Input to Output, or fill the Output
with a constant value.
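For example, if Input holds two float32 tensors with shapes [2, 3] and [4],
FusedOutput is a 1-D float32 tensor with at least 10 elements (the exact
size depends on the platform's memory alignment), and the two Output tensors
share memory with slices of FusedOutput, resized to [2, 3] and [4].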

)DOC");
  }
};

}  // namespace operators
}  // namespace paddle

REGISTER_OPERATOR(coalesce_tensor, paddle::operators::CoalesceTensorOp,
                  paddle::operators::CoalesceTensorOpMaker);
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CPU_KERNEL(
    coalesce_tensor,
    ops::CoalesceTensorOpKernel<paddle::platform::CPUDeviceContext, int>,
    ops::CoalesceTensorOpKernel<paddle::platform::CPUDeviceContext, float>,
    ops::CoalesceTensorOpKernel<paddle::platform::CPUDeviceContext, double>);

#ifdef PADDLE_WITH_CUDA
REGISTER_OP_CUDA_KERNEL(
    coalesce_tensor,
    ops::CoalesceTensorOpKernel<paddle::platform::CUDADeviceContext,
                                plat::float16>,
    ops::CoalesceTensorOpKernel<paddle::platform::CUDADeviceContext, int>,
    ops::CoalesceTensorOpKernel<paddle::platform::CUDADeviceContext, float>,
    ops::CoalesceTensorOpKernel<paddle::platform::CUDADeviceContext, double>);
#endif