/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/collective/global_gather_op.h"

namespace paddle {
namespace operators {

class GlobalGatherOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "GlobalGather");
    OP_INOUT_CHECK(
        ctx->HasInput("local_count"), "Input", "local_count", "GlobalGather");
    OP_INOUT_CHECK(
        ctx->HasInput("global_count"), "Input", "global_count", "GlobalGather");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "GlobalGather");
    int ring_id = ctx->Attrs().Get<int>("ring_id");
    PADDLE_ENFORCE_GE(
        ring_id,
        0,
        platform::errors::InvalidArgument(
            "The ring_id (%d) for global gather op must be non-negative.",
            ring_id));
    auto input_dims = ctx->GetInputDim("X");
    auto ndim_input = input_dims.size();
    // dim check
    PADDLE_ENFORCE_EQ(ndim_input,
                      2,
                      platform::errors::InvalidArgument(
                          "The input tensor's dimension must be 2. "
                          "But received input's dimension = %d.",
                          ndim_input));
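    // The number of rows actually received depends on the runtime values of
    // local_count/global_count, so both output dimensions are left dynamic.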
    framework::DDim out_dims = phi::make_ddim({-1, -1});
    ctx->SetOutputDim("Out", out_dims);
  }

 protected:
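  // Select the kernel according to the data type of input "X" and the place
  // (device) this op is executed on.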
  phi::KernelKey GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(ctx, "X"),
                          ctx.GetPlace());
  }
};

class GlobalGatherOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
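  // Declare the inputs (X, local_count, global_count), the output (Out) and
  // the attributes (ring_id, use_calc_stream) of the global_gather op.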
  void Make() override {
    AddInput("X", "(Tensor) tensor send.");
    AddInput("local_count",
             "(Tensor) Tensor which has n_expert * world_size elements that "
             "indicates"
             "how many data needed to be received from each expert.");
    AddInput("global_count",
             "(Tensor) Tensor which has n_expert * world_size elements that "
             "indicates"
             "how many data needed to be sent to each expert.");
    AddOutput("Out", "(Tensor) the result of global_gather.");
    AddAttr<int>("ring_id", "(int default 0) nccl communication ring id.")
        .SetDefault(0);
    AddAttr<bool>(
        "use_calc_stream",
        "(bool default false) eject CUDA operations to calculation stream.")
        .SetDefault(false);
    AddComment(R"DOC(
Global Gather Operator
Gather data in X to n_expert * world_size experts according to
local_count and receive tensors from n_expert * world_size experts according
to global_count.
)DOC");
  }
};

template <typename T>
class GlobalGatherOpGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
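  // The backward of global_gather is global_scatter: the gradient of Out is
  // scattered back using the same local_count, global_count and attributes.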
  void Apply(GradOpPtr<T> retv) const override {
    retv->SetType("global_scatter");
    retv->SetInput("X", this->OutputGrad("Out"));
    retv->SetInput("local_count", this->Input("local_count"));
    retv->SetInput("global_count", this->Input("global_count"));
    retv->SetOutput("Out", this->InputGrad("X"));
    retv->SetAttrMap(this->Attrs());
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;
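
// Register the operator together with its gradient makers for both the
// static graph (OpDesc) and dynamic graph (imperative::OpBase) modes.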
REGISTER_OPERATOR(global_gather,
                  ops::GlobalGatherOp,
                  ops::GlobalGatherOpMaker,
                  ops::GlobalGatherOpGradMaker<paddle::framework::OpDesc>,
                  ops::GlobalGatherOpGradMaker<paddle::imperative::OpBase>)

PD_REGISTER_STRUCT_KERNEL(global_gather,
                          CPU,
                          ALL_LAYOUT,
                          ops::GlobalGatherOpCPUKernel,
                          float,
                          double,
                          int,
                          int64_t,
                          plat::float16) {}