/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/gather.h"
#include "paddle/fluid/operators/scatter.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

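// Forward CPU kernel for gather: Out[i] = X[Index[i]] along the chosen
// axis. axis == 0 takes a fast row-copy path; any other axis dispatches to
// GatherV2Function.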
template <typename T>
class GatherOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    PADDLE_ENFORCE_EQ(
        platform::is_cpu_place(ctx.GetPlace()), true,
        platform::errors::PreconditionNotMet("This kernel only runs on CPU."));

    auto *x = ctx.Input<Tensor>("X");
    auto *index = ctx.Input<Tensor>("Index");
    auto *output = ctx.Output<Tensor>("Out");

    int axis = ctx.Attr<int>("axis");
    // An optional "Axis" input tensor, when provided, overrides the
    // "axis" attribute.
    if (ctx.HasInput("Axis")) {
      const Tensor *axis_tensor = ctx.Input<Tensor>("Axis");
      const auto &axis_type = axis_tensor->type();
      if (axis_type == framework::proto::VarType::INT32) {
        axis = static_cast<int>(axis_tensor->data<int32_t>()[0]);
      } else if (axis_type == framework::proto::VarType::INT64) {
        axis = static_cast<int>(axis_tensor->data<int64_t>()[0]);
      }
    }
    const auto &place = ctx.GetPlace();
    const auto &index_type = index->type();
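    // Gathering along a non-zero axis is handled by GatherV2Function,
    // specialized on the index element type (int32 or int64).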
    if (axis != 0) {
      if (index_type == framework::proto::VarType::INT32) {
        GatherV2Function<T, int32_t>(x, index, axis, output, place);
      } else if (index_type == framework::proto::VarType::INT64) {
        GatherV2Function<T, int64_t>(x, index, axis, output, place);
      }
      return;
    }

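    // axis == 0 fast path: allocate the output, then copy the rows of X
    // selected by Index with CPUGather.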
    output->mutable_data<T>(ctx.GetPlace());
    if (x->numel() == 0) return;
    if (index_type == framework::proto::VarType::INT32) {
      CPUGather<T, int>(ctx.device_context(), *x, *index, output);
    } else if (index_type == framework::proto::VarType::INT64) {
      CPUGather<T, int64_t>(ctx.device_context(), *x, *index, output);
    }
  }
};

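// Backward CPU kernel for gather: scatters the rows of dOut back into dX at
// the positions given by Index; rows of X that were never gathered receive a
// zero gradient.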
template <typename T>
class GatherGradientOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    PADDLE_ENFORCE_EQ(
        platform::is_cpu_place(ctx.GetPlace()), true,
        platform::errors::PreconditionNotMet("This kernel only runs on CPU."));

    auto *index = ctx.Input<Tensor>("Index");
    auto *dX = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto *dO = ctx.Input<Tensor>(framework::GradVarName("Out"));

    int axis = ctx.Attr<int>("axis");
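    // As in the forward kernel, an optional "Axis" input overrides the
    // "axis" attribute.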
    if (ctx.HasInput("Axis")) {
      const Tensor *axis_tensor = ctx.Input<Tensor>("Axis");
      const auto &axis_type = axis_tensor->type();
      if (axis_type == framework::proto::VarType::INT32) {
        axis = static_cast<int>(axis_tensor->data<int32_t>()[0]);
      } else if (axis_type == framework::proto::VarType::INT64) {
        axis = static_cast<int>(axis_tensor->data<int64_t>()[0]);
      }
    }
    const auto &index_type = index->type();

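    // For a non-zero axis, GatherV2GradFunction computes dX directly.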
    if (axis != 0) {
      if (index_type == framework::proto::VarType::INT32) {
        GatherV2GradFunction<T, int32_t>(dO, index, axis, dX, ctx.GetPlace());
      } else if (index_type == framework::proto::VarType::INT64) {
        GatherV2GradFunction<T, int64_t>(dO, index, axis, dX, ctx.GetPlace());
      }
      return;
    }

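    // axis == 0 path: zero-initialize dX so that unreferenced rows keep a
    // zero gradient before scattering dOut back in.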
    dX->mutable_data<T>(ctx.GetPlace());
    auto dxt = framework::EigenVector<T>::Flatten(*dX);
    auto &place = *ctx.template device_context<platform::CPUDeviceContext>()
                       .eigen_device();
    dxt.device(place) = dxt.constant(static_cast<T>(0));
    if (dO->numel() == 0) return;
    bool overwrite = ctx.Attr<bool>("overwrite");

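    // With overwrite == true, a repeated index keeps only the last written
    // row (ScatterAssign); with overwrite == false, contributions for
    // repeated indices accumulate (ScatterAssignAdd), matching the exact
    // gather gradient.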
    if (index_type == framework::proto::VarType::INT32) {
      if (overwrite) {
        ScatterAssign<T, int32_t>(ctx.device_context(), *dO, *index, dX);
      } else {
        ScatterAssignAdd<T, int32_t>(ctx, *dO, *index, dX);
      }
    } else if (index_type == framework::proto::VarType::INT64) {
      if (overwrite) {
        ScatterAssign<T, int64_t>(ctx.device_context(), *dO, *index, dX);
      } else {
        ScatterAssignAdd<T, int64_t>(ctx, *dO, *index, dX);
      }
    }
  }
};

}  // namespace operators
}  // namespace paddle