/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cstring>  // memcpy, memset
#include <string>
#include <vector>

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/selected_rows_utils.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"

namespace paddle {
namespace operators {

using Tensor = phi::DenseTensor;
using LoDTensor = phi::DenseTensor;
using SelectedRows = phi::SelectedRows;
using DDim = framework::DDim;

constexpr int64_t kNoPadding = -1;

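// Forward kernel of the lookup_table op: for each index in Ids, copies the
// corresponding row of the embedding table W into Out. Rows whose id equals
// padding_idx are filled with zeros. W may be stored either as a dense
// LoDTensor or as a SelectedRows (e.g. for sparse/distributed tables).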
template <typename T>
class LookupTableKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto *ids_t = context.Input<LoDTensor>("Ids");      // int tensor
    auto *output_t = context.Output<LoDTensor>("Out");  // float tensor
    auto *table_var = context.InputVar("W");

    auto id_name = context.InputNames("Ids").front();
    auto embedding_name = context.InputNames("W").front();
    auto out_name = context.OutputNames("Out").front();

    int64_t padding_idx = context.Attr<int64_t>("padding_idx");
    bool is_test = context.Attr<bool>("is_test");

    int64_t *ids = const_cast<int64_t *>(ids_t->data<int64_t>());
    int64_t ids_numel = ids_t->numel();

    if (table_var->IsType<LoDTensor>()) {
      auto *table_t = context.Input<LoDTensor>("W");
      int64_t row_number = table_t->dims()[0];
      int64_t row_width = table_t->dims()[1];

      auto *table = table_t->data<T>();
      auto *output = output_t->mutable_data<T>(context.GetPlace());

      for (int64_t i = 0; i < ids_numel; ++i) {
        if (padding_idx != kNoPadding && ids[i] == padding_idx) {
          memset(output + i * row_width, 0, row_width * sizeof(T));
        } else {
          PADDLE_ENFORCE_LT(
              ids[i],
              row_number,
              platform::errors::InvalidArgument(
                  "Variable value (input) of OP(fluid.layers.embedding) "
                  "expected >= 0 and < %ld, but got %ld. Please check input "
                  "value.",
                  row_number,
                  ids[i]));
          PADDLE_ENFORCE_GE(
              ids[i],
              0,
              platform::errors::InvalidArgument(
                  "Variable value (input) of OP(fluid.layers.embedding) "
                  "expected >= 0 and < %ld, but got %ld. Please check input "
                  "value.",
                  row_number,
                  ids[i]));
          memcpy(output + i * row_width,
                 table + ids[i] * row_width,
                 row_width * sizeof(T));
        }
      }
    } else if (table_var->IsType<phi::SelectedRows>()) {
      const auto &table_t = table_var->Get<phi::SelectedRows>();
      int64_t row_width = table_t.value().dims()[1];
      const auto *table = table_t.value().data<T>();
      auto *output = output_t->mutable_data<T>(context.GetPlace());
      auto input_data_type =
          framework::TransToProtoVarType(table_t.value().dtype());
      for (int64_t i = 0; i < ids_numel; ++i) {
        if (padding_idx != kNoPadding && ids[i] == padding_idx) {
          memset(output + i * row_width, 0, row_width * sizeof(T));
        } else {
          PADDLE_ENFORCE_GE(
              ids[i],
              0,
              platform::errors::InvalidArgument(
                  "Variable value (input) of OP(fluid.layers.embedding) "
                  "expected >= 0. But received %ld",
                  ids[i]));
          if (is_test) {
            // In inference, an id that is absent from the sparse table is
            // treated as a miss and its output row is filled with zeros.
            auto id_index = table_t.GetIndexFromId(ids[i]);

            if (id_index != -1) {
              if (input_data_type == framework::proto::VarType::INT8 ||
                  input_data_type == framework::proto::VarType::INT16 ||
                  input_data_type == framework::proto::VarType::BF16) {
                memcpy(output + i * row_width,
                       table + id_index * row_width,
                       row_width * sizeof(T));
              } else {
                auto blas = phi::funcs::GetBlas<phi::CPUContext, T>(context);
                blas.VCOPY(row_width,
                           table + id_index * row_width,
                           output + i * row_width);
              }
            } else {
              memset(output + i * row_width, 0, row_width * sizeof(T));
            }
          } else {
            // In training, every id must already exist in the table.
            auto id_index = table_t.Index(ids[i]);
            PADDLE_ENFORCE_GE(
                ids[i],
                0,
                platform::errors::InvalidArgument(
                    "Variable value (input) of OP(fluid.layers.embedding) "
                    "expected >= 0. But received %ld",
                    ids[i]));
            PADDLE_ENFORCE_GE(
                id_index,
                0,
                platform::errors::InvalidArgument(
                    "The input key should exist. But received %d.",
                    id_index));

            if (input_data_type == framework::proto::VarType::INT8 ||
                input_data_type == framework::proto::VarType::INT16 ||
                input_data_type == framework::proto::VarType::BF16) {
              memcpy(output + i * row_width,
                     table + id_index * row_width,
                     row_width * sizeof(T));
            } else {
              auto blas = phi::funcs::GetBlas<phi::CPUContext, T>(context);
              blas.VCOPY(row_width,
                         table + id_index * row_width,
                         output + i * row_width);
            }
          }
        }
      }
    }
  }
};

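// Backward kernel of the lookup_table op. When is_sparse is true, the
// gradient of W is emitted as a SelectedRows holding one row per id;
// otherwise a dense W@GRAD is built by scatter-adding rows of Out@GRAD
// into the rows selected by Ids.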
template <typename T>
class LookupTableGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto *table_var = context.InputVar("W");
    DDim table_dim;
    if (table_var->IsType<LoDTensor>()) {
      table_dim = context.Input<LoDTensor>("W")->dims();
    } else if (table_var->IsType<phi::SelectedRows>()) {
      auto *table_t = context.Input<phi::SelectedRows>("W");
      table_dim = table_t->value().dims();
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "The parameter W of a LookupTable "
          "must be either LoDTensor or SelectedRows"));
    }

    int64_t padding_idx = context.Attr<int64_t>("padding_idx");
    bool is_sparse = context.Attr<bool>("is_sparse");
    // Since paddings are not trainable and fixed in forward, the gradient of
    // paddings makes no sense and we don't deal with it in backward.
    if (is_sparse) {
      auto *ids = context.Input<LoDTensor>("Ids");
      auto *d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
      auto *d_table =
          context.Output<phi::SelectedRows>(framework::GradVarName("W"));

      auto *ids_data = ids->data<int64_t>();
      int64_t ids_num = ids->numel();

      std::vector<int64_t> new_rows;
      new_rows.resize(ids_num);
      std::memcpy(&new_rows[0], ids_data, ids_num * sizeof(int64_t));
      d_table->set_rows(new_rows);

      auto *d_table_value = d_table->mutable_value();
      d_table_value->Resize({ids_num, table_dim[1]});
      d_table_value->mutable_data<T>(context.GetPlace());
      d_table->set_height(table_dim[0]);

      auto *d_output_data = d_output->data<T>();
      auto *d_table_data = d_table_value->data<T>();

      auto d_output_dims = d_output->dims();
      auto d_output_dims_2d =
          phi::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
      PADDLE_ENFORCE_EQ(d_table_value->dims(),
                        d_output_dims_2d,
                        platform::errors::InvalidArgument(
                            "ShapeError: The shape of lookup_table@Grad and "
                            "output@Grad should be the same. "
                            "But received lookup_table@Grad's shape = [%s], "
                            "output@Grad's shape = [%s].",
                            d_table_value->dims(),
                            d_output_dims_2d));
      memcpy(d_table_data, d_output_data, sizeof(T) * d_output->numel());
    } else {
      auto *ids = context.Input<LoDTensor>("Ids");
      auto *d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
      auto *d_table = context.Output<LoDTensor>(framework::GradVarName("W"));

      auto *ids_data = ids->data<int64_t>();

      int64_t N = table_dim[0];
      int64_t D = table_dim[1];

      auto *d_output_data = d_output->data<T>();
      auto *d_table_data = d_table->mutable_data<T>(context.GetPlace());

      memset(d_table_data, 0, d_table->numel() * sizeof(T));

      for (int64_t i = 0; i < ids->numel(); ++i) {
        if (padding_idx != kNoPadding && ids_data[i] == padding_idx) {
          // The gradient of padding_idx should be 0, which the memset above
          // already guarantees, so do nothing here.
        } else {
          PADDLE_ENFORCE_LT(
              ids_data[i],
              N,
              platform::errors::InvalidArgument(
                  "Variable value (input) of OP(fluid.layers.embedding) "
                  "expected >= 0 and < %ld, but got %ld. Please check input "
                  "value.",
                  N,
                  ids_data[i]));
          PADDLE_ENFORCE_GE(
              ids_data[i],
              0,
              platform::errors::InvalidArgument(
                  "Variable value (input) of OP(fluid.layers.embedding) "
                  "expected >= 0 and < %ld, but got %ld. Please check input "
                  "value.",
                  N,
                  ids_data[i]));
          // Scatter-add: accumulate this output-gradient row into the row of
          // W@GRAD selected by the id (duplicate ids accumulate).
          for (int j = 0; j < D; ++j) {
            d_table_data[ids_data[i] * D + j] += d_output_data[i * D + j];
          }
        }
      }
    }
  }
};

}  // namespace operators
}  // namespace paddle
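
// A sketch of how these kernels are typically registered in the matching
// lookup_table_op.cc (op names and dtype list assumed from the usual Paddle
// convention, not taken from this header):
//
//   REGISTER_OP_CPU_KERNEL(lookup_table,
//                          ops::LookupTableKernel<float>,
//                          ops::LookupTableKernel<double>);
//   REGISTER_OP_CPU_KERNEL(lookup_table_grad,
//                          ops::LookupTableGradKernel<float>,
//                          ops::LookupTableGradKernel<double>);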