/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/selected_rows.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using SelectedRows = framework::SelectedRows;

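// Forward lookup kernel: Out[i, :] = W[Ids[i], :] for every id. For example
// (illustrative), with W of shape [N, D] = [10000, 64] and 32 ids, Out has
// shape [32, 64]. When padding_idx != -1, rows whose id equals padding_idx
// are zero-filled instead of looked up.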
template <typename T>
class LookupTableKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* table_t = context.Input<LoDTensor>("W");
    auto* output_t = context.Output<Tensor>("Out");
    auto* ids_var = context.InputVar("Ids");

    int64_t* ids;
    int64_t ids_numel;
    // The input Ids is either a LoDTensor or a SelectedRows. When Ids is a
    // LoDTensor, the tensor itself holds the ids to be looked up in W; when
    // Ids is a SelectedRows, its rows() hold the ids to be looked up in W.
    if (ids_var->IsType<LoDTensor>()) {
      auto* ids_t = context.Input<LoDTensor>("Ids");
      ids = const_cast<int64_t*>(ids_t->data<int64_t>());
      ids_numel = ids_t->numel();
    } else if (ids_var->IsType<SelectedRows>()) {
      auto* ids_t = context.Input<SelectedRows>("Ids");
      ids = const_cast<int64_t*>(ids_t->rows().data());
      ids_numel = ids_t->rows().size();
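      // With SelectedRows ids the number of lookups is only known at run
      // time, so Out is resized here to [ids_numel, D].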
      output_t->Resize({ids_numel, table_t->dims()[1]});
    } else {
      PADDLE_THROW("Unsupported Variable Type of Ids");
    }

    int64_t padding_idx = context.Attr<int64_t>("padding_idx");

    int N = table_t->dims()[0];
    int D = table_t->dims()[1];
    auto* table = table_t->data<T>();
    auto* output = output_t->mutable_data<T>(context.GetPlace());

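    // padding_idx == -1 means "no padding": every id is range-checked and its
    // row is copied from the table.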
    if (padding_idx == -1) {
      for (int64_t i = 0; i < ids_numel; ++i) {
        PADDLE_ENFORCE_LT(ids[i], N);
        PADDLE_ENFORCE_GE(ids[i], 0);
        memcpy(output + i * D, table + ids[i] * D, D * sizeof(T));
      }
    } else {
      for (int64_t i = 0; i < ids_numel; ++i) {
        if (ids[i] == padding_idx) {
          memset(output + i * D, 0, D * sizeof(T));
        } else {
          PADDLE_ENFORCE_LT(ids[i], N);
          PADDLE_ENFORCE_GE(ids[i], 0);
          memcpy(output + i * D, table + ids[i] * D, D * sizeof(T));
        }
      }
    }
  }
};

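// Backward kernel. With is_sparse == true the gradient of W is emitted as a
// SelectedRows that stores only the rows that were looked up; otherwise a
// dense gradient tensor of the same shape as W is produced by scatter-adding
// the output gradient into the rows selected by Ids.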
template <typename T>
class LookupTableGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    bool is_sparse = context.Attr<bool>("is_sparse");
    // Since the padding rows are not trainable and are fixed to zero in the
    // forward pass, their gradient is meaningless and is not handled in the
    // backward pass.
    if (is_sparse) {
      auto* ids = context.Input<LoDTensor>("Ids");
      auto* table = context.Input<LoDTensor>("W");
      auto* d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
      auto* d_table = context.Output<SelectedRows>(framework::GradVarName("W"));

      auto* ids_data = ids->data<int64_t>();
      auto ids_dim = ids->dims();

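      // The rows of the sparse gradient are exactly the ids looked up in this
      // batch; duplicate ids stay as separate rows here.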
      framework::Vector<int64_t> new_rows;
      new_rows.reserve(ids_dim[0]);
      for (int64_t i = 0; i < ids_dim[0]; i++) {
        new_rows.push_back(ids_data[i]);
      }
      d_table->set_rows(new_rows);

      auto* d_table_value = d_table->mutable_value();
      d_table_value->Resize({ids_dim[0], table->dims()[1]});
      d_table_value->mutable_data<T>(context.GetPlace());

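      // set_height records the full first dimension of W, even though only
      // the looked-up rows are materialized in the value tensor.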
      d_table->set_height(table->dims()[0]);

      auto* d_output_data = d_output->data<T>();
      auto* d_table_data = d_table_value->data<T>();

      PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output->dims());
      memcpy(d_table_data, d_output_data, sizeof(T) * d_output->numel());
    } else {
      auto* ids = context.Input<LoDTensor>("Ids");
      auto* d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
      auto* d_table = context.Output<LoDTensor>(framework::GradVarName("W"));
      auto* table = context.Input<LoDTensor>("W");

      auto* ids_data = ids->data<int64_t>();
      auto ids_dim = ids->dims();

      int N = table->dims()[0];
      int D = d_output->dims()[1];

      auto* d_output_data = d_output->data<T>();
      auto* d_table_data = d_table->mutable_data<T>(context.GetPlace());

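      // Dense gradient: zero-fill the whole table gradient, then scatter-add
      // each row of d_output into the row selected by its id; the +=
      // accumulation handles repeated ids.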
      memset(d_table_data, 0, d_table->numel() * sizeof(T));

      for (int64_t i = 0; i < ids->numel(); ++i) {
        PADDLE_ENFORCE_LT(ids_data[i], N);
        PADDLE_ENFORCE_GE(ids_data[i], 0);
        for (int j = 0; j < D; ++j) {
          d_table_data[ids_data[i] * D + j] += d_output_data[i * D + j];
        }
      }
    }
  }
};

}  // namespace operators
}  // namespace paddle