/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/selected_rows.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using SelectedRows = framework::SelectedRows;

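// Forward kernel: for each id in Ids, copies the corresponding row of the
// embedding table W into Out, so Out ends up with one D-wide row per id.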
template <typename T>
class LookupTableKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* table_t = context.Input<LoDTensor>("W");
    auto* ids_var = context.InputVar("Ids");
    Tensor* output_t = context.Output<Tensor>("Out");

    int64_t* ids;
    int64_t ids_numel;

    // The type of Ids(Input) is SelectedRows or LoDTensor, when Ids's type
    // is LoDTensor, this tensor contains the ids to be looked up in W;
    // when Ids's type is SelectedRows, the rows of Ids contains the
    // ids to be looked up in W.
    if (ids_var->IsType<LoDTensor>()) {
      auto* ids_t = context.Input<LoDTensor>("Ids");
      ids = const_cast<int64_t*>(ids_t->data<int64_t>());
      ids_numel = ids_t->numel();
    } else if (ids_var->IsType<SelectedRows>()) {
      auto* ids_t = context.Input<SelectedRows>("Ids");
      ids = const_cast<int64_t*>(ids_t->rows().data());
      ids_numel = ids_t->rows().size();
      output_t->Resize({ids_numel, table_t->dims()[1]});
    } else {
      PADDLE_THROW("Unsupported Variable Type of Ids");
    }

    // padding_idx == -1 means no padding is applied; otherwise, output rows
    // whose id equals padding_idx are filled with zeros.
    int64_t padding_idx = context.Attr<int64_t>("padding_idx");

    int N = table_t->dims()[0];  // vocabulary size: number of rows in W
    int D = table_t->dims()[1];  // embedding width: number of columns in W
    auto* table = table_t->data<T>();
    auto* output = output_t->mutable_data<T>(context.GetPlace());

    if (padding_idx == -1) {
      for (int64_t i = 0; i < ids_numel; ++i) {
        PADDLE_ENFORCE_LT(ids[i], N);
        PADDLE_ENFORCE_GE(ids[i], 0);
        memcpy(output + i * D, table + ids[i] * D, D * sizeof(T));
      }
    } else {
      for (int64_t i = 0; i < ids_numel; ++i) {
        if (ids[i] == padding_idx) {
          memset(output + i * D, 0, D * sizeof(T));
        } else {
          PADDLE_ENFORCE_LT(ids[i], N);
          PADDLE_ENFORCE_GE(ids[i], 0);
          memcpy(output + i * D, table + ids[i] * D, D * sizeof(T));
        }
      }
    }
  }
};

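// Backward kernel: computes the gradient of W. With is_sparse = true, the
// gradient is emitted as a SelectedRows holding only the rows that were
// actually looked up; otherwise, a dense gradient tensor is accumulated.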
template <typename T>
class LookupTableGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    bool is_sparse = context.Attr<bool>("is_sparse");
    // Since paddings are not trainable and fixed in forward, the gradient of
    // paddings makes no sense and we don't deal with it in backward.
    if (is_sparse) {
      auto* ids = context.Input<LoDTensor>("Ids");
      auto* table = context.Input<LoDTensor>("W");
      auto* d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
      auto* d_table = context.Output<SelectedRows>(framework::GradVarName("W"));

      auto* ids_data = ids->data<int64_t>();
      auto ids_dim = ids->dims();

      // The rows of the SelectedRows gradient are the looked-up ids; each row
      // of its value tensor holds the gradient for the corresponding id.
      framework::Vector<int64_t> new_rows;
      new_rows.reserve(ids_dim[0]);
      for (int64_t i = 0; i < ids_dim[0]; i++) {
        new_rows.push_back(ids_data[i]);
      }
      d_table->set_rows(new_rows);

      auto* d_table_value = d_table->mutable_value();
      d_table_value->Resize({ids_dim[0], table->dims()[1]});
      d_table_value->mutable_data<T>(context.GetPlace());

      d_table->set_height(table->dims()[0]);

      auto* d_output_data = d_output->data<T>();
      auto* d_table_data = d_table_value->data<T>();

      PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output->dims());
      memcpy(d_table_data, d_output_data, sizeof(T) * d_output->numel());
    } else {
      auto* ids = context.Input<LoDTensor>("Ids");
      auto* d_output = context.Input<LoDTensor>(framework::GradVarName("Out"));
      auto* d_table = context.Output<LoDTensor>(framework::GradVarName("W"));
      auto* table = context.Input<LoDTensor>("W");

      auto* ids_data = ids->data<int64_t>();
      auto ids_dim = ids->dims();

      int N = table->dims()[0];
      int D = d_output->dims()[1];

      auto* d_output_data = d_output->data<T>();
      auto* d_table_data = d_table->mutable_data<T>(context.GetPlace());

      memset(d_table_data, 0, d_table->numel() * sizeof(T));

      // Accumulate each row of the output gradient into the table row it was
      // gathered from.
      for (int64_t i = 0; i < ids->numel(); ++i) {
        PADDLE_ENFORCE_LT(ids_data[i], N);
        PADDLE_ENFORCE_GE(ids_data[i], 0);
        for (int j = 0; j < D; ++j) {
          d_table_data[ids_data[i] * D + j] += d_output_data[i * D + j];
        }
      }
    }
  }
};

}  // namespace operators
}  // namespace paddle