/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/matrix_bit_code.h"
#include <iostream>
namespace paddle {
namespace operators {
namespace math {

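// Add: tmat(i, j) += vec(index), where index is calc_index(j) of sample i's
// code.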
template <typename T>
void MatrixBitCodeFunctor<T>::Add(const framework::LoDTensor& vec,
                                  framework::LoDTensor* tmat) {
  size_t batch_size = tmat->dims()[0];
  size_t width = tmat->dims()[1];
  for (size_t i = 0; i < batch_size; ++i) {
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      tmat->data<T>()[i * width + j] += vec.data<T>()[index];
    }
  }
}

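// AddGrad: gradient of Add w.r.t. vec; scatters tmat(i, j) back into
// vec(index) for every code bit.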
template <typename T>
void MatrixBitCodeFunctor<T>::AddGrad(const framework::LoDTensor& tmat,
                                      framework::LoDTensor* vec) {
  size_t batch_size = tmat.dims()[0];
  size_t width = tmat.dims()[1];
  for (size_t i = 0; i < batch_size; ++i) {
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      vec->data<T>()[index] += tmat.data<T>()[i * width + j];
    }
  }
}

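// AddGrad (SelectedRows overload): same accumulation as above, but each code
// index is first mapped to a row of the SelectedRows value via AutoGrownIndex.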
template <typename T>
void MatrixBitCodeFunctor<T>::AddGrad(const framework::LoDTensor& tmat,
                                      framework::SelectedRows* vec) {
  size_t batch_size = tmat.dims()[0];
  size_t width = tmat.dims()[1];
  for (size_t i = 0; i < batch_size; ++i) {
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      int64_t row_index =
          vec->AutoGrownIndex(static_cast<int64_t>(index), false, true);
      vec->mutable_value()->data<T>()[row_index] +=
          tmat.data<T>()[i * width + j];
    }
  }
}

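// Sum: for each sample i, sums the entries tmat(i, j) whose code bit j is set
// and writes scale_sum * sum into sum(i).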
template <typename T>
void MatrixBitCodeFunctor<T>::Sum(const framework::LoDTensor& tmat,
                                  framework::LoDTensor* sum, T scale_sum) {
  size_t num_samples = tmat.dims()[0];
  size_t o_width = tmat.dims()[1];
  for (size_t i = 0; i < num_samples; ++i) {
    T sm = static_cast<T>(0.0);
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      if (code->calc_bit(j)) {
        // calc_bit starts from the rightmost bit, while the data in tmat[i]
        // is stored in the reverse order.
        sm += tmat.data<T>()[i * o_width + j];
      }
    }
    sum->data<T>()[i] = scale_sum * sm;
  }
}

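// Mul: tmat(i, j) += dot(weight(index, :), input(i, :)), i.e. the product of
// the weight row selected by code bit j and the i-th input row.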
template <typename T>
void MatrixBitCodeFunctor<T>::Mul(framework::LoDTensor* tmat,
                                  const framework::LoDTensor& weight,
                                  const framework::LoDTensor& input) {
  size_t num_samples = tmat->dims()[0];
  size_t tmat_width = tmat->dims()[1];
  size_t input_width = input.dims()[1];
  size_t weight_width = weight.dims()[1];
  auto tmat_value = tmat->data<T>();
  auto weight_value = weight.data<T>();
  auto input_value = input.data<T>();
  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      T sum = static_cast<T>(0.0);
      for (size_t k = 0; k < input_width; ++k) {
        sum += weight_value[weight_width * index + k] *
               input_value[input_width * i + k];
      }
      tmat_value[i * tmat_width + j] += sum;
    }
  }
}

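// MulGradWeight: gradient of Mul w.r.t. weight,
// weight(index, :) += tmat(i, j) * input(i, :).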
template <typename T>
void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::LoDTensor& tmat,
                                            framework::LoDTensor* weight,
                                            const framework::LoDTensor& input) {
  size_t num_samples = tmat.dims()[0];
  size_t input_width = input.dims()[1];
  size_t tmat_width = tmat.dims()[1];
  size_t weight_width = weight->dims()[1];
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight->data<T>();
  auto input_value = input.data<T>();
  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);

      for (size_t k = 0; k < input_width; ++k) {
        weight_value[weight_width * index + k] +=
            tmat_value[i * tmat_width + j] * input_value[input_width * i + k];
      }
    }
  }
}

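// MulGradWeight (SelectedRows overload): same update as above, with each code
// index mapped to a row of the SelectedRows value via AutoGrownIndex.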
template <typename T>
void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::LoDTensor& tmat,
                                            framework::SelectedRows* weight,
                                            const framework::LoDTensor& input) {
  size_t num_samples = tmat.dims()[0];
  size_t input_width = input.dims()[1];
  size_t tmat_width = tmat.dims()[1];
  size_t weight_width = weight->value().dims()[1];
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight->mutable_value()->data<T>();
  auto input_value = input.data<T>();
  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      for (size_t k = 0; k < input_width; ++k) {
        int64_t row_index =
            weight->AutoGrownIndex(static_cast<int64_t>(index), false, true);
        weight_value[row_index * weight_width + k] +=
            tmat_value[i * tmat_width + j] * input_value[input_width * i + k];
      }
    }
  }
}

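// MulGradError: gradient of Mul w.r.t. input,
// input(i, :) += tmat(i, j) * weight(index, :).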
template <typename T>
void MatrixBitCodeFunctor<T>::MulGradError(const framework::LoDTensor& tmat,
                                           const framework::LoDTensor& weight,
                                           framework::LoDTensor* input) {
  size_t num_samples = tmat.dims()[0];
  size_t tmat_width = tmat.dims()[1];
  size_t input_width = input->dims()[1];
  size_t weight_width = weight.dims()[1];
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight.data<T>();
  auto input_value = input->data<T>();

  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);

      for (size_t k = 0; k < input_width; ++k) {
        input_value[input_width * i + k] +=
            tmat_value[i * tmat_width + j] *
            weight_value[weight_width * index + k];
      }
    }
  }
}

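// Sub: tmat(i, j) -= 1 for every position j whose code bit is set.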
template <typename T>
void MatrixBitCodeFunctor<T>::Sub(framework::LoDTensor* tmat) {
  size_t num_samples = tmat->dims()[0];
  size_t o_width = tmat->dims()[1];
  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      if (code->calc_bit(j)) {
        tmat->data<T>()[i * o_width + j] -= 1;
      }
    }
  }
}

template class MatrixBitCodeFunctor<float>;
template class MatrixBitCodeFunctor<double>;

}  // namespace math
}  // namespace operators
}  // namespace paddle