matrix_bit_code.cc 7.7 KB
Newer Older
Y
Yancey1989 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14
/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

W
weixing02 已提交
15 16
#include "paddle/fluid/operators/math/matrix_bit_code.h"
#include <iostream>
Y
Yancey1989 已提交
17 18 19 20
namespace paddle {
namespace operators {
namespace math {

Y
Yancey1989 已提交
21
template <typename T>
J
JiabinYang 已提交
22 23
void MatrixBitCodeFunctor<T>::Add(framework::LoDTensor* tmat,
                                  const framework::LoDTensor& vec) {
W
weixing02 已提交
24 25
  size_t batch_size = tmat->dims()[0];
  size_t width = tmat->dims()[1];
Y
Yancey1989 已提交
26
  for (size_t i = 0; i < batch_size; ++i) {
27 28
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
Y
Yancey1989 已提交
29
    for (int j = 0; j < code_length; ++j) {
30
      size_t index = code->calc_index(j);
W
weixing02 已提交
31
      tmat->data<T>()[i * width + j] += vec.data<T>()[index];
Y
Yancey1989 已提交
32 33 34 35
    }
  }
}

Y
Yancey1989 已提交
36
template <typename T>
J
JiabinYang 已提交
37 38
void MatrixBitCodeFunctor<T>::AddGrad(const framework::LoDTensor& tmat,
                                      framework::LoDTensor* vec) {
Y
Yancey1989 已提交
39 40 41
  size_t batch_size = tmat.dims()[0];
  size_t width = tmat.dims()[1];
  for (size_t i = 0; i < batch_size; ++i) {
42 43
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
Y
Yancey1989 已提交
44
    for (int j = 0; j < code_length; ++j) {
45
      size_t index = code->calc_index(j);
W
weixing02 已提交
46
      vec->data<T>()[index] += tmat.data<T>()[i * width + j];
Y
Yancey1989 已提交
47 48
    }
  }
Y
Yancey1989 已提交
49 50
}

J
JiabinYang 已提交
51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68
/* Sparse variant of the bias gradient: accumulate tmat[i, j] into the
 * SelectedRows `vec`, translating each code index into a row of vec's
 * value tensor via AutoGrownIndex.
 */
template <typename T>
void MatrixBitCodeFunctor<T>::AddGrad(const framework::LoDTensor& tmat,
                                      framework::SelectedRows* vec) {
  size_t batch_size = tmat.dims()[0];
  size_t width = tmat.dims()[1];
  for (size_t sample = 0; sample < batch_size; ++sample) {
    auto code = code_table->get_code(sample);
    int len = code->get_length();
    for (int bit = 0; bit < len; ++bit) {
      size_t node = code->calc_index(bit);
      // Map the node id onto a row of the sparse gradient.
      int64_t row =
          vec->AutoGrownIndex(static_cast<int64_t>(node), false, true);
      vec->mutable_value()->data<T>()[row] +=
          tmat.data<T>()[sample * width + bit];
    }
  }
}

Y
Yancey1989 已提交
69
template <typename T>
J
JiabinYang 已提交
70 71
void MatrixBitCodeFunctor<T>::Sum(const framework::LoDTensor& tmat,
                                  framework::LoDTensor* sum, T scale_sum) {
Y
Yancey1989 已提交
72 73 74
  size_t num_samples = tmat.dims()[0];
  size_t o_width = tmat.dims()[1];
  for (size_t i = 0; i < num_samples; ++i) {
Y
Yancey1989 已提交
75
    T sm = static_cast<T>(0.0);
76 77
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
Y
Yancey1989 已提交
78
    for (int j = 0; j < code_length; ++j) {
79
      if (code->calc_bit(j)) {
80 81
        // calc_bit starts from right most bit, while data in tmat[i] is in the
        // reverse order.
Y
Yancey1989 已提交
82 83 84
        sm += tmat.data<T>()[i * o_width + j];
      }
    }
W
weixing02 已提交
85
    sum->data<T>()[i] = scale_sum * sm;
Y
Yancey1989 已提交
86 87
  }
}
Y
Yancey1989 已提交
88

Y
Yancey1989 已提交
89
template <typename T>
J
JiabinYang 已提交
90 91 92
void MatrixBitCodeFunctor<T>::Mul(framework::LoDTensor* tmat,
                                  const framework::LoDTensor& weight,
                                  const framework::LoDTensor& input) {
W
weixing02 已提交
93 94
  size_t num_samples = tmat->dims()[0];
  size_t tmat_width = tmat->dims()[1];
Y
Yancey1989 已提交
95
  size_t input_width = input.dims()[1];
W
weixing02 已提交
96 97
  size_t weight_width = weight.dims()[1];
  auto tmat_value = tmat->data<T>();
Y
Yancey1989 已提交
98 99
  auto weight_value = weight.data<T>();
  auto input_value = input.data<T>();
Y
Yancey1989 已提交
100
  for (size_t i = 0; i < num_samples; ++i) {
101 102
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
Y
Yancey1989 已提交
103
    for (int j = 0; j < code_length; ++j) {
104
      size_t index = code->calc_index(j);
Y
Yancey1989 已提交
105 106
      T sum = static_cast<T>(0.0);
      for (size_t k = 0; k < input_width; ++k) {
Y
Yancey1989 已提交
107 108
        sum += weight_value[weight_width * index + k] *
               input_value[input_width * i + k];
Y
Yancey1989 已提交
109
      }
Y
Yancey1989 已提交
110
      tmat_value[i * tmat_width + j] += sum;
Y
Yancey1989 已提交
111 112 113 114 115
    }
  }
}

template <typename T>
J
JiabinYang 已提交
116 117 118
void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::LoDTensor& tmat,
                                            framework::LoDTensor* weight,
                                            const framework::LoDTensor& input) {
Y
Yancey1989 已提交
119 120
  size_t num_samples = tmat.dims()[0];
  size_t input_width = input.dims()[1];
W
weixing02 已提交
121 122
  size_t tmat_width = tmat.dims()[1];
  size_t weight_width = weight->dims()[1];
J
temp  
JiabinYang 已提交
123 124
  VLOG(30) << "sparse w_grad dims is [" << weight->dims()[0] << " ,"
           << weight->dims()[1] << " ]";
Y
Yancey1989 已提交
125
  auto tmat_value = tmat.data<T>();
W
weixing02 已提交
126
  auto weight_value = weight->data<T>();
Y
Yancey1989 已提交
127
  auto input_value = input.data<T>();
Y
Yancey1989 已提交
128
  for (size_t i = 0; i < num_samples; ++i) {
129 130
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
Y
Yancey1989 已提交
131
    for (int j = 0; j < code_length; ++j) {
132
      size_t index = code->calc_index(j);
Y
Yancey1989 已提交
133

Y
Yancey1989 已提交
134
      for (size_t k = 0; k < input_width; ++k) {
W
weixing02 已提交
135 136
        weight_value[weight_width * index + k] +=
            tmat_value[i * tmat_width + j] * input_value[input_width * i + k];
Y
Yancey1989 已提交
137
      }
Y
Yancey1989 已提交
138
    }
Y
Yancey1989 已提交
139
  }
Y
Yancey1989 已提交
140
}
Y
Yancey1989 已提交
141

J
JiabinYang 已提交
142 143 144 145 146 147 148 149
/* Backward of Mul w.r.t. a sparse (SelectedRows) weight gradient: for each
 * sample and code bit, add grad * input-row into the sparse row mapped from
 * the code index by AutoGrownIndex.
 */
template <typename T>
void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::LoDTensor& tmat,
                                            framework::SelectedRows* weight,
                                            const framework::LoDTensor& input) {
  size_t num_samples = tmat.dims()[0];
  size_t input_width = input.dims()[1];
  size_t tmat_width = tmat.dims()[1];
  size_t weight_width = weight->value().dims()[1];
  VLOG(30) << "sparse w_grad dims is: [" << weight->value().dims()[0] << " ,"
           << weight->value().dims()[1] << " ]";
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight->mutable_value()->data<T>();
  auto input_value = input.data<T>();
  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      // The row lookup depends only on (i, j); resolve it once per code bit
      // instead of once per input element as the original did — same
      // arguments, same result, input_width-times fewer lookups.
      int64_t row_index =
          weight->AutoGrownIndex(static_cast<int64_t>(index), false, true);
      for (size_t k = 0; k < input_width; ++k) {
        weight_value[row_index * weight_width + k] +=
            tmat_value[i * tmat_width + j] * input_value[input_width * i + k];
      }
    }
  }
}
J
JiabinYang 已提交
171

Y
Yancey1989 已提交
172
template <typename T>
J
JiabinYang 已提交
173 174 175
void MatrixBitCodeFunctor<T>::MulGradError(const framework::LoDTensor& tmat,
                                           const framework::LoDTensor& weight,
                                           framework::LoDTensor* input) {
Y
Yancey1989 已提交
176
  size_t num_samples = tmat.dims()[0];
W
weixing02 已提交
177 178
  size_t tmat_width = tmat.dims()[1];
  size_t input_width = input->dims()[1];
Y
Yancey1989 已提交
179
  size_t weight_width = weight.dims()[1];
Y
Yancey1989 已提交
180 181
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight.data<T>();
W
weixing02 已提交
182
  auto input_value = input->data<T>();
Y
Yancey1989 已提交
183

Y
Yancey1989 已提交
184
  for (size_t i = 0; i < num_samples; ++i) {
185 186
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
Y
Yancey1989 已提交
187
    for (int j = 0; j < code_length; ++j) {
188
      size_t index = code->calc_index(j);
Y
Yancey1989 已提交
189 190

      for (size_t k = 0; k < input_width; ++k) {
W
weixing02 已提交
191 192 193
        input_value[input_width * i + k] +=
            tmat_value[i * tmat_width + j] *
            weight_value[weight_width * index + k];
Y
Yancey1989 已提交
194 195 196 197 198 199
      }
    }
  }
}

template <typename T>
J
JiabinYang 已提交
200
void MatrixBitCodeFunctor<T>::Sub(framework::LoDTensor* tmat) {
W
weixing02 已提交
201 202
  size_t num_samples = tmat->dims()[0];
  size_t o_width = tmat->dims()[1];
Y
Yancey1989 已提交
203
  for (size_t i = 0; i < num_samples; ++i) {
204 205
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
Y
Yancey1989 已提交
206
    for (int j = 0; j < code_length; ++j) {
207
      if (code->calc_bit(j)) {
W
weixing02 已提交
208
        tmat->data<T>()[i * o_width + j] -= 1;
Y
Yancey1989 已提交
209 210 211
      }
    }
  }
Y
Yancey1989 已提交
212 213
}

Y
Yancey1989 已提交
214 215 216
// Explicit instantiations for the element types used by the hsigmoid ops.
template class MatrixBitCodeFunctor<float>;
template class MatrixBitCodeFunctor<double>;

Y
Yancey1989 已提交
217 218 219
}  // namespace math
}  // namespace operators
}  // namespace paddle