/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/matrix_bit_code.h"
#include <iostream>
namespace paddle {
namespace operators {
namespace math {

template <typename T>
J
JiabinYang 已提交
22 23
void MatrixBitCodeFunctor<T>::Add(framework::LoDTensor* tmat,
                                  const framework::LoDTensor& vec) {
W
weixing02 已提交
24 25
  size_t batch_size = tmat->dims()[0];
  size_t width = tmat->dims()[1];
Y
Yancey1989 已提交
26
  for (size_t i = 0; i < batch_size; ++i) {
27 28
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
Y
Yancey1989 已提交
29
    for (int j = 0; j < code_length; ++j) {
30
      size_t index = code->calc_index(j);
W
weixing02 已提交
31
      tmat->data<T>()[i * width + j] += vec.data<T>()[index];
Y
Yancey1989 已提交
32 33 34 35
    }
  }
}

template <typename T>
J
JiabinYang 已提交
37 38
void MatrixBitCodeFunctor<T>::AddGrad(const framework::LoDTensor& tmat,
                                      framework::LoDTensor* vec) {
Y
Yancey1989 已提交
39 40 41
  size_t batch_size = tmat.dims()[0];
  size_t width = tmat.dims()[1];
  for (size_t i = 0; i < batch_size; ++i) {
42 43
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
Y
Yancey1989 已提交
44
    for (int j = 0; j < code_length; ++j) {
45
      size_t index = code->calc_index(j);
W
weixing02 已提交
46
      vec->data<T>()[index] += tmat.data<T>()[i * width + j];
Y
Yancey1989 已提交
47 48
    }
  }
Y
Yancey1989 已提交
49 50
}

template <typename T>
J
JiabinYang 已提交
52 53
void MatrixBitCodeFunctor<T>::Sum(const framework::LoDTensor& tmat,
                                  framework::LoDTensor* sum, T scale_sum) {
Y
Yancey1989 已提交
54 55 56
  size_t num_samples = tmat.dims()[0];
  size_t o_width = tmat.dims()[1];
  for (size_t i = 0; i < num_samples; ++i) {
Y
Yancey1989 已提交
57
    T sm = static_cast<T>(0.0);
58 59
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
Y
Yancey1989 已提交
60
    for (int j = 0; j < code_length; ++j) {
61
      if (code->calc_bit(j)) {
62 63
        // calc_bit starts from right most bit, while data in tmat[i] is in the
        // reverse order.
Y
Yancey1989 已提交
64 65 66
        sm += tmat.data<T>()[i * o_width + j];
      }
    }
W
weixing02 已提交
67
    sum->data<T>()[i] = scale_sum * sm;
Y
Yancey1989 已提交
68 69
  }
}
template <typename T>
J
JiabinYang 已提交
72 73 74
void MatrixBitCodeFunctor<T>::Mul(framework::LoDTensor* tmat,
                                  const framework::LoDTensor& weight,
                                  const framework::LoDTensor& input) {
W
weixing02 已提交
75 76
  size_t num_samples = tmat->dims()[0];
  size_t tmat_width = tmat->dims()[1];
Y
Yancey1989 已提交
77
  size_t input_width = input.dims()[1];
W
weixing02 已提交
78 79
  size_t weight_width = weight.dims()[1];
  auto tmat_value = tmat->data<T>();
Y
Yancey1989 已提交
80 81
  auto weight_value = weight.data<T>();
  auto input_value = input.data<T>();
Y
Yancey1989 已提交
82
  for (size_t i = 0; i < num_samples; ++i) {
83 84
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
Y
Yancey1989 已提交
85
    for (int j = 0; j < code_length; ++j) {
86
      size_t index = code->calc_index(j);
Y
Yancey1989 已提交
87 88
      T sum = static_cast<T>(0.0);
      for (size_t k = 0; k < input_width; ++k) {
Y
Yancey1989 已提交
89 90
        sum += weight_value[weight_width * index + k] *
               input_value[input_width * i + k];
Y
Yancey1989 已提交
91
      }
Y
Yancey1989 已提交
92
      tmat_value[i * tmat_width + j] += sum;
Y
Yancey1989 已提交
93 94 95 96 97
    }
  }
}

template <typename T>
J
JiabinYang 已提交
98 99 100
void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::LoDTensor& tmat,
                                            framework::LoDTensor* weight,
                                            const framework::LoDTensor& input) {
Y
Yancey1989 已提交
101 102
  size_t num_samples = tmat.dims()[0];
  size_t input_width = input.dims()[1];
W
weixing02 已提交
103 104
  size_t tmat_width = tmat.dims()[1];
  size_t weight_width = weight->dims()[1];
J
temp  
JiabinYang 已提交
105 106
  VLOG(30) << "sparse w_grad dims is [" << weight->dims()[0] << " ,"
           << weight->dims()[1] << " ]";
Y
Yancey1989 已提交
107
  auto tmat_value = tmat.data<T>();
W
weixing02 已提交
108
  auto weight_value = weight->data<T>();
Y
Yancey1989 已提交
109
  auto input_value = input.data<T>();
Y
Yancey1989 已提交
110
  for (size_t i = 0; i < num_samples; ++i) {
111 112
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
Y
Yancey1989 已提交
113
    for (int j = 0; j < code_length; ++j) {
114
      size_t index = code->calc_index(j);
Y
Yancey1989 已提交
115

Y
Yancey1989 已提交
116
      for (size_t k = 0; k < input_width; ++k) {
W
weixing02 已提交
117 118
        weight_value[weight_width * index + k] +=
            tmat_value[i * tmat_width + j] * input_value[input_width * i + k];
Y
Yancey1989 已提交
119
      }
Y
Yancey1989 已提交
120
    }
Y
Yancey1989 已提交
121
  }
Y
Yancey1989 已提交
122
}
// Backward of Mul w.r.t. a sparse (SelectedRows) weight gradient. Same math
// as the dense overload, but node index `index` is first translated to a row
// of the SelectedRows value tensor via AutoGrownIndex (auto_grown = false,
// i.e. lookup only).
template <typename T>
void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::LoDTensor& tmat,
                                            framework::SelectedRows* weight,
                                            const framework::LoDTensor& input) {
  size_t num_samples = tmat.dims()[0];
  size_t input_width = input.dims()[1];
  size_t tmat_width = tmat.dims()[1];
  size_t weight_width = weight->value().dims()[1];
  VLOG(30) << "sparse w_grad dims is: [" << weight->value().dims()[0] << " ,"
           << weight->value().dims()[1] << " ]";
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight->mutable_value()->data<T>();
  auto input_value = input.data<T>();
  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      // The row lookup depends only on `index`, so resolve it once per code
      // position instead of once per input column.
      int64_t row_index =
          weight->AutoGrownIndex(static_cast<int64_t>(index), false);
      for (size_t k = 0; k < input_width; ++k) {
        weight_value[row_index * weight_width + k] +=
            tmat_value[i * tmat_width + j] * input_value[input_width * i + k];
      }
    }
  }
}
template <typename T>
J
JiabinYang 已提交
155 156 157
void MatrixBitCodeFunctor<T>::MulGradError(const framework::LoDTensor& tmat,
                                           const framework::LoDTensor& weight,
                                           framework::LoDTensor* input) {
Y
Yancey1989 已提交
158
  size_t num_samples = tmat.dims()[0];
W
weixing02 已提交
159 160
  size_t tmat_width = tmat.dims()[1];
  size_t input_width = input->dims()[1];
Y
Yancey1989 已提交
161
  size_t weight_width = weight.dims()[1];
Y
Yancey1989 已提交
162 163
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight.data<T>();
W
weixing02 已提交
164
  auto input_value = input->data<T>();
Y
Yancey1989 已提交
165

Y
Yancey1989 已提交
166
  for (size_t i = 0; i < num_samples; ++i) {
167 168
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
Y
Yancey1989 已提交
169
    for (int j = 0; j < code_length; ++j) {
170
      size_t index = code->calc_index(j);
Y
Yancey1989 已提交
171 172

      for (size_t k = 0; k < input_width; ++k) {
W
weixing02 已提交
173 174 175
        input_value[input_width * i + k] +=
            tmat_value[i * tmat_width + j] *
            weight_value[weight_width * index + k];
Y
Yancey1989 已提交
176 177 178 179 180 181
      }
    }
  }
}

template <typename T>
J
JiabinYang 已提交
182
void MatrixBitCodeFunctor<T>::Sub(framework::LoDTensor* tmat) {
W
weixing02 已提交
183 184
  size_t num_samples = tmat->dims()[0];
  size_t o_width = tmat->dims()[1];
Y
Yancey1989 已提交
185
  for (size_t i = 0; i < num_samples; ++i) {
186 187
    auto code = code_table->get_code(i);
    int code_length = code->get_length();
Y
Yancey1989 已提交
188
    for (int j = 0; j < code_length; ++j) {
189
      if (code->calc_bit(j)) {
W
weixing02 已提交
190
        tmat->data<T>()[i * o_width + j] -= 1;
Y
Yancey1989 已提交
191 192 193
      }
    }
  }
Y
Yancey1989 已提交
194 195
}

// Explicit instantiations for the supported element types.
template class MatrixBitCodeFunctor<float>;
template class MatrixBitCodeFunctor<double>;

}  // namespace math
}  // namespace operators
}  // namespace paddle