/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/matrix_bit_code.h"
#include <iostream>
namespace paddle {
namespace operators {
namespace math {

template <typename T>
J
JiabinYang 已提交
22 23
void MatrixBitCodeFunctor<T>::Add(const framework::Tensor& vec,
                                  framework::Tensor* tmat) {
W
weixing02 已提交
24 25
  size_t batch_size = tmat->dims()[0];
  size_t width = tmat->dims()[1];
Y
Yancey1989 已提交
26
  for (size_t i = 0; i < batch_size; ++i) {
J
JiabinYang 已提交
27
    auto code = code_table_->get_code(i);
28
    int code_length = code->get_length();
Y
Yancey1989 已提交
29
    for (int j = 0; j < code_length; ++j) {
30
      size_t index = code->calc_index(j);
W
weixing02 已提交
31
      tmat->data<T>()[i * width + j] += vec.data<T>()[index];
Y
Yancey1989 已提交
32 33 34 35
    }
  }
}

Y
Yancey1989 已提交
36
template <typename T>
J
JiabinYang 已提交
37 38
void MatrixBitCodeFunctor<T>::AddGrad(const framework::Tensor& tmat,
                                      framework::Tensor* vec) {
Y
Yancey1989 已提交
39 40 41
  size_t batch_size = tmat.dims()[0];
  size_t width = tmat.dims()[1];
  for (size_t i = 0; i < batch_size; ++i) {
J
JiabinYang 已提交
42
    auto code = code_table_->get_code(i);
43
    int code_length = code->get_length();
Y
Yancey1989 已提交
44
    for (int j = 0; j < code_length; ++j) {
45
      size_t index = code->calc_index(j);
W
weixing02 已提交
46
      vec->data<T>()[index] += tmat.data<T>()[i * width + j];
Y
Yancey1989 已提交
47 48
    }
  }
Y
Yancey1989 已提交
49 50
}

Y
Yancey1989 已提交
51
template <typename T>
J
JiabinYang 已提交
52 53
void MatrixBitCodeFunctor<T>::Sum(const framework::Tensor& tmat,
                                  framework::Tensor* sum, T scale_sum) {
Y
Yancey1989 已提交
54 55 56
  size_t num_samples = tmat.dims()[0];
  size_t o_width = tmat.dims()[1];
  for (size_t i = 0; i < num_samples; ++i) {
Y
Yancey1989 已提交
57
    T sm = static_cast<T>(0.0);
J
JiabinYang 已提交
58
    auto code = code_table_->get_code(i);
59
    int code_length = code->get_length();
Y
Yancey1989 已提交
60
    for (int j = 0; j < code_length; ++j) {
61
      if (code->calc_bit(j)) {
62 63
        // calc_bit starts from right most bit, while data in tmat[i] is in the
        // reverse order.
Y
Yancey1989 已提交
64 65 66
        sm += tmat.data<T>()[i * o_width + j];
      }
    }
W
weixing02 已提交
67
    sum->data<T>()[i] = scale_sum * sm;
Y
Yancey1989 已提交
68 69
  }
}
Y
Yancey1989 已提交
70

Y
Yancey1989 已提交
71
template <typename T>
J
JiabinYang 已提交
72 73 74
void MatrixBitCodeFunctor<T>::Mul(framework::Tensor* tmat,
                                  const framework::Tensor& weight,
                                  const framework::Tensor& input) {
J
JiabinYang 已提交
75 76
  auto blas =
      GetBlas<platform::CPUDeviceContext, T>(platform::CPUDeviceContext());
W
weixing02 已提交
77 78
  size_t num_samples = tmat->dims()[0];
  size_t tmat_width = tmat->dims()[1];
Y
Yancey1989 已提交
79
  size_t input_width = input.dims()[1];
W
weixing02 已提交
80 81
  size_t weight_width = weight.dims()[1];
  auto tmat_value = tmat->data<T>();
Y
Yancey1989 已提交
82 83
  auto weight_value = weight.data<T>();
  auto input_value = input.data<T>();
Y
Yancey1989 已提交
84
  for (size_t i = 0; i < num_samples; ++i) {
J
JiabinYang 已提交
85
    auto code = code_table_->get_code(i);
86
    int code_length = code->get_length();
J
JiabinYang 已提交
87
    const T* input_row = input_value + input_width * i;
Y
Yancey1989 已提交
88
    for (int j = 0; j < code_length; ++j) {
89
      size_t index = code->calc_index(j);
J
JiabinYang 已提交
90
      const T* weight_row = weight_value + weight_width * index;
Y
Yancey1989 已提交
91
      T sum = static_cast<T>(0.0);
J
JiabinYang 已提交
92
      sum = blas.DOT(input_width, weight_row, input_row);
Y
Yancey1989 已提交
93
      tmat_value[i * tmat_width + j] += sum;
Y
Yancey1989 已提交
94 95 96 97 98
    }
  }
}

template <typename T>
J
JiabinYang 已提交
99 100 101
void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::Tensor& tmat,
                                            framework::Tensor* weight,
                                            const framework::Tensor& input) {
J
JiabinYang 已提交
102 103
  auto blas =
      GetBlas<platform::CPUDeviceContext, T>(platform::CPUDeviceContext());
Y
Yancey1989 已提交
104 105
  size_t num_samples = tmat.dims()[0];
  size_t input_width = input.dims()[1];
W
weixing02 已提交
106 107
  size_t tmat_width = tmat.dims()[1];
  size_t weight_width = weight->dims()[1];
Y
Yancey1989 已提交
108
  auto tmat_value = tmat.data<T>();
W
weixing02 已提交
109
  auto weight_value = weight->data<T>();
Y
Yancey1989 已提交
110
  auto input_value = input.data<T>();
J
JiabinYang 已提交
111 112 113

  std::unordered_map<int, std::vector<std::pair<T, const T*>>> ops;

Y
Yancey1989 已提交
114
  for (size_t i = 0; i < num_samples; ++i) {
J
JiabinYang 已提交
115
    auto code = code_table_->get_code(i);
116
    int code_length = code->get_length();
J
JiabinYang 已提交
117 118
    const T* input_value_row = input_value + input_width * i;
    const T* tmat_row = tmat_value + i * tmat_width;
Y
Yancey1989 已提交
119
    for (int j = 0; j < code_length; ++j) {
J
JiabinYang 已提交
120 121 122 123 124 125 126 127 128 129
      ops[code->calc_index(j)].emplace_back(tmat_row[j], input_value_row);
    }
  }
  for (auto& op : ops) {
    auto& op_in_row = op.second;
    for (auto& pair : op_in_row) {
      auto& scale = pair.first;
      auto* input_row = pair.second;
      T* weight_row = weight_value + op.first * weight_width;
      blas.AXPY(input_width, scale, input_row, weight_row);
Y
Yancey1989 已提交
130
    }
Y
Yancey1989 已提交
131
  }
Y
Yancey1989 已提交
132
}
Y
Yancey1989 已提交
133

J
JiabinYang 已提交
134
template <typename T>
J
JiabinYang 已提交
135
void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::Tensor& tmat,
J
JiabinYang 已提交
136
                                            framework::SelectedRows* weight,
J
JiabinYang 已提交
137
                                            const framework::Tensor& input) {
J
JiabinYang 已提交
138 139
  auto blas =
      GetBlas<platform::CPUDeviceContext, T>(platform::CPUDeviceContext());
J
JiabinYang 已提交
140 141 142 143 144 145 146
  size_t num_samples = tmat.dims()[0];
  size_t input_width = input.dims()[1];
  size_t tmat_width = tmat.dims()[1];
  size_t weight_width = weight->value().dims()[1];
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight->mutable_value()->data<T>();
  auto input_value = input.data<T>();
J
JiabinYang 已提交
147 148 149 150

  std::unordered_map<int, std::vector<std::pair<T, const T*>>> ops;
  ops.reserve(weight->rows().size());

J
JiabinYang 已提交
151
  for (size_t i = 0; i < num_samples; ++i) {
J
JiabinYang 已提交
152
    auto code = code_table_->get_code(i);
J
JiabinYang 已提交
153
    int code_length = code->get_length();
J
JiabinYang 已提交
154 155
    const T* input_value_row = input_value + input_width * i;
    const T* tmat_row = tmat_value + i * tmat_width;
J
JiabinYang 已提交
156
    for (int j = 0; j < code_length; ++j) {
J
JiabinYang 已提交
157 158 159 160 161 162 163 164 165 166
      ops[code->calc_index(j)].emplace_back(tmat_row[j], input_value_row);
    }
  }

  for (auto& row : weight->rows()) {
    auto& op_in_row = ops[row];
    for (auto& pair : op_in_row) {
      auto& scale = pair.first;
      auto* input_row = pair.second;
      blas.AXPY(input_width, scale, input_row, weight_value);
J
JiabinYang 已提交
167
    }
J
JiabinYang 已提交
168
    weight_value += weight_width;
J
JiabinYang 已提交
169 170
  }
}
J
JiabinYang 已提交
171

Y
Yancey1989 已提交
172
template <typename T>
J
JiabinYang 已提交
173 174 175
void MatrixBitCodeFunctor<T>::MulGradError(const framework::Tensor& tmat,
                                           const framework::Tensor& weight,
                                           framework::Tensor* input) {
Y
Yancey1989 已提交
176
  size_t num_samples = tmat.dims()[0];
W
weixing02 已提交
177 178
  size_t tmat_width = tmat.dims()[1];
  size_t input_width = input->dims()[1];
Y
Yancey1989 已提交
179
  size_t weight_width = weight.dims()[1];
Y
Yancey1989 已提交
180 181
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight.data<T>();
W
weixing02 已提交
182
  auto input_value = input->data<T>();
Y
Yancey1989 已提交
183

Y
Yancey1989 已提交
184
  for (size_t i = 0; i < num_samples; ++i) {
J
JiabinYang 已提交
185
    auto code = code_table_->get_code(i);
186
    int code_length = code->get_length();
Y
Yancey1989 已提交
187
    for (int j = 0; j < code_length; ++j) {
188
      size_t index = code->calc_index(j);
Y
Yancey1989 已提交
189 190

      for (size_t k = 0; k < input_width; ++k) {
W
weixing02 已提交
191 192 193
        input_value[input_width * i + k] +=
            tmat_value[i * tmat_width + j] *
            weight_value[weight_width * index + k];
Y
Yancey1989 已提交
194 195 196 197 198 199
      }
    }
  }
}

template <typename T>
J
JiabinYang 已提交
200
void MatrixBitCodeFunctor<T>::Sub(framework::Tensor* tmat) {
W
weixing02 已提交
201 202
  size_t num_samples = tmat->dims()[0];
  size_t o_width = tmat->dims()[1];
Y
Yancey1989 已提交
203
  for (size_t i = 0; i < num_samples; ++i) {
J
JiabinYang 已提交
204
    auto code = code_table_->get_code(i);
205
    int code_length = code->get_length();
Y
Yancey1989 已提交
206
    for (int j = 0; j < code_length; ++j) {
207
      if (code->calc_bit(j)) {
W
weixing02 已提交
208
        tmat->data<T>()[i * o_width + j] -= 1;
Y
Yancey1989 已提交
209 210 211
      }
    }
  }
Y
Yancey1989 已提交
212 213
}

Y
Yancey1989 已提交
214 215 216
template class MatrixBitCodeFunctor<float>;
template class MatrixBitCodeFunctor<double>;

}  // namespace math
}  // namespace operators
}  // namespace paddle