/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/matrix_bit_code.h"
#include <iostream>
#include <unordered_map>
#include <utility>
#include <vector>
namespace paddle {
namespace operators {
namespace math {

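// For each sample i, walk its bit code and add vec[index] (one element per
// node on the code path) to tmat[i, j], where j is the step along the path.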
template <typename T>
void MatrixBitCodeFunctor<T>::Add(const framework::Tensor& vec,
                                  framework::Tensor* tmat) {
  size_t batch_size = tmat->dims()[0];
  size_t width = tmat->dims()[1];
  for (size_t i = 0; i < batch_size; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      tmat->data<T>()[i * width + j] += vec.data<T>()[index];
    }
  }
}

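// Gradient of Add with respect to vec: scatter-add each tmat[i, j] back into
// vec[index], where index is step j of sample i's code path.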
template <typename T>
void MatrixBitCodeFunctor<T>::AddGrad(const framework::Tensor& tmat,
                                      framework::Tensor* vec) {
  size_t batch_size = tmat.dims()[0];
  size_t width = tmat.dims()[1];
  for (size_t i = 0; i < batch_size; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      vec->data<T>()[index] += tmat.data<T>()[i * width + j];
    }
  }
}

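// Same as the dense AddGrad above, but the gradient of vec is stored as a
// SelectedRows: the node index is first mapped to a row of vec's value tensor
// via GetIndexFromId before accumulating.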
template <typename T>
void MatrixBitCodeFunctor<T>::AddGrad(const framework::Tensor& tmat,
                                      framework::SelectedRows* vec) {
  size_t batch_size = tmat.dims()[0];
  size_t width = tmat.dims()[1];
  for (size_t i = 0; i < batch_size; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      int64_t row_index = vec->GetIndexFromId(static_cast<int64_t>(index));
      vec->mutable_value()->data<T>()[row_index] +=
          tmat.data<T>()[i * width + j];
    }
  }
}

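// For each sample, sums the entries of tmat[i] whose corresponding code bit
// is set, then scales the result by scale_sum and writes it to sum[i].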
template <typename T>
void MatrixBitCodeFunctor<T>::Sum(const framework::Tensor& tmat,
                                  framework::Tensor* sum, T scale_sum) {
  size_t num_samples = tmat.dims()[0];
  size_t o_width = tmat.dims()[1];
  for (size_t i = 0; i < num_samples; ++i) {
    T sm = static_cast<T>(0.0);
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      if (code->calc_bit(j)) {
        // calc_bit starts from the rightmost bit, while the data in tmat[i]
        // is stored in the reverse order.
        sm += tmat.data<T>()[i * o_width + j];
      }
    }
    sum->data<T>()[i] = scale_sum * sm;
  }
}

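// tmat[i, j] += dot(weight[index], input[i]) for every step j of sample i's
// code path, where index selects the weight row of the corresponding node.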
template <typename T>
void MatrixBitCodeFunctor<T>::Mul(framework::Tensor* tmat,
                                  const framework::Tensor& weight,
                                  const framework::Tensor& input) {
  auto blas =
      GetBlas<platform::CPUDeviceContext, T>(platform::CPUDeviceContext());
  size_t num_samples = tmat->dims()[0];
  size_t tmat_width = tmat->dims()[1];
  size_t input_width = input.dims()[1];
  size_t weight_width = weight.dims()[1];
  auto tmat_value = tmat->data<T>();
  auto weight_value = weight.data<T>();
  auto input_value = input.data<T>();
  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    const T* input_row = input_value + input_width * i;
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      const T* weight_row = weight_value + weight_width * index;
      T sum = blas.DOT(input_width, weight_row, input_row);
      tmat_value[i * tmat_width + j] += sum;
    }
  }
}

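// Gradient of Mul with respect to weight. The updates are first grouped by
// node index in a hash map so that all AXPY calls touching the same weight
// row are applied together.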
template <typename T>
void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::Tensor& tmat,
                                            framework::Tensor* weight,
                                            const framework::Tensor& input) {
  auto blas =
      GetBlas<platform::CPUDeviceContext, T>(platform::CPUDeviceContext());
  size_t num_samples = tmat.dims()[0];
  size_t input_width = input.dims()[1];
  size_t tmat_width = tmat.dims()[1];
  size_t weight_width = weight->dims()[1];
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight->data<T>();
  auto input_value = input.data<T>();

  std::unordered_map<int, std::vector<std::pair<T, const T*>>> ops;

  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    const T* input_value_row = input_value + input_width * i;
    const T* tmat_row = tmat_value + i * tmat_width;
    for (int j = 0; j < code_length; ++j) {
      ops[code->calc_index(j)].emplace_back(tmat_row[j], input_value_row);
    }
  }
  for (auto& op : ops) {
    auto& op_in_row = op.second;
    for (auto& pair : op_in_row) {
      auto& scale = pair.first;
      auto* input_row = pair.second;
      T* weight_row = weight_value + op.first * weight_width;
      blas.AXPY(input_width, scale, input_row, weight_row);
    }
  }
}

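// Same as above, but the weight gradient is a SelectedRows: only the rows
// listed in weight->rows() are updated, in the order they appear there.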
template <typename T>
void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::Tensor& tmat,
                                            framework::SelectedRows* weight,
                                            const framework::Tensor& input) {
  auto blas =
      GetBlas<platform::CPUDeviceContext, T>(platform::CPUDeviceContext());
  size_t num_samples = tmat.dims()[0];
  size_t input_width = input.dims()[1];
  size_t tmat_width = tmat.dims()[1];
  size_t weight_width = weight->value().dims()[1];
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight->mutable_value()->data<T>();
  auto input_value = input.data<T>();

  std::unordered_map<int, std::vector<std::pair<T, const T*>>> ops;
  ops.reserve(weight->rows().size());

  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    const T* input_value_row = input_value + input_width * i;
    const T* tmat_row = tmat_value + i * tmat_width;
    for (int j = 0; j < code_length; ++j) {
      ops[code->calc_index(j)].emplace_back(tmat_row[j], input_value_row);
    }
  }

  for (auto& row : weight->rows()) {
    auto& op_in_row = ops[row];
    for (auto& pair : op_in_row) {
      auto& scale = pair.first;
      auto* input_row = pair.second;
      blas.AXPY(input_width, scale, input_row, weight_value);
    }
    weight_value += weight_width;
  }
}

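// Gradient of Mul with respect to input:
// input[i] += tmat[i, j] * weight[index], accumulated over the code path.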
template <typename T>
void MatrixBitCodeFunctor<T>::MulGradError(const framework::Tensor& tmat,
                                           const framework::Tensor& weight,
                                           framework::Tensor* input) {
  size_t num_samples = tmat.dims()[0];
  size_t tmat_width = tmat.dims()[1];
  size_t input_width = input->dims()[1];
  size_t weight_width = weight.dims()[1];
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight.data<T>();
  auto input_value = input->data<T>();

  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);

      for (size_t k = 0; k < input_width; ++k) {
        input_value[input_width * i + k] +=
            tmat_value[i * tmat_width + j] *
            weight_value[weight_width * index + k];
      }
    }
  }
}

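// Subtracts 1 from tmat[i, j] wherever bit j of sample i's code is set.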
template <typename T>
void MatrixBitCodeFunctor<T>::Sub(framework::Tensor* tmat) {
  size_t num_samples = tmat->dims()[0];
  size_t o_width = tmat->dims()[1];
  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      if (code->calc_bit(j)) {
        tmat->data<T>()[i * o_width + j] -= 1;
      }
    }
  }
}

template class MatrixBitCodeFunctor<float>;
template class MatrixBitCodeFunctor<double>;

}  // namespace math
}  // namespace operators
}  // namespace paddle