/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/matrix_bit_code.h"
#include <iostream>
namespace paddle {
namespace operators {
namespace math {
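// Each functor below walks, for every sample i in the batch, the bit code
// returned by code_table_->get_code(i): get_length() gives the code length,
// calc_index(j) the node index of the j-th step on the path, and calc_bit(j)
// the corresponding bit.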

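// Add: for every step j on sample i's code path, tmat(i, j) += vec(index(i, j)).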
template <typename T>
void MatrixBitCodeFunctor<T>::Add(const framework::Tensor& vec,
                                  framework::Tensor* tmat) {
  size_t batch_size = tmat->dims()[0];
  size_t width = tmat->dims()[1];
  for (size_t i = 0; i < batch_size; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      tmat->data<T>()[i * width + j] += vec.data<T>()[index];
    }
  }
}

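// AddGrad: scatter the gradient back, vec(index(i, j)) += tmat(i, j).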
template <typename T>
void MatrixBitCodeFunctor<T>::AddGrad(const framework::Tensor& tmat,
                                      framework::Tensor* vec) {
  size_t batch_size = tmat.dims()[0];
  size_t width = tmat.dims()[1];
  for (size_t i = 0; i < batch_size; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      vec->data<T>()[index] += tmat.data<T>()[i * width + j];
    }
  }
}

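// AddGrad (SelectedRows overload): the same scatter-add, with the target
// position resolved through vec->GetIndexFromId(index).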
template <typename T>
void MatrixBitCodeFunctor<T>::AddGrad(const framework::Tensor& tmat,
                                      framework::SelectedRows* vec) {
  size_t batch_size = tmat.dims()[0];
  size_t width = tmat.dims()[1];
  for (size_t i = 0; i < batch_size; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      int64_t row_index = vec->GetIndexFromId(static_cast<int64_t>(index));
      vec->mutable_value()->data<T>()[row_index] +=
          tmat.data<T>()[i * width + j];
    }
  }
}

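// Sum: sum(i) = scale_sum * sum of tmat(i, j) over the steps j whose bit is
// set in sample i's code.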
template <typename T>
void MatrixBitCodeFunctor<T>::Sum(const framework::Tensor& tmat,
                                  framework::Tensor* sum, T scale_sum) {
  size_t num_samples = tmat.dims()[0];
  size_t o_width = tmat.dims()[1];
  for (size_t i = 0; i < num_samples; ++i) {
    T sm = static_cast<T>(0.0);
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      if (code->calc_bit(j)) {
        // calc_bit starts from the rightmost bit, while the data in tmat[i]
        // is stored in the reverse order.
        sm += tmat.data<T>()[i * o_width + j];
      }
    }
    sum->data<T>()[i] = scale_sum * sm;
  }
}

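// Mul: tmat(i, j) += dot(weight.row(index(i, j)), input.row(i)).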
template <typename T>
void MatrixBitCodeFunctor<T>::Mul(framework::Tensor* tmat,
                                  const framework::Tensor& weight,
                                  const framework::Tensor& input) {
  size_t num_samples = tmat->dims()[0];
  size_t tmat_width = tmat->dims()[1];
  size_t input_width = input.dims()[1];
  size_t weight_width = weight.dims()[1];
  auto tmat_value = tmat->data<T>();
  auto weight_value = weight.data<T>();
  auto input_value = input.data<T>();
  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      T sum = static_cast<T>(0.0);
      for (size_t k = 0; k < input_width; ++k) {
        sum += weight_value[weight_width * index + k] *
               input_value[input_width * i + k];
      }
      tmat_value[i * tmat_width + j] += sum;
    }
  }
}

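// MulGradWeight: weight.row(index(i, j)) += tmat(i, j) * input.row(i).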
template <typename T>
void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::Tensor& tmat,
                                            framework::Tensor* weight,
                                            const framework::Tensor& input) {
  size_t num_samples = tmat.dims()[0];
  size_t input_width = input.dims()[1];
  size_t tmat_width = tmat.dims()[1];
  size_t weight_width = weight->dims()[1];
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight->data<T>();
  auto input_value = input.data<T>();
  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);

      for (size_t k = 0; k < input_width; ++k) {
        weight_value[weight_width * index + k] +=
            tmat_value[i * tmat_width + j] * input_value[input_width * i + k];
      }
    }
  }
}

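// MulGradWeight (SelectedRows overload): as above, with the destination row
// resolved through weight->GetIndexFromId(index).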
template <typename T>
void MatrixBitCodeFunctor<T>::MulGradWeight(const framework::Tensor& tmat,
                                            framework::SelectedRows* weight,
                                            const framework::Tensor& input) {
  size_t num_samples = tmat.dims()[0];
  size_t input_width = input.dims()[1];
  size_t tmat_width = tmat.dims()[1];
  size_t weight_width = weight->value().dims()[1];
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight->mutable_value()->data<T>();
  auto input_value = input.data<T>();
  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);
      for (size_t k = 0; k < input_width; ++k) {
        int64_t row_index = weight->GetIndexFromId(static_cast<int64_t>(index));
        weight_value[row_index * weight_width + k] +=
            tmat_value[i * tmat_width + j] * input_value[input_width * i + k];
      }
    }
  }
}

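// MulGradError: input.row(i) += tmat(i, j) * weight.row(index(i, j)).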
template <typename T>
void MatrixBitCodeFunctor<T>::MulGradError(const framework::Tensor& tmat,
                                           const framework::Tensor& weight,
                                           framework::Tensor* input) {
  size_t num_samples = tmat.dims()[0];
  size_t tmat_width = tmat.dims()[1];
  size_t input_width = input->dims()[1];
  size_t weight_width = weight.dims()[1];
  auto tmat_value = tmat.data<T>();
  auto weight_value = weight.data<T>();
  auto input_value = input->data<T>();

  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      size_t index = code->calc_index(j);

      for (size_t k = 0; k < input_width; ++k) {
        input_value[input_width * i + k] +=
            tmat_value[i * tmat_width + j] *
            weight_value[weight_width * index + k];
      }
    }
  }
}

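// Sub: tmat(i, j) -= 1 for every step j whose bit is set in sample i's code.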
template <typename T>
void MatrixBitCodeFunctor<T>::Sub(framework::Tensor* tmat) {
  size_t num_samples = tmat->dims()[0];
  size_t o_width = tmat->dims()[1];
  for (size_t i = 0; i < num_samples; ++i) {
    auto code = code_table_->get_code(i);
    int code_length = code->get_length();
    for (int j = 0; j < code_length; ++j) {
      if (code->calc_bit(j)) {
        tmat->data<T>()[i * o_width + j] -= 1;
      }
    }
  }
}

template class MatrixBitCodeFunctor<float>;
template class MatrixBitCodeFunctor<double>;

}  // namespace math
}  // namespace operators
}  // namespace paddle