/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include "paddle/framework/data_type.h"
#include "paddle/operators/math/math_function.h"

namespace paddle {
namespace operators {
namespace math {

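// Fills every element of `tensor` with the value `num`, using the Eigen
// device associated with `context`.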
template <typename DeviceContext, typename T>
void SetConstant<DeviceContext, T>::operator()(const DeviceContext& context,
                                               framework::Tensor* tensor,
                                               T num) {
  auto t = framework::EigenVector<T>::Flatten(*tensor);
  t.device(*context.eigen_device()) = t.constant(static_cast<T>(num));
}

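// Permutes the dimensions of `in` according to `axis` (which must hold Rank
// entries) and writes the shuffled result to `out`.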
template <typename DeviceContext, typename T, int Rank>
void Transpose<DeviceContext, T, Rank>::operator()(
    const DeviceContext& context, const framework::Tensor& in,
    framework::Tensor* out, const std::vector<int>& axis) {
  Eigen::array<int, Rank> permute;
  for (int i = 0; i < Rank; i++) {
    permute[i] = axis[i];
  }
  auto in_dim = in.dims();
  auto out_dim = out->dims();

  auto eigen_in = framework::EigenTensor<T, Rank>::From(in);
  auto eigen_out = framework::EigenTensor<T, Rank>::From(*out);
  auto* dev = context.eigen_device();
  eigen_out.device(*dev) = eigen_in.shuffle(permute);
}

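// Sums `input` along its first dimension; `out` must already hold
// input.numel() / input.dims()[0] elements.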
template <typename DeviceContext, typename T>
void ColwiseSum<DeviceContext, T>::operator()(const DeviceContext& context,
                                              const framework::Tensor& input,
                                              framework::Tensor* out) {
  auto in_dims = input.dims();
  auto size = input.numel() / in_dims[0];
  PADDLE_ENFORCE_EQ(out->numel(), size);

  auto in = framework::EigenMatrix<T>::From(input);
  auto vec = framework::EigenVector<T>::Flatten(*out);

  vec.device(*context.eigen_device()) = in.sum(Eigen::array<int, 1>({{0}}));
}

// Specialize for CPU, since Eigen implements a general reduce. However,
// a column-wise sum can be implemented directly, and the general reduce has
// a huge overhead on CPU.
template <typename T>
class ColwiseSum<platform::CPUDeviceContext, T> {
 public:
  void operator()(const platform::CPUDeviceContext& context,
                  const framework::Tensor& input, framework::Tensor* out) {
    auto& in_dims = input.dims();
    auto height = in_dims[0];
    auto size = in_dims[1];
    PADDLE_ENFORCE_EQ(out->numel(), size);

    T* out_buf = out->mutable_data<T>(out->place());
    const T* in_buf = input.data<T>();

    for (size_t i = 0; i < static_cast<size_t>(height); ++i) {
      for (size_t j = 0; j < static_cast<size_t>(size); ++j) {
        if (i == 0) {
          out_buf[j] = in_buf[i * size + j];
        } else {
          out_buf[j] += in_buf[i * size + j];
        }
      }
    }
  }
};
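
// Illustrative sketch (not part of the original file): how these functors are
// typically invoked from an operator kernel. The variable names and shapes
// below are hypothetical.
//
//   math::SetConstant<DeviceContext, T> set_zero;
//   set_zero(dev_ctx, output_tensor, static_cast<T>(0));
//
//   math::ColwiseSum<DeviceContext, T> col_sum;
//   col_sum(dev_ctx, input_tensor, &col_sum_result);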

}  // namespace math
}  // namespace operators
}  // namespace paddle