/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <random>
#include <vector>

#include <thrust/random.h>
#include <thrust/transform.h>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"

namespace paddle {
namespace operators {

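// Functor consumed by thrust::transform below: maps a flat element index n
// to a draw from U(min_, max_), except that up to diag_num_ positions spaced
// diag_step_ + 1 apart are overwritten with the fixed value diag_val_.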
template <typename T>
struct UniformGenerator {
  T min_, max_;
  unsigned int seed_;
  T diag_val_;
  unsigned int diag_num_;
  unsigned int diag_step_;
  __host__ __device__ UniformGenerator(T min, T max, int seed, int diag_num,
                                       int diag_step, T diag_val)
      : min_(min),
        max_(max),
        seed_(seed),
        diag_val_(diag_val),
        diag_num_(diag_num),
        diag_step_(diag_step) {}

  __host__ __device__ T operator()(const unsigned int n) const {
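    // Stateless generation: each call seeds an identical engine, then skips
    // ahead n states, so index n always receives the n-th value of the
    // stream for a given seed, independent of GPU thread scheduling.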
    thrust::minstd_rand rng;
    rng.seed(seed_);
    thrust::uniform_real_distribution<T> dist(min_, max_);
    rng.discard(n);
    T out = dist(rng);
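    // Overwrite positions 0, diag_step_ + 1, 2 * (diag_step_ + 1), ... with
    // diag_val_, stopping after diag_num_ of them; when diag_step_ equals the
    // column count of a row-major matrix, these are its main-diagonal entries.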
    unsigned int remainder = n % (diag_step_ + 1);
    if (remainder == 0 && diag_num_ > n / (diag_step_ + 1)) {
      out = diag_val_;
    }
    return out;
  }
};

// It seems that Eigen::Tensor::random on the GPU will SEGFAULT.
// Use std::random and thrust::random (Thrust is a standard library shipped
// with CUDA) to implement uniform random sampling instead.
template <typename T>
class GPUUniformRandomKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
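    // "Out" may be either a dense LoDTensor or a SelectedRows, so resolve
    // the variable's concrete type before grabbing the tensor to fill.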
    framework::Tensor* tensor = nullptr;
    auto out_var = context.OutputVar("Out");
    if (out_var->IsType<framework::LoDTensor>()) {
      tensor = out_var->GetMutable<framework::LoDTensor>();
    } else if (out_var->IsType<framework::SelectedRows>()) {
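      // A SelectedRows stores its dense payload in value(); size it from the
      // "shape" attribute before filling.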
      auto shape = context.Attr<std::vector<int64_t>>("shape");
      tensor = out_var->GetMutable<framework::SelectedRows>()->mutable_value();
      tensor->Resize(framework::make_ddim(shape));
    } else {
      PADDLE_THROW(
          "uniform_random_op's output only "
          "supports SelectedRows and LoDTensor");
    }
    T* data = tensor->mutable_data<T>(context.GetPlace());
    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
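    // A seed attribute of 0 means "not fixed": draw a fresh seed from
    // std::random_device so every run produces different values.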
    if (seed == 0) {
      std::random_device rd;
      seed = rd();
    }
    T min = static_cast<T>(context.Attr<float>("min"));
    T max = static_cast<T>(context.Attr<float>("max"));
    unsigned int diag_num =
        static_cast<unsigned int>(context.Attr<int>("diag_num"));
    unsigned int diag_step =
        static_cast<unsigned int>(context.Attr<int>("diag_step"));
    T diag_val = static_cast<T>(context.Attr<float>("diag_val"));
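    // Fill the tensor on the device: a counting iterator supplies the flat
    // indices 0..size-1, and thrust::transform maps each index through
    // UniformGenerator directly into the tensor's GPU buffer.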
    thrust::counting_iterator<unsigned int> index_sequence_begin(0);
    int64_t size = tensor->numel();
    thrust::transform(
        index_sequence_begin, index_sequence_begin + size,
        thrust::device_ptr<T>(data),
        UniformGenerator<T>(min, max, seed, diag_num, diag_step, diag_val));
  }
};

}  // namespace operators
}  // namespace paddle

REGISTER_OP_CUDA_KERNEL(uniform_random,
                        paddle::operators::GPUUniformRandomKernel<float>,
                        paddle::operators::GPUUniformRandomKernel<double>);
REGISTER_OP_CUDA_KERNEL(uniform_random_batch_size_like,
                        paddle::operators::GPUUniformRandomKernel<float>,
                        paddle::operators::GPUUniformRandomKernel<double>);