// randint_kernel.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/randint_kernel.h"

#include <random>

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"

// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/memory/memcpy.h"

namespace phi {

template <typename T, typename Context>
void RandintRawKernel(const Context& dev_ctx,
                      int low,
                      int high,
                      const ScalarArray& shape,
                      DataType dtype,
                      int seed,
                      DenseTensor* out) {
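  // Sample the random integers into a host-side temporary tensor first,
  // then copy the buffer to the GPU output at the end of the kernel.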
  DenseTensor tmp;
  tmp.Resize(phi::make_ddim(shape.GetData()));
  T* tmp_data = dev_ctx.template HostAlloc<T>(&tmp);

  out->Resize(tmp.dims());
  T* data = dev_ctx.template Alloc<T>(out);
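
  // Use a freshly seeded mt19937_64 when an explicit non-zero seed is given;
  // otherwise reuse the CPU engine of the device context's host generator.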
  std::shared_ptr<std::mt19937_64> engine;
  if (seed) {
    engine = std::make_shared<std::mt19937_64>();
    engine->seed(seed);
  } else {
    engine = dev_ctx.GetHostGenerator()->GetCPUEngine();
  }
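
  // uniform_int_distribution samples the inclusive range [low, high - 1],
  // i.e. the half-open interval [low, high).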
  std::uniform_int_distribution<T> dist(low, high - 1);
  auto numel = out->numel();
  for (int64_t i = 0; i < numel; ++i) {
    tmp_data[i] = dist(*engine);
  }

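  // Copy the host-side buffer into the GPU output; the trailing 0 is the
  // stream argument, and a null stream makes the copy synchronous.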
  paddle::memory::Copy<phi::GPUPlace, phi::Place>(
      out->place(),
      data,
      tmp.place(),
      tmp_data,
      numel * paddle::experimental::SizeOf(out->dtype()),
      0);
}

template <typename T, typename Context>
void RandintKernel(const Context& dev_ctx,
                   int low,
                   int high,
                   const ScalarArray& shape,
                   DataType dtype,
                   DenseTensor* out) {
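  // A seed of 0 makes RandintRawKernel fall back to the device context's
  // host generator instead of a fixed seed.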
  RandintRawKernel<T>(dev_ctx, low, high, shape, dtype, 0, out);
}

}  // namespace phi

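// Register the GPU kernels for int and int64_t outputs: randint_raw exposes
// an explicit seed, while randint always uses the global generator.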
PD_REGISTER_KERNEL(
    randint_raw, GPU, ALL_LAYOUT, phi::RandintRawKernel, int, int64_t) {}

PD_REGISTER_KERNEL(randint, GPU, ALL_LAYOUT, phi::RandintKernel, int, int64_t) {
}