// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <random>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/for_range.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include <thrust/random.h>
#endif

namespace paddle {
namespace operators {

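// Maps a device context to the random-number engine and uniform integer
// distribution that can run on that device: std::minstd_rand and
// std::uniform_int_distribution on the CPU, and their thrust counterparts
// on CUDA/HIP devices.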
template <typename DeviceContext>
struct Random;

template <>
struct Random<platform::CPUDeviceContext> {
  using Engine = std::minstd_rand;

  template <typename T>
  using UniformIntDist = std::uniform_int_distribution<T>;
};

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
template <>
struct Random<platform::CUDADeviceContext> {
  using Engine = thrust::minstd_rand;

  template <typename T>
  using UniformIntDist = thrust::uniform_int_distribution<T>;
};
#endif

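// Recursively copies an out_dims-shaped window of the tensor x, starting at
// the per-dimension element offsets in `offsets`, into the densely packed
// buffer `out`. `i` is the dimension currently being walked, `rank` is the
// number of cropped dimensions, and prod_x_remain / prod_out_remain are the
// products of the not-yet-walked dimensions of x and out, so the strides of
// dimension i are prod_x_remain / x_dims[i] and prod_out_remain / out_dims[i].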
template <typename T>
HOSTDEVICE inline void StridedMemcpy(const T* x, const size_t* x_dims, T* out,
                                     const size_t* out_dims, int i, int rank,
                                     size_t prod_x_remain,
                                     size_t prod_out_remain,
                                     const size_t* offsets) {
  size_t x_dim_i = x_dims[i];
  size_t out_dim_i = out_dims[i];
  size_t x_stride = prod_x_remain / x_dim_i;
  size_t out_stride = prod_out_remain / out_dim_i;
  size_t offset_i = offsets[i];

  if (i == rank - 1) {
    x += offset_i;
    for (size_t j = 0; j < out_dim_i; ++j) {
      *out++ = *x++;
    }
  } else {
    x += offset_i * x_stride;
    for (size_t j = 0; j < out_dim_i; ++j) {
      StridedMemcpy<T>(x, x_dims, out, out_dims, i + 1, rank, x_stride,
                       out_stride, offsets);
      x += x_stride;
      out += out_stride;
    }
  }
}
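
// For illustration: with a row-major 4x4 input, a 2x2 output, and
// offsets = {1, 1}, the call
//   StridedMemcpy(x, x_dims /*= {4, 4}*/, out, out_dims /*= {2, 2}*/,
//                 /*i=*/0, /*rank=*/2, /*prod_x_remain=*/16,
//                 /*prod_out_remain=*/4, offsets);
// copies x[1][1], x[1][2], x[2][1], x[2][2] into out, i.e. the 2x2 window
// whose top-left corner sits at (1, 1).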

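// Crops one Out-shaped window at a random offset out of every instance in
// the batch. The first num_batchsize_dims_ dimensions of X and Out are
// batch-size dimensions and must match; random offsets are drawn only for
// the remaining (cropped) dimensions. The fixed-size x_dims_ / out_dims_
// arrays cap the supported rank at 9.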
template <typename DeviceContext, typename T>
struct RandomCropFunctor {
  const T* x_;
  T* out_;
  size_t x_dims_[9];
  size_t out_dims_[9];
  int num_batchsize_dims_;
  int rank_;
  int64_t seed_;

  size_t prod_batchsize_dims_;
  size_t prod_x_ins_dims_;
  size_t prod_out_ins_dims_;

  RandomCropFunctor(const T* x, T* out, const framework::DDim& x_dims,
                    const framework::DDim& out_dims, int num_batchsize_dims,
                    int64_t seed)
      : x_(x),
        out_(out),
        num_batchsize_dims_(num_batchsize_dims),
        rank_(x_dims.size()),
        seed_(seed) {
    PADDLE_ENFORCE_EQ(
        x_dims.size(), out_dims.size(),
        platform::errors::InvalidArgument(
            "The number of dimensions of Input(X) must be equal to the "
            "number of dimensions of Output(Out), but received Input(X)'s "
            "dimensions = [%d] and Output(Out)'s dimensions = [%d].",
            x_dims.size(), out_dims.size()));
    PADDLE_ENFORCE_GT(
        rank_, num_batchsize_dims_,
        platform::errors::InvalidArgument(
            "The number of dimensions of Input(X) must be greater than the "
            "number of batch-size dimensions, i.e. Input(X)'s dimensions "
            "minus Attr(shape)'s dimensions, but received Input(X)'s "
            "dimensions = [%d] and the number of batch-size dimensions = "
            "[%d].",
            rank_, num_batchsize_dims_));
    prod_batchsize_dims_ = 1;
    prod_x_ins_dims_ = 1;
    prod_out_ins_dims_ = 1;
    for (size_t i = 0; i < static_cast<size_t>(rank_); ++i) {
      size_t x_dim_i = x_dims[i];
      size_t out_dim_i = out_dims[i];
      x_dims_[i] = x_dim_i;
      out_dims_[i] = out_dim_i;
      if (i < static_cast<size_t>(num_batchsize_dims_)) {
        PADDLE_ENFORCE_EQ(
            x_dim_i, out_dim_i,
            platform::errors::InvalidArgument(
                "The first [%d] dimensions of Input(X) and Output(Out) must "
                "be equal, but the [%d]-th dimensions of Input(X) and "
                "Output(Out) are [%d] and [%d] respectively.",
                num_batchsize_dims_, i, x_dim_i, out_dim_i));
        prod_batchsize_dims_ *= x_dim_i;
      } else {
        prod_x_ins_dims_ *= x_dim_i;
        prod_out_ins_dims_ *= out_dim_i;
      }
    }
  }

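  // Invoked once per batch instance via platform::ForRange. Re-seeding with
  // seed_ and discarding ins_idx * (rank_ - num_batchsize_dims_) draws gives
  // each instance its own deterministic slice of the random sequence, so the
  // result does not depend on the order in which instances are processed.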
  HOSTDEVICE void operator()(size_t ins_idx) {
    typename Random<DeviceContext>::Engine engine(seed_);
    engine.discard(ins_idx * (rank_ - num_batchsize_dims_));
    size_t offsets[9] = {};
    for (int i = num_batchsize_dims_; i < rank_; ++i) {
      typename Random<DeviceContext>::template UniformIntDist<size_t> dist(
          0, x_dims_[i] - out_dims_[i]);
      offsets[i - num_batchsize_dims_] = dist(engine);
    }

    const T* x = x_ + ins_idx * prod_x_ins_dims_;
    T* out = out_ + ins_idx * prod_out_ins_dims_;

    StridedMemcpy<T>(x, x_dims_ + num_batchsize_dims_, out,
                     out_dims_ + num_batchsize_dims_, 0,
                     rank_ - num_batchsize_dims_, prod_x_ins_dims_,
                     prod_out_ins_dims_, offsets);
  }
};

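// Reads the seed (from the Seed input if it is initialized, otherwise from
// the startup_seed attribute), crops every batch instance with
// RandomCropFunctor, and stores the advanced engine state in SeedOut so the
// next invocation continues the random sequence instead of repeating it.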
template <typename DeviceContext, typename T>
class RandomCropKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    int64_t seed = 0;
    auto& seed_tensor = GET_DATA_SAFELY(ctx.Input<framework::LoDTensor>("Seed"),
                                        "Input", "Seed", "RandomCrop");
    if (seed_tensor.IsInitialized()) {
      if (platform::is_cpu_place(seed_tensor.place())) {
        seed = *seed_tensor.template data<int64_t>();
      } else {
        LOG(WARNING) << "It is slow to place the seed in GPU memory, because "
                        "it must be copied back to the CPU to be read. Please "
                        "consider keeping the seed in CPU memory.";
        framework::LoDTensor cpu_seed;
        framework::TensorCopySync(seed_tensor, platform::CPUPlace(), &cpu_seed);
        seed = *cpu_seed.data<int64_t>();
      }
    } else {
      VLOG(5) << "WARNING: The input 'Seed' is not initialized; falling back "
                 "to the attribute 'startup_seed'.";
      seed = ctx.Attr<int>("startup_seed");
    }
    auto shape = ctx.Attr<std::vector<int>>("shape");
    auto& x = GET_DATA_SAFELY(ctx.Input<framework::LoDTensor>("X"), "Input",
                              "X", "RandomCrop");
    auto& out = GET_DATA_SAFELY(ctx.Output<framework::LoDTensor>("Out"),
                                "Output", "Out", "RandomCrop");

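    // For example, for an NCHW input with Attr(shape) = {h, w},
    // num_batchsize_dims is 2 and each of the N * C HxW planes is cropped
    // independently.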
    int num_batchsize_dims = x.dims().size() - shape.size();
    RandomCropFunctor<DeviceContext, T> functor(
        x.template data<T>(), out.template mutable_data<T>(ctx.GetPlace()),
        x.dims(), out.dims(), num_batchsize_dims, seed);
    platform::ForRange<DeviceContext> for_range(
        ctx.template device_context<DeviceContext>(),
        functor.prod_batchsize_dims_);

    for_range(functor);

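    // Advance a host-side engine past every random draw the functor consumed,
    // so that SeedOut holds a fresh seed for the next run.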
    Random<platform::CPUDeviceContext>::Engine engine(seed);
    engine.discard(functor.prod_batchsize_dims_ *
                   (functor.rank_ - functor.num_batchsize_dims_));
    *ctx.Output<framework::LoDTensor>("SeedOut")->mutable_data<int64_t>(
        framework::make_ddim({1}), platform::CPUPlace()) = engine();
  }
};

// TODO(fengjiayi): Backward of random crop op

}  // namespace operators
}  // namespace paddle