/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/uniform_random_op.h"

#include <string>

#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"

namespace paddle {
namespace operators {

// It seems that Eigen::Tensor::random on GPU will SEGFAULT. Use std::random
// and thrust::random (thrust is the parallel algorithms library shipped with
// CUDA) to implement uniform random instead.
template <typename T>
class CPUUniformRandomKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    framework::Tensor *tensor = nullptr;
    auto out_var = ctx.OutputVar("Out");
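    // If Input(ShapeTensor) or Input(ShapeTensorList) is fed at run time, it
    // overrides the shape given through Attr(shape).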
    std::vector<int64_t> new_shape;
    auto list_new_shape_tensor =
        ctx.MultiInput<framework::Tensor>("ShapeTensorList");
    if (list_new_shape_tensor.size() > 0 || ctx.HasInput("ShapeTensor")) {
      if (ctx.HasInput("ShapeTensor")) {
        auto *shape_tensor = ctx.Input<framework::Tensor>("ShapeTensor");
        new_shape = GetNewDataFromShapeTensor(shape_tensor);
      } else if (list_new_shape_tensor.size() > 0) {
        new_shape = GetNewDataFromShapeTensorList(list_new_shape_tensor);
      }
    }

    if (out_var->IsType<framework::SelectedRows>()) {
      auto *selected_rows = out_var->GetMutable<framework::SelectedRows>();
      tensor = selected_rows->mutable_value();
      auto shape = ctx.Attr<std::vector<int64_t>>("shape");
      if (!new_shape.empty()) shape = new_shape;
      tensor->Resize(framework::make_ddim(shape));
      selected_rows->mutable_rows()->reserve(shape[0]);
    } else if (out_var->IsType<framework::LoDTensor>()) {
      tensor = out_var->GetMutable<framework::LoDTensor>();
      if (!new_shape.empty()) tensor->Resize(framework::make_ddim(new_shape));
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Expected type of Output(Out) in uniform_random_op to be LoDTensor "
          "or SelectedRows. But got unsupported type: %s.",
          framework::ToTypeName(out_var->Type())));
    }
    T *data = tensor->mutable_data<T>(ctx.GetPlace());

    int64_t size = tensor->numel();
    std::uniform_real_distribution<T> dist(
        static_cast<T>(ctx.Attr<float>("min")),
        static_cast<T>(ctx.Attr<float>("max")));
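    // A seed of 0 lets the framework pick the seed for the CPU engine; a
    // fixed non-zero seed makes the generated numbers reproducible.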
    unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
    auto engine = framework::GetCPURandomEngine(seed);

    for (int64_t i = 0; i < size; ++i) {
      data[i] = dist(*engine);
    }

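    // Optionally overwrite diag_num elements with diag_val. The elements are
    // taken every (diag_step + 1) positions in the flattened output, i.e.
    // pos = i * (diag_step + 1).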
    unsigned int diag_num =
        static_cast<unsigned int>(ctx.Attr<int>("diag_num"));
    unsigned int diag_step =
        static_cast<unsigned int>(ctx.Attr<int>("diag_step"));
    auto diag_val = static_cast<T>(ctx.Attr<float>("diag_val"));
    if (diag_num > 0) {
      PADDLE_ENFORCE_GT(
          size, (diag_num - 1) * (diag_step + 1),
          platform::errors::InvalidArgument(
              "ShapeInvalid: the last diagonal element is written at position "
              "(diag_num - 1) * (diag_step + 1). With diag_num = %d and "
              "diag_step = %d this position is %d, which must be smaller than "
              "the output size, but the output size is %d.",
              diag_num, diag_step, (diag_num - 1) * (diag_step + 1), size));
      for (int64_t i = 0; i < diag_num; ++i) {
        int64_t pos = i * diag_step + i;
        data[pos] = diag_val;
      }
    }
  }
};

class UniformRandomOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "UniformRandomOp");

    PADDLE_ENFORCE_LT(
        ctx->Attrs().Get<float>("min"), ctx->Attrs().Get<float>("max"),
        platform::errors::InvalidArgument(
            "The uniform_random's min must be less than max. But received "
            "min = %f which is greater than or equal to max = %f.",
            ctx->Attrs().Get<float>("min"), ctx->Attrs().Get<float>("max")));
    PADDLE_ENFORCE_GE(ctx->Attrs().Get<int>("diag_num"), 0,
                      platform::errors::InvalidArgument(
                          "The uniform_random's diag_num must be greater than "
                          "or equal to 0. But received diag_num (%d) < 0.",
                          ctx->Attrs().Get<int>("diag_num")));
    PADDLE_ENFORCE_GE(ctx->Attrs().Get<int>("diag_step"), 0,
                      platform::errors::InvalidArgument(
                          "The uniform_random's diag_step must be greater "
                          "than or equal to 0. But received diag_step (%d) "
                          "< 0.",
                          ctx->Attrs().Get<int>("diag_step")));

    if (ctx->HasInputs("ShapeTensorList")) {
      // Input(ShapeTensorList) has the highest priority when inferring shape.
      auto inputs_name = ctx->Inputs("ShapeTensorList");
      PADDLE_ENFORCE_GT(inputs_name.size(), 0,
                        platform::errors::InvalidArgument(
                            "Input(ShapeTensorList)'s size of "
                            "Op(uniform_random) can't be zero. "
                            "Please check the Attr(shape)'s size of "
                            "Op(fluid.layers.uniform_random)."));
      auto out_dims = std::vector<int>(inputs_name.size(), -1);
      ctx->SetOutputDim("Out", framework::make_ddim(out_dims));

      return;
    }
    auto &shape = ctx->Attrs().Get<std::vector<int64_t>>("shape");
    if (ctx->HasInput("ShapeTensor") && shape.empty()) {
      auto shape_dims = ctx->GetInputDim("ShapeTensor");
      PADDLE_ENFORCE_EQ(
          shape_dims.size(), 1,
          platform::errors::InvalidArgument(
              "ShapeError: Input(ShapeTensor)'s dimension size of "
              "Op(uniform_random) must be 1. "
              "But received ShapeTensor's dimensions = %d, shape = [%s].",
              shape_dims.size(), shape_dims));
      int num_ele = 1;
      for (int i = 0; i < shape_dims.size(); ++i) {
        num_ele *= shape_dims[i];
      }
      auto vec_dims = std::vector<int64_t>(num_ele, -1);
      auto out_dims = framework::make_ddim(vec_dims);
      ctx->SetOutputDim("Out", out_dims);
      return;
    }

    PADDLE_ENFORCE_EQ(shape.empty(), false,
                      platform::errors::InvalidArgument(
                          "If there is no Input(ShapeTensorList) and no "
                          "Input(ShapeTensor), the shape information must "
                          "be set by Attr(shape)."));
    std::vector<int64_t> tensor_shape;
    tensor_shape.reserve(shape.size());
    for (auto dim : shape) {
      tensor_shape.push_back(static_cast<int64_t>(dim));
    }
    ctx->SetOutputDim("Out", framework::make_ddim(tensor_shape));
  }

 protected:
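  // There is no input tensor to infer the kernel data type from, so it is
  // taken directly from Attr(dtype).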
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(
        static_cast<framework::proto::VarType::Type>(ctx.Attr<int>("dtype")),
        ctx.GetPlace());
  }

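  // Input(ShapeTensor) and Input(ShapeTensorList) only carry shape
  // information, so they are exempt from the usual data transform to the
  // expected kernel's place and layout.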
  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (var_name == "ShapeTensorList" || var_name == "ShapeTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};

class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("ShapeTensor",
             "(Tensor<int64_t> or Tensor<int32_t>, optional). If provided, "
             "uniform_random generates the output according to this given "
             "shape. It has a higher priority than the shape attribute, "
             "while the shape attribute still should be set correctly to "
             "guarantee shape inference at compile time.")
        .AsDispensable();
    AddInput("ShapeTensorList",
             "(vector<Tensor<int64_t>> or vector<Tensor<int32_t>>, optional). "
             "If provided, uniform_random uses this as the output shape. The "
             "shape of each tensor must be [1], and it has the highest "
             "priority compared with Input(ShapeTensor) and attr(shape).")
        .AsDuplicable()
        .AsDispensable();
    AddOutput("Out", "The output tensor of uniform random op");
    AddComment(R"DOC(
This operator initializes a tensor with random values sampled from a
uniform distribution. The random result is in the range [min, max).

)DOC");
    AddAttr<std::vector<int64_t>>("shape", "The shape of the output tensor")
        .SetDefault({});
    AddAttr<float>("min", "Minimum value of uniform random. [default -1.0].")
        .SetDefault(-1.0f);
    AddAttr<float>("max", "Maximum value of uniform random. [default 1.0].")
        .SetDefault(1.0f);
    AddAttr<int>("seed",
                 "Random seed used for generating samples. "
                 "0 means use a seed generated by the system. "
                 "Note that if seed is not 0, this operator will always "
                 "generate the same random numbers every time. [default 0].")
        .SetDefault(0);
    AddAttr<int>("diag_num",
                 "The number of diag elements. Note that if "
                 "diag_num is 0, no diag init is performed. [default 0].")
        .SetDefault(0);
    AddAttr<int>("diag_step",
                 "The step between two diag elements. [default 0].")
        .SetDefault(0);
    AddAttr<float>("diag_val", "The value of diag element. [default 1.0].")
        .SetDefault(1.0f);
    AddAttr<int>("dtype", "Output tensor data type. [default 5(FP32)].")
        .SetDefault(framework::proto::VarType::FP32);
  }
};
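
// A minimal usage sketch from the Python side, assuming the fluid layer name
// referenced in the error messages above (not verified against the Python
// API here):
//
//   out = fluid.layers.uniform_random(shape=[2, 3], min=-1.0, max=1.0,
//                                     seed=10, dtype='float32')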

class UniformRandomOpVarTypeInference : public framework::VarTypeInference {
 public:
  void operator()(framework::InferVarTypeContext *ctx) const override {
    auto var_data_type = static_cast<framework::proto::VarType::Type>(
        BOOST_GET_CONST(int, ctx->GetAttr("dtype")));

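    // Keep Output(Out) as SELECTED_ROWS when it was declared that way;
    // otherwise it is a LoDTensor. The output data type always follows
    // Attr(dtype).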
    if (ctx->GetOutputType("Out") !=
        framework::proto::VarType::SELECTED_ROWS) {
      ctx->SetOutputType("Out", framework::proto::VarType::LOD_TENSOR);
    }
    ctx->SetOutputDataType("Out", var_data_type);
  }
};

}  // namespace operators
}  // namespace paddle

REGISTER_OPERATOR(
    uniform_random, paddle::operators::UniformRandomOp,
    paddle::operators::UniformRandomOpMaker,
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
    paddle::operators::UniformRandomOpVarTypeInference);

REGISTER_OP_CPU_KERNEL(uniform_random,
                       paddle::operators::CPUUniformRandomKernel<float>,
                       paddle::operators::CPUUniformRandomKernel<double>);
REGISTER_OP_CPU_KERNEL(uniform_random_batch_size_like,
                       paddle::operators::CPUUniformRandomKernel<float>,
                       paddle::operators::CPUUniformRandomKernel<double>);