/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/uniform_random_op.h"

#include <string>

#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"

namespace paddle {
namespace operators {

// It seems that Eigen::Tensor::random on GPU will SEGFAULT.
// Use std::random and thrust::random (Thrust is a standard library shipped
// with CUDA) to implement uniform random.
template <typename T>
class CPUUniformRandomKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    framework::Tensor *tensor = nullptr;
    auto out_var = ctx.OutputVar("Out");
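    // A shape provided at runtime (Input(ShapeTensor) or
    // Input(ShapeTensorList)) overrides the static Attr(shape); the kernel
    // consults ShapeTensor first, then ShapeTensorList.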
    std::vector<int64_t> new_shape;
    auto list_new_shape_tensor =
        ctx.MultiInput<framework::Tensor>("ShapeTensorList");
    if (list_new_shape_tensor.size() > 0 || ctx.HasInput("ShapeTensor")) {
      if (ctx.HasInput("ShapeTensor")) {
        auto *shape_tensor = ctx.Input<framework::Tensor>("ShapeTensor");
        new_shape = GetNewDataFromShapeTensor(shape_tensor);
      } else if (list_new_shape_tensor.size() > 0) {
        new_shape = GetNewDataFromShapeTensorList(list_new_shape_tensor);
      }
    }

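    // "Out" may be a SelectedRows (fill its underlying value tensor) or a
    // plain LoDTensor; any other variable type is rejected below.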
    if (out_var->IsType<framework::SelectedRows>()) {
      auto *selected_rows = out_var->GetMutable<framework::SelectedRows>();
      tensor = selected_rows->mutable_value();
      auto shape = ctx.Attr<std::vector<int64_t>>("shape");
      if (!new_shape.empty()) shape = new_shape;
      tensor->Resize(framework::make_ddim(shape));
      selected_rows->mutable_rows()->reserve(shape[0]);
    } else if (out_var->IsType<framework::LoDTensor>()) {
      tensor = out_var->GetMutable<framework::LoDTensor>();
      if (!new_shape.empty()) tensor->Resize(framework::make_ddim(new_shape));
    } else {
      PADDLE_THROW(
          "uniform_random_op's output only "
          "supports SelectedRows and LoDTensor");
    }
    T *data = tensor->mutable_data<T>(ctx.GetPlace());

    int64_t size = tensor->numel();
    std::uniform_real_distribution<T> dist(
        static_cast<T>(ctx.Attr<float>("min")),
        static_cast<T>(ctx.Attr<float>("max")));
    unsigned int seed = static_cast<unsigned int>(ctx.Attr<int>("seed"));
    auto engine = framework::GetCPURandomEngine(seed);
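    // Per the "seed" attribute contract below: seed == 0 uses a
    // system-generated seed, while a non-zero seed makes the op generate
    // the same numbers on every run.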

    for (int64_t i = 0; i < size; ++i) {
      data[i] = dist(*engine);
    }

    unsigned int diag_num =
        static_cast<unsigned int>(ctx.Attr<int>("diag_num"));
    unsigned int diag_step =
        static_cast<unsigned int>(ctx.Attr<int>("diag_step"));
    auto diag_val = static_cast<T>(ctx.Attr<float>("diag_val"));
    if (diag_num > 0) {
      PADDLE_ENFORCE_GT(
          size, (diag_num - 1) * (diag_step + 1),
          platform::errors::InvalidArgument(
              "ShapeInvalid: the position of the last diagonal element, "
              "(diag_num - 1) * (diag_step + 1), must be smaller than the "
              "tensor size. With diag_num %d and diag_step %d that position "
              "is %d, but the tensor size is %d.",
              diag_num, diag_step, (diag_num - 1) * (diag_step + 1), size));
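      // Positions i * diag_step + i (i.e. a stride of diag_step + 1,
      // starting at 0) receive diag_val, forming the "diagonal".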
      for (int64_t i = 0; i < diag_num; ++i) {
        int64_t pos = i * diag_step + i;
        data[pos] = diag_val;
      }
    }
  }
};

class UniformRandomOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "UniformRandomOp");

    PADDLE_ENFORCE_LT(
        ctx->Attrs().Get<float>("min"), ctx->Attrs().Get<float>("max"),
        platform::errors::InvalidArgument(
            "The uniform_random's min must less then max. But received min = "
            "%f great than or equal max = %f.",
            ctx->Attrs().Get<float>("min"), ctx->Attrs().Get<float>("max")));
    PADDLE_ENFORCE_GE(ctx->Attrs().Get<int>("diag_num"), 0,
                      platform::errors::InvalidArgument(
                          "The uniform_random's diag_num must greater than or "
                          "equal 0. But recevied diag_num (%d) < 0.",
                          ctx->Attrs().Get<int>("diag_num")));
    PADDLE_ENFORCE_GE(ctx->Attrs().Get<int>("diag_step"), 0,
                      platform::errors::InvalidArgument(
                          "The uniform_random's diag_step must greater than or "
                          "equal 0. But recevied diag_step (%d) < 0.",
                          ctx->Attrs().Get<int>("diag_step")));

    if (ctx->HasInputs("ShapeTensorList")) {
      // ShapeTensorList has the top priority among the shape sources
      auto inputs_name = ctx->Inputs("ShapeTensorList");
      PADDLE_ENFORCE_GT(inputs_name.size(), 0,
                        platform::errors::InvalidArgument(
                            "Input(ShapeTensorList)'s size of "
                            "Op(uniform_random) can't be zero. "
                            "Please check the Attr(shape)'s size of "
                            "Op(fluid.layers.uniform_random)."));
      auto out_dims = std::vector<int>(inputs_name.size(), -1);
      ctx->SetOutputDim("Out", framework::make_ddim(out_dims));

      return;
    }
    auto &shape = ctx->Attrs().Get<std::vector<int64_t>>("shape");
    if (ctx->HasInput("ShapeTensor") && shape.empty()) {
      auto shape_dims = ctx->GetInputDim("ShapeTensor");
      PADDLE_ENFORCE_EQ(
          shape_dims.size(), 1,
          platform::errors::InvalidArgument(
              "ShapeError: Input(ShapeTensor)'s dimension size of "
              "Op(uniform_random) must be 1. "
              "But received ShapeTensor's dimensions = %d, shape = [%s].",
              shape_dims.size(), shape_dims));
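      // ShapeTensor's element count gives the output rank; the dimension
      // values themselves are unknown at compile time, hence -1 below.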
      int num_ele = 1;
      for (int i = 0; i < shape_dims.size(); ++i) {
        num_ele *= shape_dims[i];
      }
      auto vec_dims = std::vector<int64_t>(num_ele, -1);
      auto out_dims = framework::make_ddim(vec_dims);
      ctx->SetOutputDim("Out", out_dims);
      return;
    }

    PADDLE_ENFORCE_EQ(shape.empty(), false,
                      platform::errors::InvalidArgument(
                          "If there is no Input(ShapeTensorList) and no "
                          "Input(ShapeTensor), the shape information must "
                          "be set by Attr(shape)."));
    std::vector<int64_t> tensor_shape;
    tensor_shape.reserve(shape.size());
    for (auto dim : shape) {
      tensor_shape.push_back(static_cast<int64_t>(dim));
    }
    ctx->SetOutputDim("Out", framework::make_ddim(tensor_shape));
  }

 protected:
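  // The kernel dtype comes from Attr("dtype") rather than from an input,
  // since the op has no data inputs to infer it from.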
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(
        static_cast<framework::proto::VarType::Type>(ctx.Attr<int>("dtype")),
        ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (var_name == "ShapeTensorList" || var_name == "ShapeTensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};

class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("ShapeTensor",
190 191
             "(Tensor<int64_t> or Tensor<int32_t>, optional) . If provided, "
             "uniform_random "
192
             "according to "
193
             "this given shape. It means that it has a higher priority than "
194
             "the shape attribute, while the shape attribute still should be "
T
tianshuo78520a 已提交
195
             "set correctly to guarantee shape inference in compile time.")
196 197
        .AsDispensable();
    AddInput("ShapeTensorList",
             "(vector<Tensor<int64_t>> or vector<Tensor<int32_t>>, optional). "
             "If provided, uniform_random use this. The shape of the tensor "
             "must be [1], it has the highest priority comparing with "
             "Input(ShapeTensor) and attr(shape).")
        .AsDuplicable()
        .AsDispensable();
    AddOutput("Out", "The output tensor of uniform random op");
    AddComment(R"DOC(
This operator initializes a tensor with random values sampled from a
uniform distribution. The random result is in the range [min, max).

)DOC");
    AddAttr<std::vector<int64_t>>("shape", "The shape of the output tensor")
        .SetDefault({});
    AddAttr<float>("min", "Minimum value of uniform random. [default -1.0].")
        .SetDefault(-1.0f);
    AddAttr<float>("max", "Maximun value of uniform random. [default 1.0].")
        .SetDefault(1.0f);
    AddAttr<int>("seed",
                 "Random seed used for generating samples. "
                 "0 means use a seed generated by the system."
                 "Note that if seed is not 0, this operator will always "
                 "generate the same random numbers every time. [default 0].")
        .SetDefault(0);
    AddAttr<int>("diag_num",
                 "The number of diag elements. Note that if "
                 "diag_num is 0, it means without diag init.[default 0].")
        .SetDefault(0);
    AddAttr<int>("diag_step", "The step between two diag element.[default 0].")
        .SetDefault(0);
    AddAttr<float>("diag_val", "The value of diag element. [default 1.0].")
        .SetDefault(1.0f);
    AddAttr<int>("dtype", "Output tensor data type. [default 5(FP32)].")
        .SetDefault(framework::proto::VarType::FP32);
  }
};

class UniformRandomOpVarTypeInference : public framework::VarTypeInference {
 public:
  void operator()(framework::InferVarTypeContext *ctx) const override {
    auto var_data_type = static_cast<framework::proto::VarType::Type>(
        BOOST_GET_CONST(int, ctx->GetAttr("dtype")));

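    // Preserve SELECTED_ROWS if "Out" was declared as such; otherwise
    // default to LOD_TENSOR. The element dtype always follows Attr(dtype).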
    if (ctx->GetOutputType("Out") != framework::proto::VarType::SELECTED_ROWS) {
      ctx->SetOutputType("Out", framework::proto::VarType::LOD_TENSOR);
    }
    ctx->SetOutputDataType("Out", var_data_type);
  }
};

}  // namespace operators
}  // namespace paddle

REGISTER_OPERATOR(
    uniform_random, paddle::operators::UniformRandomOp,
    paddle::operators::UniformRandomOpMaker,
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
    paddle::operators::UniformRandomOpVarTypeInference);

REGISTER_OP_CPU_KERNEL(uniform_random,
                       paddle::operators::CPUUniformRandomKernel<float>,
                       paddle::operators::CPUUniformRandomKernel<double>);
REGISTER_OP_CPU_KERNEL(uniform_random_batch_size_like,
                       paddle::operators::CPUUniformRandomKernel<float>,
                       paddle::operators::CPUUniformRandomKernel<double>);
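
// A minimal usage sketch (hypothetical; assumes the fluid Python API of this
// Paddle generation, where the exact module path may differ by version):
//
//   import paddle.fluid as fluid
//   out = fluid.layers.uniform_random(shape=[2, 3], min=-1.0, max=1.0)
//
// This builds a single uniform_random op with Attr(shape) = [2, 3], executed
// on CPU by CPUUniformRandomKernel<float>.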