/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
   http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#include <random>
#include <type_traits>
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"

namespace paddle {
namespace operators {

// It seems that Eigen::Tensor::random on the GPU will SEGFAULT.
// Use std::random on the CPU and thrust::random (thrust is the C++ template
// library shipped with CUDA) on the GPU to implement uniform random instead.
template <typename T>
class CPUUniformRandomKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* tensor = context.Output<framework::Tensor>("Out");
    T* data = tensor->mutable_data<T>(context.GetPlace());
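    // A "seed" attribute of 0 requests a non-deterministic seed from
    // std::random_device; any other value makes the sequence reproducible.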
    unsigned int seed = static_cast<unsigned int>(context.GetAttr<int>("seed"));
    std::minstd_rand engine;
    if (seed == 0) {
      seed = std::random_device()();
    }
    engine.seed(seed);
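    // Sample uniformly from [min, max); the float attributes are cast to the
    // kernel's data type T.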
    std::uniform_real_distribution<T> dist(
        static_cast<T>(context.GetAttr<float>("min")),
        static_cast<T>(context.GetAttr<float>("max")));
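    // Fill every element of the flattened output with an independent draw.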
    int64_t size = framework::product(tensor->dims());
    for (int64_t i = 0; i < size; ++i) {
      data[i] = dist(engine);
    }
  }
};

class UniformRandomOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(const framework::InferShapeContext& ctx) const override {
    PADDLE_ENFORCE(GetAttr<float>("min") < GetAttr<float>("max"),
                   "uniform_random's min must be less than max");
    auto* tensor = ctx.Output<framework::Tensor>("Out");
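    // The output shape is given entirely by the "dims" attribute; make_ddim
    // expects int64_t, so widen each entry before resizing.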
    auto dims = GetAttr<std::vector<int>>("dims");
    std::vector<int64_t> temp;
    temp.reserve(dims.size());
    for (auto dim : dims) {
      temp.push_back(static_cast<int64_t>(dim));
    }
    tensor->Resize(framework::make_ddim(temp));
  }
};

class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  UniformRandomOpMaker(framework::OpProto* proto,
                       framework::OpAttrChecker* op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
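    // Declare the single output and the attributes that control the shape,
    // value range, and seeding of the generated tensor.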
    AddOutput("Out", "The output tensor of uniform random op");
    AddComment(R"DOC(Uniform random operator.
Used to initialize tensor with uniform random generator.
)DOC");
    AddAttr<std::vector<int>>("dims", "the dimension of random tensor");
    AddAttr<float>("min", "Minimum value of uniform random").SetDefault(-1.0f);
    AddAttr<float>("max", "Maximun value of uniform random").SetDefault(1.0f);
    AddAttr<int>("seed",
                 "Random seed of uniform random. "
                 "0 means generate a seed by system")
        .SetDefault(0);
  }
};
}  // namespace operators
}  // namespace paddle

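// uniform_random has no gradient, so it is registered without one; only the
// float CPU kernel is bound in this file.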
REGISTER_OP_WITHOUT_GRADIENT(uniform_random, paddle::operators::UniformRandomOp,
                             paddle::operators::UniformRandomOpMaker);
REGISTER_OP_CPU_KERNEL(uniform_random,
                       paddle::operators::CPUUniformRandomKernel<float>);