// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/dirichlet_op.h"

#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/operators/reduce_ops/reduce_op.h"
#include "paddle/fluid/operators/reduce_ops/reduce_sum_op.h"

namespace paddle {
namespace operators {
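// Element-wise functor: draws one standard Gamma sample with concentration
// alpha_[index] for each index, using the provided uniform and normal samplers.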
template <typename T, typename UniformSamplerT, typename NormalSamplerT>
struct GammaCPUFunctor {
  GammaCPUFunctor(const T* alpha,
                  T* gamma,
                  BaseSampler<T, UniformSamplerT> uniform,
                  BaseSampler<T, NormalSamplerT> normal)
      : alpha_(alpha), gamma_(gamma), uniform_(uniform), normal_(normal) {}

  HOST void operator()(int64_t index) {
    auto sample = sample_gamma<T, T, UniformSamplerT, NormalSamplerT>(
        alpha_[index], uniform_, normal_);
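    // Clamp to the smallest positive normal value so every Gamma draw stays
    // strictly positive before the later normalization.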
    gamma_[index] = std::max(std::numeric_limits<T>::min(), sample);
  }

  const T* alpha_;
  T* gamma_;
  BaseSampler<T, UniformSamplerT> uniform_;
  BaseSampler<T, NormalSamplerT> normal_;
};

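// CPU specialization of DirichletSampler: draw an independent Gamma(alpha_i, 1)
// variate for every element of Alpha, then normalize along the last axis so
// each slice sums to one, i.e. out_i = g_i / sum_j g_j ~ Dirichlet(alpha).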
template <typename T>
struct DirichletSampler<phi::CPUContext, T> {
  void operator()(const framework::ExecutionContext& ctx,
                  const Tensor* alpha,
                  Tensor* out) {
    auto& dev_ctx = ctx.device_context<phi::CPUContext>();

    auto p_gen = framework::DefaultCPUGenerator();
    auto generator = p_gen->GetCPUEngine();

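    // Uniform(0, 1) and standard normal samplers backed by the default CPU
    // random engine; both are consumed by sample_gamma.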
    auto uniform = [&generator]() -> T {
      std::uniform_real_distribution<T> u(0.0, 1.0);
      return u(*generator);
    };
    BaseSampler<T, decltype(uniform)> standard_uniform(uniform);

    auto normal = [&generator]() {
      std::normal_distribution<T> n(0.0, 1.0);
      return n(*generator);
    };
    BaseSampler<T, decltype(normal)> standard_normal(normal);

    // sample from K gamma distributions, where K=alpha.numel()
    framework::Tensor gamma_samples;
    gamma_samples.mutable_data<T>(alpha->dims(), dev_ctx.GetPlace());
    GammaCPUFunctor<T, decltype(uniform), decltype(normal)> gamma_functor(
        alpha->data<T>(),
        gamma_samples.data<T>(),
        standard_uniform,
        standard_normal);
    platform::ForRange<phi::CPUContext> for_range(dev_ctx, alpha->numel());
    for_range(gamma_functor);

    // normalize them into a simplex, along the last axis
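    // The reduction keeps the last dimension as size 1 so gamma_sum broadcasts
    // against gamma_samples in the element-wise division below.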
    framework::Tensor gamma_sum;
    auto new_shape = gamma_samples.dims();
    new_shape[new_shape.size() - 1] = 1;
    gamma_sum.mutable_data<T>(new_shape, dev_ctx.GetPlace());

    ReduceKernelFunctor<phi::CPUContext, T, SumFunctor>(
        &gamma_samples, &gamma_sum, {new_shape.size() - 1}, true, false, ctx)
        .template apply<T>();
    ElementwiseComputeEx<DivFunctor<T>, phi::CPUContext, T, T>(
        ctx, &gamma_samples, &gamma_sum, -1, DivFunctor<T>(), out);
  }
};

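// Op proto: one input Alpha (the concentration parameters) and one output Out
// (the sampled values).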
class DirichletOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("Alpha", "(Tensor), The dirichlet Alpha parameter");
    AddOutput("Out", "(Tensor), The output tensor of sample");
    AddComment(R"DOC(Sample random data from dirichlet distribution.)DOC");
  }
};

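// Shape inference: Alpha must have at least one dimension, and Out takes the
// same shape as Alpha.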
class DirichletOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("Alpha"), "Input", "Alpha", "dirichlet");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "dirichlet");
    const auto alpha_dim = ctx->GetInputDim("Alpha");
    PADDLE_ENFORCE_GE(alpha_dim.size(),
                      1,
                      platform::errors::InvalidArgument(
                          "ShapeError: The number of dimensions of 'Alpha' "
                          "must be greater than or euqal to 1. "
                          "But received Alpha's dimensions = %d,",
                          alpha_dim.size()));
    ctx->ShareDim("Alpha", /*->*/ "Out");
  }
};

}  // namespace operators
}  // namespace paddle

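// Register the operator (no gradient op is defined for sampling) and its CPU
// kernels for float and double.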
REGISTER_OP_WITHOUT_GRADIENT(dirichlet,
                             paddle::operators::DirichletOp,
                             paddle::operators::DirichletOpMaker);
REGISTER_OP_CPU_KERNEL(
    dirichlet,
    paddle::operators::DirichletKernel<phi::CPUContext, float>,
    paddle::operators::DirichletKernel<phi::CPUContext, double>);