/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/randperm_op.h"
#include <string>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"

namespace paddle {
namespace operators {

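// Compile-time definition of the randperm operator: checks that the output
// exists, validates the "n" attribute, and infers the output shape.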
class RandpermOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
                      platform::errors::NotFound(
                          "The output(Out) of randperm op must not be null."));
    int n = ctx->Attrs().Get<int>("n");
    PADDLE_ENFORCE_GT(
        n, 0, platform::errors::InvalidArgument(
                  "The input 'n' of randperm op should be greater than 0. "
                  "But received %d.",
                  n));

    ctx->SetOutputDim("Out", framework::make_ddim({n}));
  }

 protected:
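  // The kernel's data type comes from the "dtype" attribute rather than from
  // any input tensor, since randperm takes no inputs.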
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto data_type =
        static_cast<framework::proto::VarType::Type>(ctx.Attr<int>("dtype"));
    return framework::OpKernelType(data_type, ctx.GetPlace());
  }
};

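// Declares the operator's interface: the single output "Out" and the
// attributes "n", "dtype", and "seed".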
class RandpermOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddOutput("Out", "The output tensor of randperm op.");

    AddAttr<int>(
        "n", "The upper bound (exclusive), and it should be greater than 0.");
    AddAttr<int>("dtype",
                 "The data type of output tensor. "
                 "Default: 3[int64].")
        .SetDefault(framework::proto::VarType::INT64);
    AddAttr<int>("seed",
                 "Random seed used for permute samples. "
                 "0 means use a seed generated by the system."
                 "Note that if seed is not 0, this operator will always "
                 "generate the same random permutation every time. "
                 "Default: 0.")
        .SetDefault(0);

    AddComment(R"DOC( 
This operator returns a random permutation of integers from 0 to n-1.
)DOC");
  }
};

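// Propagates the "dtype" attribute to the data type of the output variable.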
class RandpermOpVarTypeInference : public framework::VarTypeInference {
 public:
  void operator()(framework::InferVarTypeContext *ctx) const override {
    auto var_data_type = static_cast<framework::proto::VarType::Type>(
        BOOST_GET_CONST(int, ctx->GetAttr("dtype")));
    ctx->SetOutputDataType("Out", var_data_type);
  }
};

}  // namespace operators
}  // namespace paddle

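// Register the operator. Randperm has no gradient, so empty gradient op makers
// are supplied for both the static-graph and imperative modes.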
REGISTER_OPERATOR(
    randperm, paddle::operators::RandpermOp, paddle::operators::RandpermOpMaker,
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
    paddle::operators::RandpermOpVarTypeInference);

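// Register the CPU kernels for the supported output data types.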
template <typename T>
using kernel =
    paddle::operators::RandpermKernel<paddle::platform::CPUDeviceContext, T>;

REGISTER_OP_CPU_KERNEL(randperm, kernel<int64_t>, kernel<int>, kernel<float>,
                       kernel<double>);