//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/one_hot_op.h"
#include <string>
#include <vector>
#include "paddle/fluid/framework/framework.pb.h"

namespace paddle {
namespace operators {

class OneHotOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
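  // Shape inference: Out keeps the shape of X except that its last
  // dimension is replaced by the one-hot depth.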
  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "OneHot");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "OneHot");

    auto x_dims = ctx->GetInputDim("X");
    PADDLE_ENFORCE_GE(x_dims.size(), 2,
                      platform::errors::InvalidArgument(
                          "Input(input) rank should be at least 2, "
                          "but received input rank (%d) less than 2",
                          x_dims.size()));

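    // At compile time the last dimension may still be unknown (-1), so the
    // check is only enforced at runtime or when the dimension is known.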
    if (ctx->IsRuntime() || x_dims[x_dims.size() - 1] > 0) {
      PADDLE_ENFORCE_GE(x_dims[x_dims.size() - 1], 1U,
                        platform::errors::InvalidArgument(
                            "Last dimension of Input(input) should be 1, "
                            "but received input Last dimension(%d) != 1",
                            x_dims[x_dims.size() - 1]));
    }

    framework::DDim out_dims(x_dims);
    int depth = ctx->Attrs().Get<int>("depth");
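    // When depth is supplied through the depth_tensor input, its value is not
    // known until runtime, so mark the last output dimension as dynamic (-1).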
    if (ctx->HasInput("depth_tensor")) {
      depth = -1;
    }

    out_dims[out_dims.size() - 1] = depth;
    ctx->SetOutputDim("Out", out_dims);
    ctx->ShareLoD("X", /* --> */ "Out");
  }

 protected:
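  // The kernel's data type follows the index input X.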
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "X"),
        ctx.device_context());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override {
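    // depth_tensor only supplies the scalar depth value; returning the
    // expected kernel type unchanged skips any place/layout transform for it.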
    if (var_name == "depth_tensor") {
      return expected_kernel_type;
    }
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(), tensor.layout());
  }
};

class OneHotOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "(LoDTensor, LoDTensor<int>) Input variable with rank at least 2. "
             "The last dimension of X should be 1. Each value of X is an index "
             "to indicate the position.");
    AddInput("depth_tensor", "(Tensor, Tensor<int>), Length of one-hot vector")
        .AsDispensable();
    AddOutput("Out",
              "(Tensor, Tensor<float>) Output tensor with same rank as X. "
              "The tensor consists of one-hot representations of values in X.");

    AddAttr<int>("depth",
                 "A positive integer to specify the length of one-hot vector.")
        .SetDefault(-1);
    AddAttr<int>("dtype",
                 "An integer to specify the data type of one-hot "
                 "vector. The default value is FP32.")
        .SetDefault(paddle::framework::proto::VarType::FP32);
    AddAttr<bool>("allow_out_of_range",
                  "If it is set true and the input data is out of range, "
                  "the output tensor will be filled zeros. The default value "
                  "is false.")
        .SetDefault(false);
    AddComment(R"DOC(
One Hot Operator. This operator creates one-hot representations for the input
index values. The following example illustrates what the operator does:

X is a LoDTensor:
  X.lod = [[0, 1, 4]]
  X.shape = [4, 1]
  X.data = [[1], [1], [3], [0]]

set depth = 4

Out is a LoDTensor:
  Out.lod = [[0, 1, 4]]
  Out.shape = [4, 4]
  Out.data = [[0., 1., 0., 0.],
              [0., 1., 0., 0.],
              [0., 0., 0., 1.],
              [1., 0., 0., 0.]]
)DOC");
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
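// one_hot has no gradient; empty grad op makers are registered for both the
// static graph (OpDesc) and imperative (OpBase) modes.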
REGISTER_OPERATOR(
    one_hot, ops::OneHotOp, ops::OneHotOpMaker,
    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OP_CPU_KERNEL(
    one_hot, ops::OneHotKernel<paddle::platform::CPUDeviceContext, int>,
    ops::OneHotKernel<paddle::platform::CPUDeviceContext, int64_t>);