/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/hierarchical_sigmoid_op.h"
#include <vector>

namespace paddle {
namespace operators {

/**
 * Organize the classes into a binary tree. At each node, a sigmoid function
 * is used to calculate the probability of belonging to the right branch.
 * This idea is from "F. Morin, Y. Bengio (AISTATS 05):
 * Hierarchical Probabilistic Neural Network Language Model."
 *
 * Here we use a simple way of constructing the binary tree.
 * Assuming the number of classes is C = 6,
 * the classes are organized as a binary tree in the following way:
 *
 * @code{.py}
 * *-*-*- 2
 * | | |- 3
 * | |
 * | |-*- 4
 * |   |- 5
 * |
 * |-*- 0
 *   |- 1
 * @endcode
 *
 * where * indicates an internal node, and each leaf node represents a class.
 * - Node 0 ... C-2 are internal nodes.
 * - Node C-1 ... 2C-2 are leaf nodes.
 * - Class c is represented by leaf node \f$c+C-1\f$.
 *
 * We assign an id to each node:
 * - the id of the root is 0.
 * - the left child of a node i is 2*i+1.
 * - the right child of a node i is 2*i+2.
 *
 * It's easy to see that:
 * - the parent of node i is \f$\left\lfloor(i-1)/2\right\rfloor\f$.
 * - the j-th level ancestor of node i is
 * \f$\left\lfloor(i+1)/2^{j+1}\right\rfloor - 1\f$.
 * - A node i is a left child of its parent if \f$(i-1)\%2==0\f$.
 *
 */

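// A minimal illustrative sketch (not used by the operator itself) of the
// node-index arithmetic described in the comment above; the helper names
// below are assumptions chosen only for exposition.
namespace {
constexpr int LeftChild(int i) { return 2 * i + 1; }
constexpr int RightChild(int i) { return 2 * i + 2; }
constexpr int ParentOf(int i) { return (i - 1) / 2; }
constexpr bool IsLeftChild(int i) { return (i - 1) % 2 == 0; }
// Class c (0-based) out of num_classes classes lives at leaf node
// c + num_classes - 1.
constexpr int LeafOfClass(int c, int num_classes) {
  return c + num_classes - 1;
}
// With C = 6 classes: class 2 maps to leaf node 7, whose parent is internal
// node 3; node 7 is a left child since (7 - 1) % 2 == 0.
static_assert(LeafOfClass(2, 6) == 7, "class 2 of 6 lives at leaf node 7");
static_assert(ParentOf(7) == 3 && IsLeftChild(7),
              "node 7 is the left child of node 3");
static_assert(LeftChild(3) == 7 && RightChild(3) == 8,
              "the children of node 3 are nodes 7 and 8");
}  // namespace
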
class HierarchicalSigmoidOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("W"), "Input(W) should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("PreOut"),
                   "Output(PreOut) should not be null.");
    const int64_t batch_size = ctx->GetInputDim("X")[0];
    std::vector<int64_t> output_shape({batch_size, 1});
    ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
        ctx.GetPlace());
  }
};

template <typename AttrType>
class HierarchicalSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "(LoDTensor, required) The input tensor with shape [N, D], "
             "where N is the size of mini-batch, and D is the feature size.");
    AddInput("W",
             "(LoDTensor, required), The parameters of hierarchical "
G
guosheng 已提交
94
             "sigmoid operator, each of them is a 2-D tensor, the shape is"
95
             "[K, D]. Which K is the num of non-leaf node in Path Tree");
    AddInput("Label",
             "(LoDTensor, required), The labels of training data. It's a"
G
guosheng 已提交
98
             "tensor with shape [N, 1].");
    AddInput("PTable",
             "(LoDTensor, optional), The Path Table from root to current word"
101 102
             "it should have shape like [N, L], L is the length of the Path")
        .AsDispensable();
    AddInput(
        "PCode",
        "(LoDTensor, optional) The code on each node of the path from the "
        "root to the current word; it should have shape [N, L], where L is "
        "the length of the path.")
        .AsDispensable();
    AddInput("Bias",
             "(LoDTensor, optional), The bias is a tensor with shape"
G
guosheng 已提交
111
             "[1, num_classes - 1].");
    AddOutput(
        "Out",
        "(LoDTensor, required) The output of the hierarchical sigmoid "
        "operator. The shape is [N, 1].");
    AddOutput("PreOut",
              "(LoDTensor, required) A intermedia 2-D tensor with shape "
              "[batch_size, code_length], where code_length represents the "
              "maximum path length from root to leaf nodes.")
        .AsIntermediate();
    AddAttr<AttrType>("num_classes", "(int, optional), The number of classes")
        .SetDefault(2);
    AddComment(R"DOC(
The hierarchical sigmoid operator organizes the classes into a binary tree.
At each node, a sigmoid function is used to calculate the probability of
belonging to the right branch. This idea is from
"F. Morin, Y. Bengio (AISTATS 05):
Hierarchical Probabilistic Neural Network Language Model."
      )DOC");
    AddAttr<bool>("is_sparse",
                  "(boolean, default false) "
                  "Sparse update.")
        .SetDefault(false);
  }
};

class HierarchicalSigmoidGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("W"), "Input(W) should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should not be null.");
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "Input(Out@Grad) should not be null");
    PADDLE_ENFORCE(ctx->HasInput("PreOut"),
                   "Input(Preout) should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("W")),
                   "Output(W@Grad should not be null.)");
    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")));
    if (ctx->HasOutput(framework::GradVarName("Bias"))) {
      ctx->SetOutputDim(framework::GradVarName("Bias"),
                        ctx->GetInputDim("Bias"));
    }
    if (!ctx->Attrs().Get<bool>("is_sparse")) {
      ctx->SetOutputDim(framework::GradVarName("W"), ctx->GetInputDim("W"));
    }
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
        ctx.GetPlace());
  }
};

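// Infers the variable type of the W gradient produced by
// hierarchical_sigmoid_grad: a SelectedRows variable when the "is_sparse"
// attribute is set (row-wise sparse update), otherwise a dense LoDTensor.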
class HierarchicalSigmoidGradOpGradVarTypeInference
    : public framework::VarTypeInference {
 public:
  void operator()(const framework::OpDesc& op_desc,
                  framework::BlockDesc* block) const override {
    auto out_var_name = op_desc.Output(framework::GradVarName("W")).front();
    auto attr = op_desc.GetAttr("is_sparse");
    bool is_sparse = boost::get<bool>(attr);
    if (is_sparse) {
      VLOG(3) << "hierarchical_sigmoid_grad op " << framework::GradVarName("W")
              << " is set to SelectedRows";
      block->Var(out_var_name)
          ->SetType(framework::proto::VarType::SELECTED_ROWS);
    } else {
      VLOG(3) << "hierarchical_sigmoid_grad op " << framework::GradVarName("W")
              << " is set to LoDTensor";
      block->Var(out_var_name)->SetType(framework::proto::VarType::LOD_TENSOR);
    }
    block->Var(out_var_name)->SetDataType(block->Var("W")->GetDataType());
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
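// Register the forward and backward operators (the backward op carries the
// gradient variable-type inference above) together with their CPU kernels
// for float and double.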
REGISTER_OPERATOR(hierarchical_sigmoid, ops::HierarchicalSigmoidOp,
                  ops::HierarchicalSigmoidOpMaker<int>,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(hierarchical_sigmoid_grad, ops::HierarchicalSigmoidGradOp,
                  ops::HierarchicalSigmoidGradOpGradVarTypeInference);
REGISTER_OP_CPU_KERNEL(
    hierarchical_sigmoid,
    ops::HierarchicalSigmoidOpKernel<paddle::platform::CPUDeviceContext, float>,
    ops::HierarchicalSigmoidOpKernel<paddle::platform::CPUDeviceContext,
                                     double>);
REGISTER_OP_CPU_KERNEL(
    hierarchical_sigmoid_grad,
    ops::HierarchicalSigmoidGradOpKernel<paddle::platform::CPUDeviceContext,
                                         float>,
    ops::HierarchicalSigmoidGradOpKernel<paddle::platform::CPUDeviceContext,
                                         double>);