/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <math.h>
#include <iterator>
#include <random>
#include <set>
#include <string>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/operators/math/sampler.h"
#include "unsupported/Eigen/CXX11/Tensor"

#ifdef PADDLE_WITH_DISTRIBUTE
#include "paddle/fluid/operators/distributed/parameter_prefetch.h"
#endif

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using SelectedRows = framework::SelectedRows;
using Sampler = math::Sampler;
using DDim = framework::DDim;

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;

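// PrepareSamples fills Output("SampleLabels"): for each input row the first
// num_label entries are the true classes copied from Input("Label"), and the
// remaining slots hold negative classes, taken either from the
// custom_neg_classes attribute (for deterministic unit tests) or drawn from
// the given sampler.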
template <typename DeviceContext, typename T>
void PrepareSamples(const framework::ExecutionContext &context,
                    Sampler *sampler) {
  auto label = context.Input<Tensor>("Label");
  const int64_t *label_data = label->data<int64_t>();
  auto label_dims = label->dims();
  // custom_neg_classes lets unit tests fix the negative classes
  std::vector<int> custom_neg_classes =
      context.Attr<std::vector<int>>("custom_neg_classes");

  auto sample_labels = context.Output<Tensor>("SampleLabels");
  auto sample_labels_dims = sample_labels->dims();
  int64_t *sample_labels_data =
      sample_labels->mutable_data<int64_t>(context.GetPlace());

  int num_label = label_dims.size() == 2 ? label_dims[1] : 1;
  int index = 0;
  for (int64_t i = 0; i < label_dims[0]; ++i) {
    int j = 0;
    for (; j < num_label; ++j) {
      sample_labels_data[index++] = label_data[i * num_label + j];
    }
    if (custom_neg_classes.size() > 0) {
      for (auto label : custom_neg_classes) {
        sample_labels_data[index++] = label;
      }
    } else {
      for (; j < sample_labels_dims[1]; ++j) {
        // TODO(wanghaoshuang): support more distribution sampling
        sample_labels_data[index++] = sampler->Sample();
      }
    }
  }
}

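// NCE forward. For every (input row, sampled class) pair the kernel computes
// logit = x . w_class + b_class, maps it through the sigmoid
// o = 1 / (1 + exp(-logit)), and accumulates the per-row cost
//   -log(o / (o + b)) for true classes,
//   -log(b / (o + b)) for negative classes,
// where b = P_noise(class) * num_neg_samples.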
template <typename DeviceContext, typename T>
class NCEKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    int sampler_type = context.Attr<int>("sampler");
    int seed = context.Attr<int>("seed");
    int num_total_classes = context.Attr<int>("num_total_classes");
    int num_neg_samples = context.Attr<int>("num_neg_samples");

    Sampler *sampler;
    switch (sampler_type) {
      case 0: {
        sampler = new math::UniformSampler(num_total_classes - 1, seed);
        break;
      }
      case 1: {
        sampler = new math::LogUniformSampler(num_total_classes - 1, seed);
        break;
      }
      case 2: {
        auto dist_probs = context.Input<Tensor>("CustomDistProbs");
        auto dist_alias = context.Input<Tensor>("CustomDistAlias");
        auto dist_alias_probs = context.Input<Tensor>("CustomDistAliasProbs");

        PADDLE_ENFORCE_EQ(
            dist_probs->numel(), num_total_classes,
            platform::errors::InvalidArgument(
                "ShapeError: The number of elements in Input(CustomDistProbs) "
                "should be equal to the number of total classes. But received: "
                "Input(CustomDistProbs).numel() = %d, Attr(num_total_classes) "
                "= %d.",
                dist_probs->numel(), num_total_classes));
        PADDLE_ENFORCE_EQ(
            dist_alias->numel(), num_total_classes,
            platform::errors::InvalidArgument(
                "ShapeError: The number of elements in Input(CustomDistAlias) "
                "should be equal to the number of total classes. But received: "
                "Input(CustomDistAlias).numel() = %d, Attr(num_total_classes) "
                "= %d.",
                dist_alias->numel(), num_total_classes));
        PADDLE_ENFORCE_EQ(
            dist_alias_probs->numel(), num_total_classes,
            platform::errors::InvalidArgument(
                "ShapeError: The number of elements in "
                "Input(CustomDistAliasProbs) "
                "should be equal to the number of total classes. But received: "
                "Input(CustomDistAliasProbs).numel() = %d, "
                "Attr(num_total_classes) = %d.",
                dist_alias_probs->numel(), num_total_classes));

        const float *probs_data = dist_probs->data<float>();
        const int *alias_data = dist_alias->data<int>();
        const float *alias_probs_data = dist_alias_probs->data<float>();
        sampler = new math::CustomSampler(num_total_classes - 1, probs_data,
                                          alias_data, alias_probs_data, seed);
        break;
      }
      default: {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Unsupported SamplerType. SamplerType should be 0: Uniform, "
            "1: LogUniform or 2: CustomDist. Received SamplerType: %d",
            sampler_type));
      }
    }

    PrepareSamples<DeviceContext, T>(context, sampler);
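    // Every sampled label indexes rows of Weight and Bias below, so reject
    // negative values up front.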
    auto sample_labels = context.Output<Tensor>("SampleLabels");
    const int64_t *sample_labels_data = sample_labels->data<int64_t>();

    for (int x = 0; x < sample_labels->numel(); x++) {
      PADDLE_ENFORCE_GE(sample_labels_data[x], 0,
                        platform::errors::InvalidArgument(
                            "ValueError: Every sample label should be "
                            "non-negative. But received: "
                            "Input(SampleLabels)[%d] = %d",
                            x, sample_labels_data[x]));
    }

    auto sample_out = context.Output<Tensor>("SampleLogits");
    T *sample_out_data = sample_out->mutable_data<T>(context.GetPlace());
    auto label = context.Input<Tensor>("Label");
    auto sample_weight = context.Input<Tensor>("SampleWeight");
    const T *sample_weight_data = nullptr;
    if (sample_weight != nullptr) {
      sample_weight_data = sample_weight->data<T>();
    }
    auto out = context.Output<Tensor>("Cost");
    T *out_data = out->mutable_data<T>(context.GetPlace());
    int64_t num_true_class = 1;
    if (label != nullptr) {
      num_true_class = label->dims()[1];
    }
    int64_t sampled_labels_num = sample_labels->dims()[1];
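    // SampleLogits is laid out row-major: entry i belongs to input row
    // i / sampled_labels_num and to class sample_labels_data[i].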
    // forward bias
    auto bias = context.Input<Tensor>("Bias");
    if (bias != nullptr) {
      const T *bias_data = bias->data<T>();
      for (int64_t i = 0; i < sample_labels->numel(); ++i) {
        sample_out_data[i] = bias_data[sample_labels_data[i]];
      }
    } else {
      for (int64_t i = 0; i < sample_labels->numel(); ++i) {
        sample_out_data[i] = 0;
      }
    }
    // forward mul
    auto input_mat = EigenMatrix<T>::From(*(context.Input<Tensor>("Input")));
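    // Note: despite its name, SampleLogits ends up holding sigmoid outputs o,
    // since the sigmoid is applied in place right after the dot product below.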

    // for remote prefetch
    auto remote_prefetch = context.Attr<bool>("remote_prefetch");
    auto epmap = context.Attr<std::vector<std::string>>("epmap");

    if (remote_prefetch && !epmap.empty()) {
      // if epmap is not empty, the rows of the Weight parameter live on
      // remote parameter servers and must be prefetched before use

      std::vector<int64_t> labels;
      for (int64_t i = 0; i < sample_labels->numel(); ++i) {
        labels.push_back(sample_labels_data[i]);
      }
      // deduplicate sampled ids; keep them as int64_t
      std::set<int64_t> st(labels.begin(), labels.end());
      labels.assign(st.begin(), st.end());

      framework::Scope &local_scope = context.scope().NewScope();

      auto table_names = context.Attr<std::vector<std::string>>("table_names");

      auto *ids = local_scope.Var("Ids@Prefetch");
      auto *x_tensor = ids->GetMutable<framework::LoDTensor>();
      x_tensor->mutable_data<int64_t>(
          framework::make_ddim({static_cast<int64_t>(labels.size()), 1}),
          context.GetPlace());
      // copy the de-duplicated labels into the prefetch ids tensor
      std::memcpy(x_tensor->data<int64_t>(), labels.data(),
                  labels.size() * sizeof(int64_t));

      std::vector<int> w_dims = paddle::framework::vectorize<int>(
          context.Input<Tensor>("Weight")->dims());
      w_dims[0] = static_cast<int>(labels.size());

      auto *w_tensor = local_scope.Var("Weight@Prefetch")
                           ->GetMutable<framework::LoDTensor>();
      w_tensor->Resize(framework::make_ddim(w_dims));

#ifdef PADDLE_WITH_DISTRIBUTE
      auto weight = context.InputNames("Weight").front();
      operators::distributed::prefetch("Ids@Prefetch", "Weight@Prefetch",
                                       weight, false, table_names, epmap,
                                       context, local_scope);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "paddle is not compiled with distribute support, cannot do "
          "parameter prefetch!"));
#endif

      auto weight_mat = EigenMatrix<T>::From(
          (local_scope.Var("Weight@Prefetch")->Get<framework::LoDTensor>()));
      for (int64_t i = 0; i < sample_labels->numel(); ++i) {
        std::vector<int64_t>::iterator it =
            std::find(labels.begin(), labels.end(), sample_labels_data[i]);
        int idx = std::distance(labels.begin(), it);

        Eigen::Tensor<T, 0, Eigen::RowMajor, Eigen::DenseIndex> result =
            (input_mat.chip(static_cast<int>(i / sample_labels->dims()[1]), 0) *
             weight_mat.chip(idx, 0))
                .sum();
        sample_out_data[i] += result(0);
        sample_out_data[i] = (1. / (1. + exp(-sample_out_data[i])));
      }
      context.scope().DeleteScope(&local_scope);
    } else {
      auto weight_mat =
          EigenMatrix<T>::From(*(context.Input<Tensor>("Weight")));
      for (int64_t i = 0; i < sample_labels->numel(); ++i) {
        Eigen::Tensor<T, 0, Eigen::RowMajor, Eigen::DenseIndex> result =
            (input_mat.chip(static_cast<int>(i / sample_labels->dims()[1]), 0) *
             weight_mat.chip(sample_labels_data[i], 0))
                .sum();
        sample_out_data[i] += result(0);
        sample_out_data[i] = (1. / (1. + exp(-sample_out_data[i])));
      }
    }

    // forward cost
    for (int64_t i = 0; i < sample_labels->dims()[0]; ++i) {
      out_data[i] = 0;
      T w = sample_weight == nullptr ? 1. : sample_weight_data[i];
      for (int64_t j = 0; j < sampled_labels_num; ++j) {
        int64_t target = sample_labels_data[i * sampled_labels_num + j];
        T o = sample_out_data[i * sampled_labels_num + j];
        float b = sampler->Probability(target) * num_neg_samples;
        T cost = (j < num_true_class) ? -log(o / (o + b)) : -log(b / (o + b));
        out_data[i] += w * cost;
      }
    }
    delete sampler;
  }
};

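// NCE backward. With o = sigmoid(logit) and b = P_noise(class) *
// num_neg_samples, the derivative of the per-sample cost w.r.t. the logit is
//   d/dlogit[-log(o / (o + b))] = (b / (o + b)) * (o - 1)  for a true class,
//   d/dlogit[-log(b / (o + b))] = o * (1 - o) / (o + b)    for a negative one.
// sample_grad below stores these values, scaled by the sample weight and the
// upstream gradient, and is then scattered into d_bias, d_w and d_x.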
template <typename DeviceContext, typename T>
class NCEGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto d_out = context.Input<Tensor>(framework::GradVarName("Cost"));
    const T *d_out_data = d_out->data<T>();
    auto label = context.Input<Tensor>("Label");
    auto sample_out = context.Input<Tensor>("SampleLogits");
    const T *sample_out_data = sample_out->data<T>();
    auto sample_labels = context.Input<Tensor>("SampleLabels");
    const int64_t *sample_labels_data = sample_labels->data<int64_t>();
    auto sample_weight = context.Input<Tensor>("SampleWeight");
    const T *sample_weight_data = nullptr;
    if (sample_weight != nullptr) {
      sample_weight_data = sample_weight->data<T>();
    }
    int num_neg_samples = context.Attr<int>("num_neg_samples");
    int num_total_classes = context.Attr<int>("num_total_classes");
    int num_true_class = 1;
    if (label != nullptr) {
      num_true_class = label->dims()[1];
    }

    int sampler_type = context.Attr<int>("sampler");
    int seed = context.Attr<int>("seed");
    Sampler *sampler;
    switch (sampler_type) {
      case 0: {
        sampler = new math::UniformSampler(num_total_classes - 1, seed);
        break;
      }
      case 1: {
        sampler = new math::LogUniformSampler(num_total_classes - 1, seed);
        break;
      }
      case 2: {
        auto dist_probs = context.Input<Tensor>("CustomDistProbs");
        auto dist_alias = context.Input<Tensor>("CustomDistAlias");
        auto dist_alias_probs = context.Input<Tensor>("CustomDistAliasProbs");

        PADDLE_ENFORCE_EQ(
            dist_probs->numel(), num_total_classes,
            platform::errors::InvalidArgument(
                "ShapeError: The number of elements in Input(CustomDistProbs) "
                "should be equal to the number of total classes. But received: "
                "Input(CustomDistProbs).numel() = %d, Attr(num_total_classes) "
                "= %d.",
                dist_probs->numel(), num_total_classes));
        PADDLE_ENFORCE_EQ(
            dist_alias->numel(), num_total_classes,
            platform::errors::InvalidArgument(
                "ShapeError: The number of elements in Input(CustomDistAlias) "
                "should be equal to the number of total classes. But received: "
                "Input(CustomDistAlias).numel() = %d, Attr(num_total_classes) "
                "= %d.",
                dist_alias->numel(), num_total_classes));
        PADDLE_ENFORCE_EQ(
            dist_alias_probs->numel(), num_total_classes,
            platform::errors::InvalidArgument(
                "ShapeError: The number of elements in "
                "Input(CustomDistAliasProbs) "
                "should be equal to the number of total classes. But received: "
                "Input(CustomDistAliasProbs).numel() = %d, "
                "Attr(num_total_classes) = %d.",
                dist_alias_probs->numel(), num_total_classes));

        const float *probs_data = dist_probs->data<float>();
        const int *alias_data = dist_alias->data<int>();
        const float *alias_probs_data = dist_alias_probs->data<float>();
        sampler = new math::CustomSampler(num_total_classes - 1, probs_data,
                                          alias_data, alias_probs_data, seed);
        break;
      }
      default: {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Unsupported SamplerType. SamplerType should be 0: Uniform, "
            "1: LogUniform or 2: CustomDist. Received SamplerType: %d",
            sampler_type));
      }
    }

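    // The sampler above is rebuilt with the same type and seed as in the
    // forward pass, so sampler->Probability() yields the same noise
    // probabilities that produced the cost.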
    Tensor sample_grad;  // tmp tensor
    T *sample_grad_data =
        sample_grad.mutable_data<T>(sample_labels->dims(), context.GetPlace());
    // backward cost
    for (int64_t i = 0; i < sample_labels->numel(); ++i) {
      int64_t label_idx = i % sample_labels->dims()[1];
      int64_t sample_idx = i / sample_labels->dims()[1];
      float b = sampler->Probability(sample_labels_data[i]) * num_neg_samples;
      T o = sample_out_data[i];
      T w = sample_weight == nullptr ? 1 : sample_weight_data[sample_idx];
      sample_grad_data[i] = label_idx < num_true_class
                                ? w * (b / (o + b)) * (o - 1)
                                : w * (o * (1 - o) / (o + b));
      sample_grad_data[i] *= d_out_data[sample_idx];
    }

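    // d_bias: logit = x . w_class + b_class, so dlogit/db_class = 1 and each
    // class bias simply accumulates the sample gradients that hit it.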
    // get d_bias
    auto d_bias = context.Output<Tensor>(framework::GradVarName("Bias"));
    if (d_bias != nullptr) {
      T *d_bias_data = d_bias->mutable_data<T>(context.GetPlace());
      std::fill(d_bias_data, d_bias_data + d_bias->numel(), 0.0);
      for (int64_t i = 0; i < sample_labels->numel(); ++i) {
        d_bias_data[sample_labels_data[i]] += sample_grad_data[i];
      }
    }

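    // d_w: the dense path writes a full gradient tensor, while the sparse
    // path emits SelectedRows holding only the rows that were sampled.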
    bool is_sparse = context.Attr<bool>("is_sparse");

    if (!is_sparse) {
      // get d_w
      auto d_w = context.Output<Tensor>(framework::GradVarName("Weight"));
      if (d_w != nullptr) {
        auto d_w_data = d_w->mutable_data<T>(context.GetPlace());
        std::fill(d_w_data, d_w_data + d_w->numel(), 0.0);
        auto d_w_matrix = EigenMatrix<T>::From(*d_w);
        auto x_matrix = EigenMatrix<T>::From(*(context.Input<Tensor>("Input")));
        for (int64_t i = 0; i < sample_labels->numel(); ++i) {
          d_w_matrix.chip(sample_labels_data[i], 0) +=
              x_matrix.chip(static_cast<int>(i / sample_labels->dims()[1]), 0) *
              sample_grad_data[i];
        }
      }
    } else {
      std::vector<int64_t> labels;
      for (int64_t i = 0; i < sample_labels->numel(); ++i) {
        labels.push_back(sample_labels_data[i]);
      }
      // deduplicate sampled ids; keep them as int64_t so large ids are not
      // rounded through the floating-point type T
      std::set<int64_t> st(labels.begin(), labels.end());
      labels.assign(st.begin(), st.end());

      auto *table_var = context.InputVar("Weight");
      DDim table_dim;
      if (table_var->IsType<LoDTensor>()) {
        table_dim = context.Input<LoDTensor>("Weight")->dims();
      } else if (table_var->IsType<SelectedRows>()) {
        auto *table_t = context.Input<SelectedRows>("Weight");
        table_dim = table_t->value().dims();
      } else {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "The parameter Weight of a NCE_OP "
            "must be either LoDTensor or SelectedRows"));
      }

      auto d_w = context.Output<SelectedRows>(framework::GradVarName("Weight"));

      d_w->set_rows(labels);
      d_w->set_height(table_dim[0]);

      auto *d_table_value = d_w->mutable_value();
      d_table_value->Resize(
          {static_cast<int64_t>(labels.size()), table_dim[1]});
      auto d_w_data = d_table_value->mutable_data<T>(context.GetPlace());
      std::fill(d_w_data, d_w_data + d_table_value->numel(), 0.0);

      auto d_w_matrix = EigenMatrix<T>::From(*d_table_value);
      auto x_matrix = EigenMatrix<T>::From(*(context.Input<Tensor>("Input")));
      for (int64_t i = 0; i < sample_labels->numel(); ++i) {
        d_w_matrix.chip(d_w->Index(sample_labels_data[i]), 0) +=
            x_matrix.chip(static_cast<int>(i / sample_labels->dims()[1]), 0) *
            sample_grad_data[i];
      }
    }

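    // d_x: dlogit/dx = w_class, so each input row accumulates
    // w_class * sample_grad over its sampled classes.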
    // get d_x
    auto d_x = context.Output<Tensor>(framework::GradVarName("Input"));
    if (d_x != nullptr) {
      auto *d_x_data = d_x->mutable_data<T>(context.GetPlace());
      std::fill(d_x_data, d_x_data + d_x->numel(), 0.0);
      auto d_x_matrix = EigenMatrix<T>::From(*d_x);
      auto w_matrix = EigenMatrix<T>::From(*(context.Input<Tensor>("Weight")));
      for (int64_t i = 0; i < sample_labels->numel(); ++i) {
        d_x_matrix.chip(static_cast<int>(i / sample_labels->dims()[1]), 0) +=
            w_matrix.chip(sample_labels_data[i], 0) * sample_grad_data[i];
      }
    }

    delete sampler;
  }
};
}  // namespace operators
}  // namespace paddle