//   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <memory>
#include <vector>
#include "paddle/fluid/framework/fleet/box_wrapper.h"
#include "paddle/fluid/framework/fleet/ps_gpu_wrapper.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"

namespace paddle {
namespace operators {

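// Gathers the ids of every "Ids" slot and pulls the matching embedding rows
// from the sparse parameter server (BoxPS or the PS-GPU wrapper, depending on
// the build flags) into the "Out" tensors.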
template <typename T>
static void PullBoxSparseFunctor(const framework::ExecutionContext &ctx) {
  auto inputs = ctx.MultiInput<framework::Tensor>("Ids");
  auto outputs = ctx.MultiOutput<framework::Tensor>("Out");
  const auto slot_size = inputs.size();
  std::vector<const uint64_t *> all_keys(slot_size);
  // BoxPS only supports float now
  std::vector<float *> all_values(slot_size);
  std::vector<int64_t> slot_lengths(slot_size);
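  // Record each slot's key pointer, output buffer, and number of ids.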
  for (size_t i = 0; i < slot_size; i++) {
    const auto *slot = inputs[i];
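    // Ids are stored as int64_t tensors, but the parameter server keys them
    // as uint64_t, hence the reinterpret_cast.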
    const uint64_t *single_slot_keys =
        reinterpret_cast<const uint64_t *>(slot->data<int64_t>());
    all_keys[i] = single_slot_keys;
    slot_lengths[i] = slot->numel();
    auto *output = outputs[i]->mutable_data<T>(ctx.GetPlace());
    all_values[i] = output;
  }
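  // Pull from whichever sparse-server backend this build enables.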
#ifdef PADDLE_WITH_BOX_PS
  auto hidden_size = ctx.Attr<int>("size");
  auto box_ptr = paddle::framework::BoxWrapper::GetInstance();
  box_ptr->PullSparse(ctx.GetPlace(), all_keys, all_values, slot_lengths,
                      hidden_size, 0);
#endif
#if (defined PADDLE_WITH_NCCL) && (defined PADDLE_WITH_PSLIB)
  auto hidden_size = ctx.Attr<int>("size");
  auto gpu_ps_ptr = paddle::framework::PSGPUWrapper::GetInstance();
  gpu_ps_ptr->PullSparse(ctx.GetPlace(), 0, all_keys, all_values, slot_lengths,
                         hidden_size);
#endif
}

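// Pushes the gradients of the pulled embedding rows back to the sparse
// parameter server; all input slots must share a single batch size.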
template <typename T>
static void PushBoxSparseFunctor(const framework::ExecutionContext &ctx) {
  auto inputs = ctx.MultiInput<framework::LoDTensor>("Ids");
  auto d_output =
      ctx.MultiInput<framework::Tensor>(framework::GradVarName("Out"));
  const auto slot_size = inputs.size();
  std::vector<const uint64_t *> all_keys(slot_size);
  std::vector<const float *> all_grad_values(slot_size);
  std::vector<int64_t> slot_lengths(slot_size);
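  // Every slot must carry the same batch size; -1 means not yet determined.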
  int batch_size = -1;
  for (size_t i = 0; i < slot_size; i++) {
    const auto *slot = inputs[i];
    const uint64_t *single_slot_keys =
        reinterpret_cast<const uint64_t *>(slot->data<int64_t>());
    all_keys[i] = single_slot_keys;
    slot_lengths[i] = slot->numel();
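    // A slot with LoD info encodes the batch size as the number of sequence
    // offsets minus one; a dense slot uses its leading dimension.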
    int cur_batch_size =
        slot->lod().size() ? slot->lod()[0].size() - 1 : slot->dims()[0];
    if (batch_size == -1) {
      batch_size = cur_batch_size;
    } else {
      PADDLE_ENFORCE_EQ(batch_size, cur_batch_size,
                        platform::errors::PreconditionNotMet(
                            "The batch size of all input slots should be the "
                            "same, please check."));
    }
    const float *grad_value = d_output[i]->data<float>();
    all_grad_values[i] = grad_value;
  }
#ifdef PADDLE_WITH_BOX_PS
  auto hidden_size = ctx.Attr<int>("size");
  auto box_ptr = paddle::framework::BoxWrapper::GetInstance();
  box_ptr->PushSparseGrad(ctx.GetPlace(), all_keys, all_grad_values,
                          slot_lengths, hidden_size, 0, batch_size);
#endif
#if (defined PADDLE_WITH_NCCL) && (defined PADDLE_WITH_PSLIB)
  auto hidden_size = ctx.Attr<int>("size");
  auto gpu_ps_ptr = paddle::framework::PSGPUWrapper::GetInstance();
  gpu_ps_ptr->PushSparseGrad(ctx.GetPlace(), 0, all_keys, all_grad_values,
                             slot_lengths, hidden_size, batch_size);
#endif
}

using LoDTensor = framework::LoDTensor;
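// CPU kernels simply forward to the shared functors above.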
template <typename T>
class PullBoxSparseCPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    PullBoxSparseFunctor<T>(ctx);
  }
};

template <typename T>
class PushBoxSparseCPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    PushBoxSparseFunctor<T>(ctx);
  }
};
}  // namespace operators
}  // namespace paddle